From: Zakk Chen
Date: Tue, 17 May 2022 03:09:22 +0000 (-0700)
Subject: [NFC][Clang] Add missing test cases for segment load
X-Git-Tag: upstream/17.0.6~24299
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4ca9c6a84f918be4314fc7066eb899f4e86f07f3;p=platform%2Fupstream%2Fllvm.git

[NFC][Clang] Add missing test cases for segment load

This patch adds the test cases that were missing from:
010f329803c84e43ec15ffaff7b6e26b032cbcc6

Reviewed By: kito-cheng, craig.topper

Differential Revision: https://reviews.llvm.org/D139387
---

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
index 31aebd0..815012b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
@@ -6486,3 +6486,135 @@ void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
 void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
   return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_tu(v0, v1, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_ta(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_ta(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tuma(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 2)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_tuma(v0, v1, mask, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tumu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_tumu(v0, v1, mask, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tama(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) {
+  return vlseg2e32_tama(v0, v1, mask, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tamu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_tamu(v0, v1, mask, merge0, merge1, base, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
index 6c9107f..a41556c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
@@ -7300,3 +7300,47 @@ void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v
 void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) {
   return vlseg2e16_v_f16m4(v0, v1, base, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_tu(v0, v1, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_ta(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_ta(v0, v1, base, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c
index 47c05aa..2617dca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg_mask.c
@@ -7300,3 +7300,91 @@ void test_vlseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
 void test_vlseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
   return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tuma(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 2)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_tuma(v0, v1, mask, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tumu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_tumu(v0, v1, mask, merge0, merge1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tama(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_tama(v0, v1, mask, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_tamu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t vl) {
+  return vlseg2e32_v_u32mf2_tamu(v0, v1, mask, merge0, merge1, base, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
index f37b9b7..99fa792 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
@@ -8204,3 +8204,107 @@ void test_vlseg4e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_
 void test_vlseg2e16ff_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
   return vlseg2e16ff_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tuma(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 2)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_tuma(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_tumu(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_tama(v0, v1, mask, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tamu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_tamu(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}