From 2c442629f0bd210fdb76fa409e131c87387e884d Mon Sep 17 00:00:00 2001
From: Fraser Cormack
Date: Sat, 9 Jan 2021 10:29:50 +0000
Subject: [PATCH] [RISCV] Add tests for scalable constant-folding (NFC)

---
 llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll  | 17 ++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll  | 30 ++++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll | 30 ++++++++++++++++++++++++
 3 files changed, 77 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
index 98b30b5..57006f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode-rv32.ll
@@ -37,6 +37,23 @@ define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test constant adds to see if we can optimize them away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vadd_ii_nxv1i8_1() {
+; CHECK-LABEL: vadd_ii_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 2
+; CHECK-NEXT:    vadd.vi v16, v25, 3
+; CHECK-NEXT:    ret
+  %heada = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
+  %splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %headb = insertelement <vscale x 1 x i8> undef, i8 3, i32 0
+  %splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = add <vscale x 1 x i8> %splata, %splatb
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vadd_vx_nxv2i8:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
index ae93e8f..21fab82 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -36,6 +36,36 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test V/1 to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdiv_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdiv_vi_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vdiv.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sdiv <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+; Test 0/V to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdiv_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdiv_iv_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vdiv.vv v16, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sdiv <vscale x 1 x i8> %splat, %va
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vdiv_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vdiv_vv_nxv2i8:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
index 0acc0eb..7982e12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode-rv32.ll
@@ -36,6 +36,36 @@ define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+; Test V/1 to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdivu_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdivu_vi_nxv1i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vdivu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = udiv <vscale x 1 x i8> %va, %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+; Test 0/V to see if we can optimize it away for scalable vectors.
+; FIXME: We can't.
+define <vscale x 1 x i8> @vdivu_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vdivu_iv_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vdivu.vv v16, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = udiv <vscale x 1 x i8> %splat, %va
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vdivu_vv_nxv2i8:
 ; CHECK:       # %bb.0:
-- 
2.7.4
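Note on the FIXMEs: if these constant folds were taught to handle scalable vectors, each test above would reduce to trivial code: the add of splats 2 and 3 would fold to a single splat of 5, V/1 would simply return %va, and 0/V would fold to a zero splat. As a rough sketch of what the vadd_ii_nxv1i8_1 checks might look like once such a fold lands (this is an assumption, including the choice of v16 as the result register), the test could then read:

  ; CHECK-LABEL: vadd_ii_nxv1i8_1:
  ; CHECK:       # %bb.0:
  ; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
  ; CHECK-NEXT:    vmv.v.i v16, 5
  ; CHECK-NEXT:    ret

That is, the two vmv.v.i/vadd.vi instructions collapse into one vmv.v.i splatting the folded constant.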