From: Craig Topper
Date: Sat, 4 Feb 2023 21:35:58 +0000 (-0800)
Subject: [RISCV] Fix crash splatting f64 -0.0 into a vector on RV32 after D142953.
X-Git-Tag: upstream/17.0.6~18562
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=712e143883d694d3b5817dae714da2315eae8c89;p=platform%2Fupstream%2Fllvm.git

[RISCV] Fix crash splatting f64 -0.0 into a vector on RV32 after D142953.

For RV32, we now use a scalar fcvt of x0 and a scalar fneg, then splat the
scalar FP value to the vector. For RV64, we use li of -1 and slli by 63, then
splat the GPR to the vector.
---
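
A minimal reproducer for the RV32 crash, distilled from the splat_negzero_v2f64
test added below. The llc flags are taken from the new riscv32 RUN line, minus
the FileCheck piping and the LMUL cap; repro.ll is just a placeholder name.
Without this patch the invocation should hit the crash, with it the
fcvt/fneg/vfmv.v.f sequence described above is emitted:

  ; llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d \
  ;     -riscv-v-vector-bits-min=128 -verify-machineinstrs repro.ll
  define void @splat_negzero_v2f64(ptr %x) {
    %a = insertelement <2 x double> poison, double -0.0, i32 0
    %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
    store <2 x double> %b, ptr %x
    ret void
  }
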
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 1e2d57b..780bc6c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2592,8 +2592,12 @@ bool RISCVDAGToDAGISel::selectFPImm(SDValue N, SDValue &Imm) {
   // td can handle +0.0 already.
   if (APF.isPosZero())
     return false;
-  SDLoc DL(N);
   MVT XLenVT = Subtarget->getXLenVT();
+  if (CFP->getValueType(0) == MVT::f64 && !Subtarget->is64Bit()) {
+    assert(APF.isNegZero() && "Unexpected constant.");
+    return false;
+  }
+  SDLoc DL(N);
   Imm = selectImm(CurDAG, DL, XLenVT, APF.bitcastToAPInt().getSExtValue(),
                   *Subtarget);
   return true;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index cbe10bb..d6925d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,LMULMAX2,RV32-LMULMAX2
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,LMULMAX2,RV64-LMULMAX2
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,LMULMAX1,RV32-LMULMAX1
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,LMULMAX1,RV64-LMULMAX1
 
 define void @splat_v8f16(ptr %x, half %y) {
 ; CHECK-LABEL: splat_v8f16:
@@ -213,3 +213,149 @@ define void @splat_zero_v4f64(ptr %x) {
   store <4 x double> %b, ptr %x
   ret void
 }
+
+define void @splat_negzero_v8f16(ptr %x) {
+; CHECK-LABEL: splat_negzero_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, 1048568
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = insertelement <8 x half> poison, half -0.0, i32 0
+  %b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
+  store <8 x half> %b, ptr %x
+  ret void
+}
+
+define void @splat_negzero_v4f32(ptr %x) {
+; CHECK-LABEL: splat_negzero_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a1, 524288
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = insertelement <4 x float> poison, float -0.0, i32 0
+  %b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
+  store <4 x float> %b, ptr %x
+  ret void
+}
+
+define void @splat_negzero_v2f64(ptr %x) {
+; CHECK-RV32-LABEL: splat_negzero_v2f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    fcvt.d.w ft0, zero
+; CHECK-RV32-NEXT:    fneg.d ft0, ft0
+; CHECK-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vfmv.v.f v8, ft0
+; CHECK-RV32-NEXT:    vse64.v v8, (a0)
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: splat_negzero_v2f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a1, -1
+; CHECK-RV64-NEXT:    slli a1, a1, 63
+; CHECK-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v8, a1
+; CHECK-RV64-NEXT:    vse64.v v8, (a0)
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <2 x double> poison, double -0.0, i32 0
+  %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
+  store <2 x double> %b, ptr %x
+  ret void
+}
+
+define void @splat_negzero_16f16(ptr %x) {
+; LMULMAX2-LABEL: splat_negzero_16f16:
+; LMULMAX2:       # %bb.0:
+; LMULMAX2-NEXT:    lui a1, 1048568
+; LMULMAX2-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; LMULMAX2-NEXT:    vmv.v.x v8, a1
+; LMULMAX2-NEXT:    vse16.v v8, (a0)
+; LMULMAX2-NEXT:    ret
+;
+; LMULMAX1-LABEL: splat_negzero_16f16:
+; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    lui a1, 1048568
+; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-NEXT:    vmv.v.x v8, a1
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    vse16.v v8, (a1)
+; LMULMAX1-NEXT:    vse16.v v8, (a0)
+; LMULMAX1-NEXT:    ret
+  %a = insertelement <16 x half> poison, half -0.0, i32 0
+  %b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
+  store <16 x half> %b, ptr %x
+  ret void
+}
+
+define void @splat_negzero_v8f32(ptr %x) {
+; LMULMAX2-LABEL: splat_negzero_v8f32:
+; LMULMAX2:       # %bb.0:
+; LMULMAX2-NEXT:    lui a1, 524288
+; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX2-NEXT:    vmv.v.x v8, a1
+; LMULMAX2-NEXT:    vse32.v v8, (a0)
+; LMULMAX2-NEXT:    ret
+;
+; LMULMAX1-LABEL: splat_negzero_v8f32:
+; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    lui a1, 524288
+; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT:    vmv.v.x v8, a1
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
+; LMULMAX1-NEXT:    vse32.v v8, (a0)
+; LMULMAX1-NEXT:    ret
+  %a = insertelement <8 x float> poison, float -0.0, i32 0
+  %b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
+  store <8 x float> %b, ptr %x
+  ret void
+}
+
+define void @splat_negzero_v4f64(ptr %x) {
+; RV32-LMULMAX2-LABEL: splat_negzero_v4f64:
+; RV32-LMULMAX2:       # %bb.0:
+; RV32-LMULMAX2-NEXT:    fcvt.d.w ft0, zero
+; RV32-LMULMAX2-NEXT:    fneg.d ft0, ft0
+; RV32-LMULMAX2-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-LMULMAX2-NEXT:    vfmv.v.f v8, ft0
+; RV32-LMULMAX2-NEXT:    vse64.v v8, (a0)
+; RV32-LMULMAX2-NEXT:    ret
+;
+; RV64-LMULMAX2-LABEL: splat_negzero_v4f64:
+; RV64-LMULMAX2:       # %bb.0:
+; RV64-LMULMAX2-NEXT:    li a1, -1
+; RV64-LMULMAX2-NEXT:    slli a1, a1, 63
+; RV64-LMULMAX2-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-LMULMAX2-NEXT:    vmv.v.x v8, a1
+; RV64-LMULMAX2-NEXT:    vse64.v v8, (a0)
+; RV64-LMULMAX2-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: splat_negzero_v4f64:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, zero
+; RV32-LMULMAX1-NEXT:    fneg.d ft0, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v8, ft0
+; RV32-LMULMAX1-NEXT:    addi a1, a0, 16
+; RV32-LMULMAX1-NEXT:    vse64.v v8, (a1)
+; RV32-LMULMAX1-NEXT:    vse64.v v8, (a0)
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: splat_negzero_v4f64:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    li a1, -1
+; RV64-LMULMAX1-NEXT:    slli a1, a1, 63
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-LMULMAX1-NEXT:    vmv.v.x v8, a1
+; RV64-LMULMAX1-NEXT:    addi a1, a0, 16
+; RV64-LMULMAX1-NEXT:    vse64.v v8, (a1)
+; RV64-LMULMAX1-NEXT:    vse64.v v8, (a0)
+; RV64-LMULMAX1-NEXT:    ret
+  %a = insertelement <4 x double> poison, double -0.0, i32 0
+  %b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
+  store <4 x double> %b, ptr %x
+  ret void
+}