From 382de9089604216d3ef1702b7864c564a1c9a264 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Mon, 28 Mar 2022 14:21:39 -0400
Subject: [PATCH] [RISCV] add tests for minnum/maxnum; NFC

Issue #54554
---
 llvm/test/CodeGen/RISCV/fmax-fmin.ll | 185 +++++++++++++++++++++++++++++++++++
 1 file changed, 185 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/fmax-fmin.ll

diff --git a/llvm/test/CodeGen/RISCV/fmax-fmin.ll b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
new file mode 100644
index 0000000..4be3e9e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R32
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R64
+
+define float @maxnum_f32(float %x, float %y) nounwind {
+; R32-LABEL: maxnum_f32:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmaxf@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: maxnum_f32:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmaxf@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call float @llvm.maxnum.f32(float %x, float %y)
+  ret float %r
+}
+
+define float @maxnum_f32_fast(float %x, float %y) nounwind {
+; R32-LABEL: maxnum_f32_fast:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmaxf@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: maxnum_f32_fast:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmaxf@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call fast float @llvm.maxnum.f32(float %x, float %y)
+  ret float %r
+}
+
+define double @maxnum_f64(double %x, double %y) nounwind {
+; R32-LABEL: maxnum_f64:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmax@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: maxnum_f64:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmax@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call double @llvm.maxnum.f64(double %x, double %y)
+  ret double %r
+}
+
+define double @maxnum_f64_nnan(double %x, double %y) nounwind {
+; R32-LABEL: maxnum_f64_nnan:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmax@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: maxnum_f64_nnan:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmax@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call nnan double @llvm.maxnum.f64(double %x, double %y)
+  ret double %r
+}
+
+define float @minnum_f32(float %x, float %y) nounwind {
+; R32-LABEL: minnum_f32:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fminf@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: minnum_f32:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fminf@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call float @llvm.minnum.f32(float %x, float %y)
+  ret float %r
+}
+
+define float @minnum_f32_nnan(float %x, float %y) nounwind {
+; R32-LABEL: minnum_f32_nnan:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fminf@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: minnum_f32_nnan:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fminf@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call nnan float @llvm.minnum.f32(float %x, float %y)
+  ret float %r
+}
+
+define double @minnum_f64(double %x, double %y) nounwind {
+; R32-LABEL: minnum_f64:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmin@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: minnum_f64:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmin@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call double @llvm.minnum.f64(double %x, double %y)
+  ret double %r
+}
+
+define double @minnum_f64_fast(double %x, double %y) nounwind {
+; R32-LABEL: minnum_f64_fast:
+; R32:       # %bb.0:
+; R32-NEXT:    addi sp, sp, -16
+; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; R32-NEXT:    call fmin@plt
+; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; R32-NEXT:    addi sp, sp, 16
+; R32-NEXT:    ret
+;
+; R64-LABEL: minnum_f64_fast:
+; R64:       # %bb.0:
+; R64-NEXT:    addi sp, sp, -16
+; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; R64-NEXT:    call fmin@plt
+; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; R64-NEXT:    addi sp, sp, 16
+; R64-NEXT:    ret
+  %r = call fast double @llvm.minnum.f64(double %x, double %y)
+  ret double %r
+}
+
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
+
-- 
2.7.4
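
Side note, not part of the patch: neither RUN line enables the F or D
extension, so llc has no hardware float min/max instructions to select,
and the llvm.minnum/llvm.maxnum intrinsics are lowered to libcalls to the
C99 routines fminf/fmin/fmaxf/fmax. That is exactly the call fmaxf@plt /
call fmin@plt sequences the CHECK lines capture; per the NOTE line, they
can be regenerated with llvm/utils/update_llc_test_checks.py. The tests
also record that the fast and nnan variants currently produce the same
libcall sequence as the strict versions. The libm rule these intrinsics
are defined to follow is that a single NaN operand is quietly dropped.
A minimal C sketch of that rule, assuming a hosted toolchain with libm
(a demo, not code from the patch):

#include <math.h>
#include <stdio.h>

int main(void) {
    float a = NAN;   /* quiet NaN */
    float b = 2.0f;
    /* C99 fmax/fmin: if exactly one operand is a NaN, the other
       (numeric) operand is returned, so the NaN is dropped here. */
    printf("fmaxf(NaN, 2.0f) = %f\n", fmaxf(a, b)); /* prints 2.000000 */
    printf("fminf(NaN, 2.0f) = %f\n", fminf(a, b)); /* prints 2.000000 */
    return 0;
}

Compile with, e.g., cc demo.c -lm.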