--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R32
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R64
+
+; Without the F extension, llvm.maxnum.f32 must lower to a libcall to fmaxf.
+define float @maxnum_f32(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmaxf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f32:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmaxf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call float @llvm.maxnum.f32(float %x, float %y)
  ret float %r
}
+
+; The 'fast' flag does not change the soft-float lowering: still a fmaxf libcall
+; (codegen is expected to be identical to the flag-free maxnum_f32 above).
+define float @maxnum_f32_fast(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32_fast:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmaxf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f32_fast:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmaxf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call fast float @llvm.maxnum.f32(float %x, float %y)
  ret float %r
}
+
+; Without the D extension, llvm.maxnum.f64 must lower to a libcall to fmax.
+define double @maxnum_f64(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmax@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f64:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmax@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call double @llvm.maxnum.f64(double %x, double %y)
  ret double %r
}
+
+; The 'nnan' flag does not change the soft-float lowering: still a fmax libcall
+; (codegen is expected to be identical to the flag-free maxnum_f64 above).
+define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64_nnan:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmax@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: maxnum_f64_nnan:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmax@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call nnan double @llvm.maxnum.f64(double %x, double %y)
  ret double %r
}
+
+; Without the F extension, llvm.minnum.f32 must lower to a libcall to fminf.
+define float @minnum_f32(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fminf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f32:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fminf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call float @llvm.minnum.f32(float %x, float %y)
  ret float %r
}
+
+; The 'nnan' flag does not change the soft-float lowering: still a fminf libcall
+; (codegen is expected to be identical to the flag-free minnum_f32 above).
+define float @minnum_f32_nnan(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32_nnan:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fminf@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f32_nnan:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fminf@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call nnan float @llvm.minnum.f32(float %x, float %y)
  ret float %r
}
+
+; Without the D extension, llvm.minnum.f64 must lower to a libcall to fmin.
+define double @minnum_f64(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmin@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f64:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmin@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call double @llvm.minnum.f64(double %x, double %y)
  ret double %r
}
+
+; The 'fast' flag does not change the soft-float lowering: still a fmin libcall
+; (codegen is expected to be identical to the flag-free minnum_f64 above).
+define double @minnum_f64_fast(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64_fast:
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT: call fmin@plt
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
;
; R64-LABEL: minnum_f64_fast:
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT: call fmin@plt
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
  %r = call fast double @llvm.minnum.f64(double %x, double %y)
  ret double %r
}
+
+; Declarations of the IEEE-754 minNum/maxNum intrinsics exercised above.
+declare float @llvm.maxnum.f32(float, float)
+declare double @llvm.maxnum.f64(double, double)
+declare float @llvm.minnum.f32(float, float)
+declare double @llvm.minnum.f64(double, double)
+