--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV64
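+
+; These tests exercise fptosi/fptoui of llvm.trunc/llvm.ceil results on
+; scalable vectors of double, covering narrowing (i8/i16/i32) and same-width
+; (i64) integer destinations at LMUL=1 and LMUL=4.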
+
+; ================================================================================
+; trunc <vscale x 1 x double>
+; ================================================================================
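+;
+; There is no single trunc instruction, so the expected sequence uses the
+; classic mask-and-round-trip idiom. The constant-pool value (.LCPI*_0) is
+; presumably 2^52, the magnitude at and above which every double is already
+; integral. vmflt.vf masks in only the elements that still need rounding
+; (NaNs and large values fail the compare and pass through unchanged), the
+; masked vfcvt.rtz.x.f.v/vfcvt.f.x.v pair rounds them toward zero via an
+; integer round-trip, and the masked vfsgnj.vv restores the original sign so
+; negative inputs that round to zero yield -0.0. The fptosi/fptoui then
+; lowers to vfncvt.rtz to i32 plus vnsrl steps for i16/i8, or to a
+; same-width vfcvt.rtz at e64 for the i64 destination.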
+
+declare <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI0_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI0_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI0_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI0_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI1_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI1_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI1_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI1_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI2_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI2_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f64_to_si32(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI4_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI4_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI4_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI4_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI5_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI5_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI5_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI5_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i64> @trunc_nxv1f64_to_si64(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI6_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI6_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI6_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI6_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @trunc_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
+; RV32-LABEL: trunc_nxv1f64_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI7_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI7_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI7_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI7_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; trunc <vscale x 4 x double>
+; ================================================================================
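+;
+; Same trunc lowering as above, scaled to LMUL=4: the temporary moves to the
+; v12 register group and the narrowing chain steps through e32/m2, e16/m1,
+; and e8/mf2.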
+
+declare <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI8_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI8_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI8_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI8_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI9_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI9_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI9_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI9_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI10_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI10_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI10_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI10_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI11_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI11_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f64_to_si32(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vmv.v.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI12_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI12_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vmv.v.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vmv.v.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI13_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI13_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vmv.v.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f64_to_si64(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI14_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI14_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
+; RV32-LABEL: trunc_nxv4f64_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI15_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI15_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI15_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 1 x double>
+; ================================================================================
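+;
+; ceil uses the same mask-and-round-trip idiom as trunc above, but instead of
+; a statically rounded conversion it switches the dynamic rounding mode:
+; fsrmi a0, 3 saves the old frm in a0 and selects RUP (round up), the masked
+; vfcvt.x.f.v/vfcvt.f.x.v pair rounds under that mode, and fsrm a0 restores
+; the caller's rounding mode before the final integer conversion.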
+
+declare <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI16_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI16_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI16_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI17_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI17_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI17_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI18_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI18_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI18_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI19_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI19_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI19_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f64_to_si32(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI20_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI20_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI20_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI21_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI21_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI21_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI21_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f64_to_si64(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI22_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI22_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI22_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
+; RV32-LABEL: ceil_nxv1f64_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI23_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI23_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI23_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI23_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
+ %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 4 x double>
+; ================================================================================
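+;
+; The LMUL=4 ceil cases mirror the LMUL=1 sequence above, with the rounding
+; round-trip done on the v12 register group before narrowing.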
+
+declare <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI24_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI24_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI25_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI25_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI25_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI26_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI26_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI27_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI27_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v12, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI27_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v12, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f64_to_si32(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI28_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI28_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV32-NEXT: vmv.v.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI28_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v12, v8
+; RV64-NEXT: vmv.v.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI29_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI29_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV32-NEXT: vmv.v.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI29_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; RV64-NEXT: vmv.v.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f64_to_si64(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI30_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI30_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
+; RV32-LABEL: ceil_nxv4f64_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI31_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI31_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI31_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: vmflt.vf v0, v12, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
+ %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV64
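+
+; Same coverage as the f64 file, but with float sources: i8/i16 need
+; narrowing conversions, i32 is same-width, and i64 requires a widening
+; vfwcvt.rtz conversion.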
+
+; ================================================================================
+; trunc <vscale x 1 x float>
+; ================================================================================
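+;
+; For float the constant-pool threshold is presumably 2^23 (every float of at
+; least that magnitude is already integral); otherwise the mask, the
+; round-toward-zero integer round-trip, and the vfsgnj sign restore follow
+; the f64 pattern. f32->i8 needs only one vnsrl after the e16 vfncvt.rtz.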
+
+declare <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 1 x i8> @trunc_nxv1f32_to_si8(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI0_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI0_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI0_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI0_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @trunc_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI1_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI1_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f32_to_si16(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI2_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI2_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI2_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f32_to_ui16(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI3_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f32_to_si32(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI4_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI4_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI4_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI4_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f32_to_ui32(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI5_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI5_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI5_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI5_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i64> @trunc_nxv1f32_to_si64(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI6_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI6_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @trunc_nxv1f32_to_ui64(<vscale x 1 x float> %x) {
+; RV32-LABEL: trunc_nxv1f32_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI7_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f32_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI7_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; trunc <vscale x 4 x float>
+; ================================================================================
+
+declare <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float>)
+
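+; Note: at nxv4f32 the source occupies an LMUL=2 register group, so the
+; temporary moves from v9 to v10 and the widened i64 results come back in
+; an LMUL=4 group via vmv4r.v.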
+define <vscale x 4 x i8> @trunc_nxv4f32_to_si8(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI8_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v10, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI8_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v10, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @trunc_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI9_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v10, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI9_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v10, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f32_to_si16(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI10_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV32-NEXT: vmv.v.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI10_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV64-NEXT: vmv.v.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f32_to_ui16(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV32-NEXT: vmv.v.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI11_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI11_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV64-NEXT: vmv.v.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f32_to_si32(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI12_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI12_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f32_to_ui32(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI13_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI13_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f32_to_si64(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI14_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI14_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f32_to_ui64(<vscale x 4 x float> %x) {
+; RV32-LABEL: trunc_nxv4f32_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI15_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f32_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI15_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI15_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 1 x float>
+; ================================================================================
+
+declare <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float>)
+
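+; Note: ceil reuses the guarded round trip from the trunc tests but rounds
+; through the dynamic rounding mode: fsrmi a0, 3 swaps frm to 3 (RUP,
+; round towards +infinity) around the masked vfcvt.x.f.v, and fsrm a0
+; restores the caller's rounding mode.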
+define <vscale x 1 x i8> @ceil_nxv1f32_to_si8(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI16_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI16_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI16_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @ceil_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI17_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v9, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI17_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI17_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v9, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i16> @ceil_nxv1f32_to_si16(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI18_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI18_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI18_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @ceil_nxv1f32_to_ui16(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI19_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI19_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI19_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f32_to_si32(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI20_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI20_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI20_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f32_to_ui32(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI21_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI21_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI21_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI21_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f32_to_si64(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI22_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI22_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI22_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptosi <vscale x 1 x float> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f32_to_ui64(<vscale x 1 x float> %x) {
+; RV32-LABEL: ceil_nxv1f32_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI23_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI23_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f32_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI23_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI23_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x float> @llvm.ceil.nxv1f32(<vscale x 1 x float> %x)
+ %b = fptoui <vscale x 1 x float> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 4 x float>
+; ================================================================================
+
+declare <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float>)
+
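+; Note: same ceil sequence as the nxv1f32 tests above, scaled to LMUL=2
+; (and LMUL=4 for the i64 results).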
+define <vscale x 4 x i8> @ceil_nxv4f32_to_si8(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v10, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI24_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v10, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @ceil_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI25_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI25_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vnsrl.wi v8, v10, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI25_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vnsrl.wi v8, v10, 0
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f32_to_si16(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI26_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI26_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV32-NEXT: vmv.v.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI26_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v10, v8
+; RV64-NEXT: vmv.v.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f32_to_ui16(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI27_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI27_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV32-NEXT: vmv.v.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI27_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; RV64-NEXT: vmv.v.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f32_to_si32(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI28_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI28_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f32_to_ui32(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI29_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI29_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI29_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f32_to_si64(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI30_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI30_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI30_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptosi <vscale x 4 x float> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f32_to_ui64(<vscale x 4 x float> %x) {
+; RV32-LABEL: ceil_nxv4f32_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI31_0)
+; RV32-NEXT: flw ft0, %lo(.LCPI31_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; RV32-NEXT: vmv4r.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f32_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI31_0)
+; RV64-NEXT: flw ft0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: vmflt.vf v0, v10, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; RV64-NEXT: vmv4r.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %x)
+ %b = fptoui <vscale x 4 x float> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | \
+; RUN: FileCheck %s -check-prefix=RV64
+
+; ================================================================================
+; trunc <vscale x 1 x half>
+; ================================================================================
+
+declare <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half>)
+
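+; Note: for f16 the guard constant is loaded with flh and is presumably
+; 2^10, matching the 10-bit f16 mantissa. i8 results narrow with a single
+; vfncvt; i16 converts in place; i32 widens once.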
+define <vscale x 1 x i8> @trunc_nxv1f16_to_si8(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI0_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI0_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI0_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI0_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @trunc_nxv1f16_to_ui8(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI1_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI1_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI1_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI1_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f16_to_si16(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI2_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI2_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI2_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @trunc_nxv1f16_to_ui16(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI3_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f16_to_si32(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI4_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI4_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI4_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI4_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @trunc_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI5_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI5_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
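+; Note: RVV widening conversions only double SEW, so f16->i64 takes two
+; steps: vfwcvt.f.f.v to f32 under the e16 vsetvli, then
+; vfwcvt.rtz.x[u].f.v from f32 to i64 under the e32 vsetvli.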
+define <vscale x 1 x i64> @trunc_nxv1f16_to_si64(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI6_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI6_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI6_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI6_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @trunc_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
+; RV32-LABEL: trunc_nxv1f16_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI7_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI7_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f16_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI7_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI7_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; trunc <vscale x 4 x half>
+; ================================================================================
+
+declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
+
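+; Note: nxv4f16 fits in LMUL=1; widening to i32 yields an LMUL=2 group
+; (vmv2r.v) and the two-step path to i64 ends in an LMUL=4 group.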
+define <vscale x 4 x i8> @trunc_nxv4f16_to_si8(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI8_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI8_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI8_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI8_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @trunc_nxv4f16_to_ui8(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI9_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI9_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI9_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI9_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f16_to_si16(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI10_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI10_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI10_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI10_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @trunc_nxv4f16_to_ui16(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI11_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI11_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f16_to_si32(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI12_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI12_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @trunc_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI13_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI13_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f16_to_si64(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI14_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI14_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @trunc_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
+; RV32-LABEL: trunc_nxv4f16_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI15_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f16_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI15_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI15_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 1 x half>
+; ================================================================================
+
+declare <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half>)
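+; The ceil lowering swaps the dynamic rounding mode to round-up around the
+; conversion (fsrmi a0, 3 / fsrm a0; frm encoding 3 is RUP) instead of using
+; a statically rounded instruction. The vmflt.vf mask selects lanes whose
+; magnitude is below a constant-pool threshold; lanes at or above it are
+; already integral, and NaN lanes also fail the compare, so both pass through
+; unchanged. The masked vfcvt.x.f.v / vfcvt.f.x.v pair rounds through the
+; integer domain, and vfsgnj.vv restores the original sign so that e.g.
+; ceil(-0.5) yields -0.0. The fptosi/fptoui is then a separate rtz convert.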
+
+define <vscale x 1 x i8> @ceil_nxv1f16_to_si8(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI16_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI16_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI16_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @ceil_nxv1f16_to_ui8(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI17_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI17_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI17_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i8>
+ ret <vscale x 1 x i8> %b
+}
+
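+; For i16 results the integer width already matches the f16 source SEW, so
+; the final step is a plain same-width vfcvt.rtz.x.f.v with no extra vsetvli.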
+define <vscale x 1 x i16> @ceil_nxv1f16_to_si16(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI18_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI18_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI18_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i16> @ceil_nxv1f16_to_ui16(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI19_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI19_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI19_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f16_to_si32(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI20_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI20_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI20_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i32> @ceil_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI21_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI21_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI21_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI21_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f16_to_si64(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI22_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI22_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI22_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptosi <vscale x 1 x half> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 1 x i64> @ceil_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
+; RV32-LABEL: ceil_nxv1f16_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI23_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI23_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f16_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI23_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI23_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> %x)
+ %b = fptoui <vscale x 1 x half> %a to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %b
+}
+
+; ================================================================================
+; ceil <vscale x 4 x half>
+; ================================================================================
+
+declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
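+; Same sequence as the nxv1f16 cases, but at LMUL=m1 instead of mf4; widened
+; results therefore occupy register groups (v10m2 for i32, and v8m4 for i64
+; after the intermediate f32 step in v12m2).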
+
+define <vscale x 4 x i8> @ceil_nxv4f16_to_si8(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_si8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI24_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_si8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI24_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i8> @ceil_nxv4f16_to_ui8(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_ui8:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI25_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI25_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_ui8:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI25_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; RV64-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; RV64-NEXT: vmv1r.v v8, v9
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i8>
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f16_to_si16(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_si16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI26_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI26_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_si16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI26_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i16> @ceil_nxv4f16_to_ui16(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_ui16:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI27_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI27_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_ui16:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI27_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f16_to_si32(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_si32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI28_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI28_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_si32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI28_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i32> @ceil_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_ui32:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI29_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI29_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_ui32:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI29_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f16_to_si64(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_si64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI30_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI30_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_si64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI30_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptosi <vscale x 4 x half> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 4 x i64> @ceil_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
+; RV32-LABEL: ceil_nxv4f16_to_ui64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI31_0)
+; RV32-NEXT: flh ft0, %lo(.LCPI31_0)(a0)
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, ft0
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f16_to_ui64:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a0, %hi(.LCPI31_0)
+; RV64-NEXT: flh ft0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: vmflt.vf v0, v9, ft0
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; RV64-NEXT: ret
+ %a = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %x)
+ %b = fptoui <vscale x 4 x half> %a to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %b
+}