; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   -riscv-v-vector-bits-min=128 -target-abi=ilp32d < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   -riscv-v-vector-bits-min=128 -target-abi=lp64d < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
; CHECK-LABEL: bitcast_v4i8_v32i1:
; NOTE(review): the autogenerated body checks for this function were lost when
; the patch hunks were concatenated; regenerate them with
; utils/update_llc_test_checks.py. The IR body below is reconstructed (the %b
; operand is presumably consumed via an xor so the mask bitcast stays live) —
; confirm against the original test.
  %c = bitcast <4 x i8> %a to <32 x i1>
  %d = xor <32 x i1> %b, %c
  ret <32 x i1> %d
}

; Bitcasts from small integer vectors to scalar FP types should lower to a
; single vfmv.f.s into fa0 (requires +d/+zfh and a hard-float ABI).
define half @bitcast_v2i8_f16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i8> %a to half
  ret half %b
}

define half @bitcast_v1i16_f16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i16> %a to half
  ret half %b
}

define float @bitcast_v4i8_f32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <4 x i8> %a to float
  ret float %b
}

define float @bitcast_v2i16_f32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i16> %a to float
  ret float %b
}

define float @bitcast_v1i32_f32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i32> %a to float
  ret float %b
}
; With +d and a hard-float ABI the i64<->f64 bitcast no longer needs the
; RV32-specific two-GPR split (vsrl.vx + two vmv.x.s); both targets emit a
; single vfmv.f.s into fa0, so the RV32/RV64 checks unify under CHECK.
define double @bitcast_v8i8_f64(<8 x i8> %a) {
; CHECK-LABEL: bitcast_v8i8_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <8 x i8> %a to double
  ret double %b
}
define double @bitcast_v4i16_f64(<4 x i16> %a) {
; CHECK-LABEL: bitcast_v4i16_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <4 x i16> %a to double
  ret double %b
}
define double @bitcast_v2i32_f64(<2 x i32> %a) {
; CHECK-LABEL: bitcast_v2i32_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x i32> %a to double
  ret double %b
}
define double @bitcast_v1i64_f64(<1 x i64> %a) {
; CHECK-LABEL: bitcast_v1i64_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x i64> %a to double
  ret double %b
}