ret <vscale x 2 x i64> %res
}
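+
+; A splice index of -1 takes just the last element of %a, so it can lower to
+; LASTB (extract the last active element) followed by INSR, with no stack
+; traffic. The unpacked <vscale x 2 x half> type keeps its elements in 64-bit
+; containers, hence the .d forms below.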
+define <vscale x 2 x half> @splice_nxv2f16_neg_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_neg_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: lastb d0, p0, z0.d
+; CHECK-NEXT: insr z1.d, d0
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
+ ret <vscale x 2 x half> %res
+}
+
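+; Indices below -1 are expanded via the stack: both operands are spilled as
+; full 64-bit containers and the result is reloaded at a byte offset of
+; idx * 8 (-16 here).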
+define <vscale x 2 x half> @splice_nxv2f16_neg2_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv2f16_neg2_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT: addvl x8, x8, #1
+; CHECK-NEXT: mov x9, #-16
+; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -2)
+ ret <vscale x 2 x half> %res
+}
+
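+; <vscale x 4 x half> is unpacked into 32-bit containers, so the -1 case uses
+; the .s forms of LASTB/INSR.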
+define <vscale x 4 x half> @splice_nxv4f16_neg_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_neg_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: lastb s0, p0, z0.s
+; CHECK-NEXT: insr z1.s, s0
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
+ ret <vscale x 4 x half> %res
+}
+
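+; With 32-bit containers the stack reload offset for idx == -3 is
+; -3 * 4 = -12 bytes.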
+define <vscale x 4 x half> @splice_nxv4f16_neg3_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
+; CHECK-LABEL: splice_nxv4f16_neg3_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: st1w { z0.s }, p0, [sp]
+; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
+; CHECK-NEXT: addvl x8, x8, #1
+; CHECK-NEXT: mov x9, #-12
+; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -3)
+ ret <vscale x 4 x half> %res
+}
+
define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_first_idx:
; CHECK: // %bb.0:
ret <vscale x 8 x half> %res
}
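+
+; <vscale x 2 x float> also lives in 64-bit containers, so the -1 case mirrors
+; the nxv2f16 lowering above.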
+define <vscale x 2 x float> @splice_nxv2f32_neg_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_neg_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: lastb d0, p0, z0.d
+; CHECK-NEXT: insr z1.d, d0
+; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
+ ret <vscale x 2 x float> %res
+}
+
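+; As for nxv2f16, idx == -2 with 64-bit containers gives a -16 byte reload
+; offset.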
+define <vscale x 2 x float> @splice_nxv2f32_neg2_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
+; CHECK-LABEL: splice_nxv2f32_neg2_idx:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT: addvl x8, x8, #1
+; CHECK-NEXT: mov x9, #-16
+; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -2)
+ ret <vscale x 2 x float> %res
+}
+
define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_first_idx:
; CHECK: // %bb.0:
define <vscale x 16 x i8> @splice_nxv16i8_1(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: st1b { z0.b }, p0, [sp]
-; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-1
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb b0, p0, z0.b
+; CHECK-NEXT: insr z1.b, b0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
ret <vscale x 16 x i8> %res
define <vscale x 8 x i16> @splice_nxv8i16_1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1h { z0.h }, p0, [sp]
-; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-2
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb h0, p0, z0.h
+; CHECK-NEXT: insr z1.h, h0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
ret <vscale x 8 x i16> %res
define <vscale x 4 x i32> @splice_nxv4i32_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1w { z0.s }, p0, [sp]
-; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-4
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb s0, p0, z0.s
+; CHECK-NEXT: insr z1.s, s0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
ret <vscale x 4 x i32> %res
define <vscale x 2 x i64> @splice_nxv2i64_1(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-8
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb d0, p0, z0.d
+; CHECK-NEXT: insr z1.d, d0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
ret <vscale x 2 x i64> %res
define <vscale x 8 x half> @splice_nxv8f16_1(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1h { z0.h }, p0, [sp]
-; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-2
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb h0, p0, z0.h
+; CHECK-NEXT: insr z1.h, h0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
ret <vscale x 8 x half> %res
define <vscale x 4 x float> @splice_nxv4f32_1(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1w { z0.s }, p0, [sp]
-; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-4
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb s0, p0, z0.s
+; CHECK-NEXT: insr z1.s, s0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
ret <vscale x 4 x float> %res
define <vscale x 2 x double> @splice_nxv2f64_1(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-8
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: lastb d0, p0, z0.d
+; CHECK-NEXT: insr z1.d, d0
+; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
ret <vscale x 2 x double> %res
define <vscale x 2 x i1> @splice_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: mov z0.d, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: mov z1.d, p1/z, #1 // =0x1
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1d { z0.d }, p0, [sp]
-; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-8
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: and z0.d, z0.d, #0x1
-; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: insr z1.d, d0
+; CHECK-NEXT: and z1.d, z1.d, #0x1
+; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 -1)
ret <vscale x 2 x i1> %res
define <vscale x 4 x i1> @splice_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: mov z1.s, p1/z, #1 // =0x1
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1w { z0.s }, p0, [sp]
-; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-4
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: and z0.s, z0.s, #0x1
-; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: insr z1.s, s0
+; CHECK-NEXT: and z1.s, z1.s, #0x1
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 -1)
ret <vscale x 4 x i1> %res
define <vscale x 8 x i1> @splice_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: lastb h0, p0, z0.h
; CHECK-NEXT: mov z1.h, p1/z, #1 // =0x1
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: ptrue p1.b
-; CHECK-NEXT: st1h { z0.h }, p0, [sp]
-; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-2
-; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
-; CHECK-NEXT: and z0.h, z0.h, #0x1
-; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: insr z1.h, h0
+; CHECK-NEXT: and z1.h, z1.h, #0x1
+; CHECK-NEXT: cmpne p0.h, p0/z, z1.h, #0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 -1)
ret <vscale x 8 x i1> %res
define <vscale x 16 x i1> @splice_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: lastb b0, p0, z0.b
; CHECK-NEXT: mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: st1b { z0.b }, p0, [sp]
-; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
-; CHECK-NEXT: addvl x8, x8, #1
-; CHECK-NEXT: mov x9, #-1
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8, x9]
-; CHECK-NEXT: and z0.b, z0.b, #0x1
-; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: insr z1.b, b0
+; CHECK-NEXT: and z1.b, z1.b, #0x1
+; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, #0
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 -1)
ret <vscale x 16 x i1> %res
declare <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32)
declare <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
+declare <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, i32)
+declare <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, i32)
declare <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
+declare <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
declare <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, i32)
declare <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)