; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=arm64-apple-ios -o - %s | FileCheck %s
-; RUN: llc -mtriple=aarch64_be-unknown-linux -o - %s | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -mtriple=arm64-apple-ios -mattr=+sve -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64_be-unknown-linux -mattr=+sve -o - %s | FileCheck --check-prefix=CHECK-BE %s
; CHECK-LABEL: lCPI0_0:
; CHECK-NEXT: .byte 0 ; 0x0
exit:
ret void
}
+
+; Purpose: in a counted loop, load a scalable <vscale x 16 x i8> vector from
+; %src, zero-extend it to <vscale x 16 x i32>, double each element
+; (%add = %ext + %ext), and store the widened result to %dst.
+; The CHECK/CHECK-BE blocks below are autogenerated (update_llc_test_checks.py):
+; per iteration they expect the widening load to be legalized into four
+; predicated ld1b loads into .s elements (byte -> word extend) and the store
+; into four st1w stores, identical for little- and big-endian apart from
+; label/comment syntax.
+define void @zext_v16i8_to_v16i32_in_loop_scalable_vectors(i8* %src, i32* %dst) {
+; CHECK-LABEL: zext_v16i8_to_v16i32_in_loop_scalable_vectors:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: LBB8_1: ; %loop
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: add x9, x0, x8
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x9, #2, mul vl]
+; CHECK-NEXT: ld1b { z2.s }, p0/z, [x9, #3, mul vl]
+; CHECK-NEXT: ld1b { z3.s }, p0/z, [x9, #1, mul vl]
+; CHECK-NEXT: add z0.s, z0.s, z0.s
+; CHECK-NEXT: add x9, x1, x8, lsl #2
+; CHECK-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT: add x8, x8, #16
+; CHECK-NEXT: cmp x8, #128
+; CHECK-NEXT: add z1.s, z1.s, z1.s
+; CHECK-NEXT: add z0.s, z3.s, z3.s
+; CHECK-NEXT: add z2.s, z2.s, z2.s
+; CHECK-NEXT: st1w { z1.s }, p0, [x9, #2, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p0, [x9, #3, mul vl]
+; CHECK-NEXT: st1w { z0.s }, p0, [x9, #1, mul vl]
+; CHECK-NEXT: b.ne LBB8_1
+; CHECK-NEXT: ; %bb.2: ; %exit
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: zext_v16i8_to_v16i32_in_loop_scalable_vectors:
+; CHECK-BE: // %bb.0: // %entry
+; CHECK-BE-NEXT: mov x8, xzr
+; CHECK-BE-NEXT: ptrue p0.s
+; CHECK-BE-NEXT: .LBB8_1: // %loop
+; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-BE-NEXT: add x9, x0, x8
+; CHECK-BE-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-BE-NEXT: ld1b { z1.s }, p0/z, [x9, #2, mul vl]
+; CHECK-BE-NEXT: ld1b { z2.s }, p0/z, [x9, #3, mul vl]
+; CHECK-BE-NEXT: ld1b { z3.s }, p0/z, [x9, #1, mul vl]
+; CHECK-BE-NEXT: add z0.s, z0.s, z0.s
+; CHECK-BE-NEXT: add x9, x1, x8, lsl #2
+; CHECK-BE-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-BE-NEXT: add x8, x8, #16
+; CHECK-BE-NEXT: cmp x8, #128
+; CHECK-BE-NEXT: add z1.s, z1.s, z1.s
+; CHECK-BE-NEXT: add z0.s, z3.s, z3.s
+; CHECK-BE-NEXT: add z2.s, z2.s, z2.s
+; CHECK-BE-NEXT: st1w { z1.s }, p0, [x9, #2, mul vl]
+; CHECK-BE-NEXT: st1w { z2.s }, p0, [x9, #3, mul vl]
+; CHECK-BE-NEXT: st1w { z0.s }, p0, [x9, #1, mul vl]
+; CHECK-BE-NEXT: b.ne .LBB8_1
+; CHECK-BE-NEXT: // %bb.2: // %exit
+; CHECK-BE-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ ; %iv is both the byte offset into %src and the i32-element index into %dst.
+ ; NOTE(review): the stride is a fixed +16 and the trip count a fixed 128,
+ ; i.e. the address arithmetic matches the element count only when
+ ; vscale == 1 -- presumably intentional for this codegen test; confirm.
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %src.gep = getelementptr i8, i8* %src, i64 %iv
+ %src.gep.cast = bitcast i8* %src.gep to <vscale x 16 x i8>*
+ %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %src.gep.cast
+ ; Widen each byte to i32, then double it; the add is what keeps the
+ ; zero-extend from folding away entirely in the checked output.
+ %ext = zext <vscale x 16 x i8> %load to <vscale x 16 x i32>
+ %add = add <vscale x 16 x i32> %ext, %ext
+ %dst.gep = getelementptr i32, i32* %dst, i64 %iv
+ %dst.gep.cast = bitcast i32* %dst.gep to <vscale x 16 x i32>*
+ store <vscale x 16 x i32> %add, <vscale x 16 x i32>* %dst.gep.cast
+ %iv.next = add nuw i64 %iv, 16
+ %ec = icmp eq i64 %iv.next, 128
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}