--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64 -mattr=+sve2p1 < %s | FileCheck %s
+
+; == WHILEGE ==
+
+define <vscale x 16 x i1> @whilege_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilege_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilege { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilege_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilege_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilege { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilege_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilege_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilege { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilege.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilege_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilege_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilege { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilege.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILEGT ==
+
+define <vscale x 16 x i1> @whilegt_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilegt_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilegt { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilegt_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilegt_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilegt { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilegt_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilegt_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilegt { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilegt_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilegt_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilegt { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILEHI ==
+
+define <vscale x 16 x i1> @whilehi_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehi_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehi { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilehi_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehi_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehi { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilehi_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehi_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehi { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilehi_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehi_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehi { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILEHS ==
+
+define <vscale x 16 x i1> @whilehs_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehs_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehs { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilehs_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehs_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehs { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilehs_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehs_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehs { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilehs_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehs_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehs { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILELE ==
+
+define <vscale x 16 x i1> @whilele_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilele_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilele { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilele_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilele_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilele { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilele.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilele_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilele_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilele { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilele_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilele_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilele { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilele.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILELO ==
+
+define <vscale x 16 x i1> @whilelo_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelo_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilelo_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelo_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilelo_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelo_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilelo_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelo_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILELS ==
+
+define <vscale x 16 x i1> @whilels_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilels_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilels { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilels.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilels_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilels_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilels { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilels_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilels_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilels { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilels_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilels_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilels { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilels.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; == WHILELT ==
+
+define <vscale x 16 x i1> @whilelt_x2_nxv16i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelt_x2_nxv16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelt { p0.b, p1.b }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv16i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilelt_x2_nxv8i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelt_x2_nxv8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelt { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv8i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilelt_x2_nxv4i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelt_x2_nxv4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelt { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilelt_x2_nxv2i1(i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelt_x2_nxv2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelt { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: // kill: def $p0 killed $p0 killed $p0_p1
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64 %m, i64 %n)
+ %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ ret <vscale x 2 x i1> %res
+}
+
+
+; Test that we get good code quality when using the while intrinsics in combination with other intrinsics
+
+define <vscale x 32 x i1> @codegen_whilege_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilege_b16_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilege { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilegt_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilegt_b32_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilegt { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilehi_b64_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilehi_b64_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilehi { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilehs_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilehs_b16_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilehs { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilele_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilele_b32_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilele { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilelo_b64_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilelo_b64_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilelo { p0.d, p1.d }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilels_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilels_b16_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilels { p0.h, p1.h }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+define <vscale x 32 x i1> @codegen_whilelt_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
+; CHECK-LABEL: codegen_whilelt_b32_x2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: whilelt { p0.s, p1.s }, x0, x1
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64 %op1, i64 %op2)
+ %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
+ %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
+ %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
+ %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
+ %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
+ %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
+ ret <vscale x 32 x i1> %6
+}
+
+
+; == Test that we use predicate registers starting at a multiple of 2 ==
+
+define <vscale x 16 x i1> @whilege_x2_nxv16i1_reg_off(<vscale x 16 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilege_x2_nxv16i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilege { p2.b, p3.b }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ %res = and <vscale x 16 x i1> %part1, %p0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilegt_x2_nxv8i1_reg_off(<vscale x 8 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilegt_x2_nxv8i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilegt { p2.h, p3.h }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ %res = and <vscale x 8 x i1> %part1, %p0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilehi_x2_nxv4i1_reg_off(<vscale x 4 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehi_x2_nxv4i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehi { p2.s, p3.s }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ %res = and <vscale x 4 x i1> %part1, %p0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilehs_x2_nxv2i1_reg_off(<vscale x 2 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilehs_x2_nxv2i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilehs { p2.d, p3.d }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ %res = and <vscale x 2 x i1> %part1, %p0
+ ret <vscale x 2 x i1> %res
+}
+
+define <vscale x 16 x i1> @whilele_x2_nxv16i1_reg_off(<vscale x 16 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilele_x2_nxv16i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilele { p2.b, p3.b }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
+ %res = and <vscale x 16 x i1> %part1, %p0
+ ret <vscale x 16 x i1> %res
+}
+
+define <vscale x 8 x i1> @whilelo_x2_nxv8i1_reg_off(<vscale x 8 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelo_x2_nxv8i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelo { p2.h, p3.h }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
+ %res = and <vscale x 8 x i1> %part1, %p0
+ ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @whilels_x2_nxv4i1_reg_off(<vscale x 4 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilels_x2_nxv4i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilels { p2.s, p3.s }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
+ %res = and <vscale x 4 x i1> %part1, %p0
+ ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @whilelt_x2_nxv2i1_reg_off(<vscale x 2 x i1> %p0, i64 %m, i64 %n) nounwind {
+; CHECK-LABEL: whilelt_x2_nxv2i1_reg_off:
+; CHECK: // %bb.0:
+; CHECK-NEXT: whilelt { p2.d, p3.d }, x0, x1
+; CHECK-NEXT: and p0.b, p2/z, p2.b, p0.b
+; CHECK-NEXT: ret
+ %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64 %m, i64 %n)
+ %part1 = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
+ %res = and <vscale x 2 x i1> %part1, %p0
+ ret <vscale x 2 x i1> %res
+}
+
+; == WHILEGE ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilege.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilege.x2.nxv2i1(i64, i64)
+
+; == WHILEGT ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv2i1(i64, i64)
+
+; == WHILEHI ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64, i64)
+
+; == WHILEHS ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64, i64)
+
+; == WHILELE ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilele.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilele.x2.nxv2i1(i64, i64)
+
+; == WHILELO ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64, i64)
+
+; == WHILELS ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilels.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilels.x2.nxv2i1(i64, i64)
+
+; == WHILELT ==
+declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv16i1(i64, i64)
+declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv8i1(i64, i64)
+declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64, i64)
+declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64, i64)
+
+; == SVBOOL CONVERSION ==
+declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
+
+; == VECTOR INSERTS ==
+declare <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1>, <vscale x 16 x i1>, i64 immarg)