+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
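
; With +use-experimental-zeroing-pseudos, a select of an intrinsic's
; first operand against zero is expected to lower to a zeroing
; MOVPRFX (p0/z) followed by the destructive predicated instruction,
; rather than an explicit mov to zero the inactive lanes. The
; assertions below check this for each operation and element type.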
;
; ADD
;

define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8_zero:
-; CHECK: movprfx z0.b, p0/z, z0.b
-; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
%a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a_z,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16_zero:
-; CHECK: movprfx z0.h, p0/z, z0.h
-; CHECK-NEXT: add z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: add z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
%a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %pg,
<vscale x 8 x i16> %a_z,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32_zero:
-; CHECK: movprfx z0.s, p0/z, z0.s
-; CHECK-NEXT: add z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: add z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
%a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %pg,
<vscale x 4 x i32> %a_z,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64_zero:
-; CHECK: movprfx z0.d, p0/z, z0.d
-; CHECK-NEXT: add z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: add z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
%a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %a_z,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %out
}

;
; SUB
;

define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8_zero:
-; CHECK: movprfx z0.b, p0/z, z0.b
-; CHECK-NEXT: sub z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: sub z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
%a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a_z,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16_zero:
-; CHECK: movprfx z0.h, p0/z, z0.h
-; CHECK-NEXT: sub z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: sub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
%a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %pg,
<vscale x 8 x i16> %a_z,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32_zero:
-; CHECK: movprfx z0.s, p0/z, z0.s
-; CHECK-NEXT: sub z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: sub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
%a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %pg,
<vscale x 4 x i32> %a_z,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64_zero:
-; CHECK: movprfx z0.d, p0/z, z0.d
-; CHECK-NEXT: sub z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: sub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
%a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %a_z,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %out
}

;
; SUBR
;

define <vscale x 16 x i8> @subr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subr_i8_zero:
-; CHECK: movprfx z0.b, p0/z, z0.b
-; CHECK-NEXT: subr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: subr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
%a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg,
<vscale x 16 x i8> %a_z,
<vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subr_i16_zero:
-; CHECK: movprfx z0.h, p0/z, z0.h
-; CHECK-NEXT: subr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: subr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
%a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
<vscale x 8 x i16> %a_z,
<vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subr_i32_zero:
-; CHECK: movprfx z0.s, p0/z, z0.s
-; CHECK-NEXT: subr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: subr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
%a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
<vscale x 4 x i32> %a_z,
<vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subr_i64_zero:
-; CHECK: movprfx z0.d, p0/z, z0.d
-; CHECK-NEXT: subr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: subr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
%a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %a_z,
<vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %out
}
+;
+; ORR
+;
+
+define <vscale x 16 x i8> @orr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: orr_i8_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: orr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a_z,
+ <vscale x 16 x i8> %b)
+ ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @orr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: orr_i16_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: orr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a_z,
+ <vscale x 8 x i16> %b)
+ ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @orr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: orr_i32_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: orr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a_z,
+ <vscale x 4 x i32> %b)
+ ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @orr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: orr_i64_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: orr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a_z,
+ <vscale x 2 x i64> %b)
+ ret <vscale x 2 x i64> %out
+}
+
+;
+; EOR
+;
+
+define <vscale x 16 x i8> @eor_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: eor_i8_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: eor z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a_z,
+ <vscale x 16 x i8> %b)
+ ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @eor_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: eor_i16_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: eor z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a_z,
+ <vscale x 8 x i16> %b)
+ ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @eor_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: eor_i32_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: eor z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a_z,
+ <vscale x 4 x i32> %b)
+ ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @eor_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: eor_i64_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: eor z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a_z,
+ <vscale x 2 x i64> %b)
+ ret <vscale x 2 x i64> %out
+}
+
+;
+; AND
+;
+
+define <vscale x 16 x i8> @and_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: and_i8_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: and z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a_z,
+ <vscale x 16 x i8> %b)
+ ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @and_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: and_i16_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: and z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a_z,
+ <vscale x 8 x i16> %b)
+ ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @and_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: and_i32_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: and z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a_z,
+ <vscale x 4 x i32> %b)
+ ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @and_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: and_i64_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: and z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a_z,
+ <vscale x 2 x i64> %b)
+ ret <vscale x 2 x i64> %out
+}
+
+;
+; BIC
+;
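+; (bic computes the AND of %a_z with the bitwise NOT of %b; it gets
+; the same zeroing-movprfx lowering as the other logical ops above.)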
+
+define <vscale x 16 x i8> @bic_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bic_i8_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.b, p0/z, z0.b
+; CHECK-NEXT: bic z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
+ %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1> %pg,
+ <vscale x 16 x i8> %a_z,
+ <vscale x 16 x i8> %b)
+ ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bic_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bic_i16_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z0.h
+; CHECK-NEXT: bic z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1> %pg,
+ <vscale x 8 x i16> %a_z,
+ <vscale x 8 x i16> %b)
+ ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bic_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bic_i32_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z0.s
+; CHECK-NEXT: bic z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1> %pg,
+ <vscale x 4 x i32> %a_z,
+ <vscale x 4 x i32> %b)
+ ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bic_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bic_i64_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z0.d
+; CHECK-NEXT: bic z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1> %pg,
+ <vscale x 2 x i64> %a_z,
+ <vscale x 2 x i64> %b)
+ ret <vscale x 2 x i64> %out
+}
+
declare <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)