define <8 x i16> @hadds_const_bothhigh() {
; CHECK-LABEL: hadds_const_bothhigh:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: mov w8, #32766 // =0x7ffe
; CHECK-NEXT: dup v0.8h, w8
; CHECK-NEXT: ret
%ext1 = sext <8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766> to <8 x i32>
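+; As in hadds_const_bothhigh above, both operands are constant, so the halving
+; add folds to a splat of (32766 + 32767) >> 1 = 32766.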
define <8 x i16> @hadds_i_const_bothhigh() {
; CHECK-LABEL: hadds_i_const_bothhigh:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: mov w8, #32766 // =0x7ffe
; CHECK-NEXT: dup v0.8h, w8
; CHECK-NEXT: ret
%result = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>)
ret <8 x i16> %result
}
-declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
+
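+; shadd: signed halving add, (sext(a) + sext(b)) >> 1 per lane (truncating).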
+define <8 x i8> @shadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: shadd_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: ret
+ %r = tail call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+ ret <8 x i8> %r
+}
+
+define <4 x i16> @shadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: shadd_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: ret
+ %r = tail call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+ ret <4 x i16> %r
+}
+
+define <2 x i32> @shadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: shadd_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT: ret
+ %r = tail call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+ ret <2 x i32> %r
+}
+
+define <16 x i8> @shadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: shadd_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %r = tail call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+ ret <16 x i8> %r
+}
+
+define <8 x i16> @shadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: shadd_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: ret
+ %r = tail call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+ ret <8 x i16> %r
+}
+
+define <4 x i32> @shadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: shadd_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT: ret
+ %r = tail call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+ ret <4 x i32> %r
+}
+
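+; uhadd: unsigned halving add, (zext(a) + zext(b)) >> 1 per lane (truncating).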
+define <8 x i8> @uhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: uhadd_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: ret
+ %r = tail call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+ ret <8 x i8> %r
+}
+
+define <4 x i16> @uhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: uhadd_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: ret
+ %r = tail call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+ ret <4 x i16> %r
+}
+
+define <2 x i32> @uhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: uhadd_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT: ret
+ %r = tail call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+ ret <2 x i32> %r
+}
+
+define <16 x i8> @uhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: uhadd_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %r = tail call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+ ret <16 x i8> %r
+}
+
+define <8 x i16> @uhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: uhadd_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: ret
+ %r = tail call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+ ret <8 x i16> %r
+}
+
+define <4 x i32> @uhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: uhadd_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT: ret
+ %r = tail call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+ ret <4 x i32> %r
+}
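+
+; srhadd: signed rounding halving add, (sext(a) + sext(b) + 1) >> 1 per lane.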
+define <8 x i8> @srhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: srhadd_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: ret
+ %r = tail call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+ ret <8 x i8> %r
+}
+
+define <4 x i16> @srhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: srhadd_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: ret
+ %r = tail call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+ ret <4 x i16> %r
+}
+
+define <2 x i32> @srhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: srhadd_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT: ret
+ %r = tail call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+ ret <2 x i32> %r
+}
+
+define <16 x i8> @srhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: srhadd_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %r = tail call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+ ret <16 x i8> %r
+}
+
+define <8 x i16> @srhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: srhadd_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: ret
+ %r = tail call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+ ret <8 x i16> %r
+}
+
+define <4 x i32> @srhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: srhadd_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT: ret
+ %r = tail call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+ ret <4 x i32> %r
+}
+
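+; urhadd: unsigned rounding halving add, (zext(a) + zext(b) + 1) >> 1 per lane.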
+define <8 x i8> @urhadd_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: urhadd_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: ret
+ %r = tail call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
+ ret <8 x i8> %r
+}
+
+define <4 x i16> @urhadd_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: urhadd_v4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.4h, v0.4h, v0.4h
+; CHECK-NEXT: ret
+ %r = tail call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
+ ret <4 x i16> %r
+}
+
+define <2 x i32> @urhadd_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: urhadd_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.2s, v0.2s, v0.2s
+; CHECK-NEXT: ret
+ %r = tail call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
+ ret <2 x i32> %r
+}
+
+define <16 x i8> @urhadd_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: urhadd_v16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %r = tail call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
+ ret <16 x i8> %r
+}
+
+define <8 x i16> @urhadd_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: urhadd_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: ret
+ %r = tail call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
+ ret <8 x i16> %r
+}
+
+define <4 x i32> @urhadd_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: urhadd_v4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: urhadd v0.4s, v0.4s, v0.4s
+; CHECK-NEXT: ret
+ %r = tail call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
+ ret <4 x i32> %r
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>)
+declare <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16>, <4 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32>, <2 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>)