define <8 x i8> @shadd_v8i8(<8 x i8> %x) {
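; shadd is the signed halving add: with both operands the same value,
; (x + x) >> 1 == x, so the instruction folds away and the checks below
; (and in the rest of the shadd tests) expect a bare return.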
; CHECK-LABEL: shadd_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.8b, v0.8b, v0.8b
; CHECK-NEXT: ret
  %r = tail call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %x, <8 x i8> %x)
  ret <8 x i8> %r
}

define <4 x i16> @shadd_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shadd_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.4h, v0.4h, v0.4h
; CHECK-NEXT: ret
  %r = tail call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %x, <4 x i16> %x)
  ret <4 x i16> %r
}

define <2 x i32> @shadd_v2i32(<2 x i32> %x) {
; CHECK-LABEL: shadd_v2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.2s, v0.2s, v0.2s
; CHECK-NEXT: ret
  %r = tail call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %x, <2 x i32> %x)
  ret <2 x i32> %r
}

define <16 x i8> @shadd_v16i8(<16 x i8> %x) {
; CHECK-LABEL: shadd_v16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.16b, v0.16b, v0.16b
; CHECK-NEXT: ret
  %r = tail call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %x, <16 x i8> %x)
  ret <16 x i8> %r
}

define <8 x i16> @shadd_v8i16(<8 x i16> %x) {
; CHECK-LABEL: shadd_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.8h, v0.8h, v0.8h
; CHECK-NEXT: ret
  %r = tail call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %x, <8 x i16> %x)
  ret <8 x i16> %r
}

define <4 x i32> @shadd_v4i32(<4 x i32> %x) {
; CHECK-LABEL: shadd_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: shadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: ret
  %r = tail call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %x, <4 x i32> %x)
  ret <4 x i32> %r
}

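; uhadd is the unsigned halving add; uhadd(x, x) == (x + x) >> 1 == x as
; well, so these tests fold the same way.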
define <8 x i8> @uhadd_v8i8(<8 x i8> %x) {
; CHECK-LABEL: uhadd_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.8b, v0.8b, v0.8b
; CHECK-NEXT: ret
  %r = tail call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
  ret <8 x i8> %r
}

define <4 x i16> @uhadd_v4i16(<4 x i16> %x) {
; CHECK-LABEL: uhadd_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.4h, v0.4h, v0.4h
; CHECK-NEXT: ret
  %r = tail call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
  ret <4 x i16> %r
}

define <2 x i32> @uhadd_v2i32(<2 x i32> %x) {
; CHECK-LABEL: uhadd_v2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.2s, v0.2s, v0.2s
; CHECK-NEXT: ret
  %r = tail call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
  ret <2 x i32> %r
}

define <16 x i8> @uhadd_v16i8(<16 x i8> %x) {
; CHECK-LABEL: uhadd_v16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.16b, v0.16b, v0.16b
; CHECK-NEXT: ret
  %r = tail call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
  ret <16 x i8> %r
}

define <8 x i16> @uhadd_v8i16(<8 x i16> %x) {
; CHECK-LABEL: uhadd_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.8h, v0.8h, v0.8h
; CHECK-NEXT: ret
  %r = tail call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
  ret <8 x i16> %r
}

define <4 x i32> @uhadd_v4i32(<4 x i32> %x) {
; CHECK-LABEL: uhadd_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: uhadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: ret
  %r = tail call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
  ret <4 x i32> %r
}

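; srhadd is the signed rounding halving add, (a + b + 1) >> 1. With equal
; operands (2x + 1) >> 1 == x, so the rounding variants also fold to a
; plain return.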
define <8 x i8> @srhadd_v8i8(<8 x i8> %x) {
; CHECK-LABEL: srhadd_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.8b, v0.8b, v0.8b
; CHECK-NEXT: ret
  %r = tail call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
  ret <8 x i8> %r
}

define <4 x i16> @srhadd_v4i16(<4 x i16> %x) {
; CHECK-LABEL: srhadd_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.4h, v0.4h, v0.4h
; CHECK-NEXT: ret
  %r = tail call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
  ret <4 x i16> %r
}

define <2 x i32> @srhadd_v2i32(<2 x i32> %x) {
; CHECK-LABEL: srhadd_v2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.2s, v0.2s, v0.2s
; CHECK-NEXT: ret
  %r = tail call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
  ret <2 x i32> %r
}

define <16 x i8> @srhadd_v16i8(<16 x i8> %x) {
; CHECK-LABEL: srhadd_v16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.16b, v0.16b, v0.16b
; CHECK-NEXT: ret
  %r = tail call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
  ret <16 x i8> %r
}

define <8 x i16> @srhadd_v8i16(<8 x i16> %x) {
; CHECK-LABEL: srhadd_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.8h, v0.8h, v0.8h
; CHECK-NEXT: ret
  %r = tail call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
  ret <8 x i16> %r
}

define <4 x i32> @srhadd_v4i32(<4 x i32> %x) {
; CHECK-LABEL: srhadd_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: srhadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: ret
  %r = tail call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
  ret <4 x i32> %r
}

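; urhadd is the unsigned rounding halving add; urhadd(x, x) == x too.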
define <8 x i8> @urhadd_v8i8(<8 x i8> %x) {
; CHECK-LABEL: urhadd_v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.8b, v0.8b, v0.8b
; CHECK-NEXT: ret
  %r = tail call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %x, <8 x i8> %x)
  ret <8 x i8> %r
}

define <4 x i16> @urhadd_v4i16(<4 x i16> %x) {
; CHECK-LABEL: urhadd_v4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.4h, v0.4h, v0.4h
; CHECK-NEXT: ret
  %r = tail call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %x, <4 x i16> %x)
  ret <4 x i16> %r
}

define <2 x i32> @urhadd_v2i32(<2 x i32> %x) {
; CHECK-LABEL: urhadd_v2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.2s, v0.2s, v0.2s
; CHECK-NEXT: ret
  %r = tail call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %x, <2 x i32> %x)
  ret <2 x i32> %r
}

define <16 x i8> @urhadd_v16i8(<16 x i8> %x) {
; CHECK-LABEL: urhadd_v16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.16b, v0.16b, v0.16b
; CHECK-NEXT: ret
  %r = tail call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %x, <16 x i8> %x)
  ret <16 x i8> %r
}

define <8 x i16> @urhadd_v8i16(<8 x i16> %x) {
; CHECK-LABEL: urhadd_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.8h, v0.8h, v0.8h
; CHECK-NEXT: ret
  %r = tail call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %x, <8 x i16> %x)
  ret <8 x i16> %r
}

define <4 x i32> @urhadd_v4i32(<4 x i32> %x) {
; CHECK-LABEL: urhadd_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: urhadd v0.4s, v0.4s, v0.4s
; CHECK-NEXT: ret
  %r = tail call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %x, <4 x i32> %x)
  ret <4 x i32> %r
}

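; On X86, pavgb computes the unsigned rounding average (a + b + 1) >> 1,
; so pavgb of a register with itself is a no-op. With the averages folded,
; only plain vector copies remain (emitted as movaps/movups), and the
; AVX512F and AVX512BW outputs become identical and merge under the common
; AVX512 prefix.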
define void @avg_v64i8_2(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v64i8_2:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
-; SSE2-NEXT: pavgb %xmm0, %xmm0
-; SSE2-NEXT: pavgb %xmm1, %xmm1
-; SSE2-NEXT: pavgb %xmm2, %xmm2
-; SSE2-NEXT: pavgb %xmm3, %xmm3
-; SSE2-NEXT: movdqu %xmm3, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
-; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: movaps (%rsi), %xmm0
+; SSE2-NEXT: movaps 16(%rsi), %xmm1
+; SSE2-NEXT: movaps 32(%rsi), %xmm2
+; SSE2-NEXT: movaps 48(%rsi), %xmm3
+; SSE2-NEXT: movups %xmm3, (%rax)
+; SSE2-NEXT: movups %xmm2, (%rax)
+; SSE2-NEXT: movups %xmm1, (%rax)
+; SSE2-NEXT: movups %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8_2:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
-; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3
-; AVX1-NEXT: vpavgb %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpavgb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpavgb %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpavgb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqu %xmm3, (%rax)
-; AVX1-NEXT: vmovdqu %xmm2, (%rax)
-; AVX1-NEXT: vmovdqu %xmm1, (%rax)
-; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vmovaps (%rsi), %ymm0
+; AVX1-NEXT: vmovaps 32(%rsi), %ymm1
+; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v64i8_2:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vpavgb %ymm0, %ymm0, %ymm0
-; AVX2-NEXT: vpavgb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqu %ymm1, (%rax)
-; AVX2-NEXT: vmovdqu %ymm0, (%rax)
+; AVX2-NEXT: vmovaps (%rsi), %ymm0
+; AVX2-NEXT: vmovaps 32(%rsi), %ymm1
+; AVX2-NEXT: vmovups %ymm1, (%rax)
+; AVX2-NEXT: vmovups %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: avg_v64i8_2:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vpavgb %ymm0, %ymm0, %ymm0
-; AVX512F-NEXT: vpavgb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
-; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: avg_v64i8_2:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT: vpavgb %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
+; AVX512-LABEL: avg_v64i8_2:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps (%rsi), %zmm0
+; AVX512-NEXT: vmovups %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
  %1 = load <64 x i8>, ptr %a
  %2 = load <64 x i8>, ptr %b
  %3 = zext <64 x i8> %1 to <64 x i32>