--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
+
+; Canonical unsigned abd pattern: zext both operands to i32, subtract, abs,
+; truncate back to i16. Per the CHECK lines this selects to a single uabd.
+define <8 x i16> @abdu_base(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: abdu_base:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
+ %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
+ %sub = sub <8 x i32> %zextsrc1, %zextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Same abd pattern with a constant splat RHS. The CHECK lines show this is
+; currently lowered as widen/sub/abs/uzp1 rather than being matched to uabd.
+define <8 x i16> @abdu_const(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_const:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ushll2 v2.4s, v0.8h, #0
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: sub v2.4s, v2.4s, v1.4s
+; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: abs v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Constant splat on the LHS of the sub; currently lowered with usubw/usubw2
+; plus abs instead of a single uabd.
+define <8 x i16> @abdu_const_lhs(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_const_lhs:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: usubw2 v2.4s, v1.4s, v0.8h
+; CHECK-NEXT: usubw v0.4s, v1.4s, v0.4h
+; CHECK-NEXT: abs v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Subtraction from an all-zero constant; CHECK shows a neg/abs expansion
+; rather than recognizing that abs(0 - zext(x)) == zext(x).
+define <8 x i16> @abdu_const_zero(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_const_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: ushll v2.4s, v0.4h, #0
+; CHECK-NEXT: usubw2 v0.4s, v1.4s, v0.8h
+; CHECK-NEXT: neg v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: abs v1.4s, v1.4s
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, %zextsrc1
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Both operands are constant splats (3 - 1); the whole computation
+; constant-folds to a movi of #2.
+define <8 x i16> @abdu_const_both() {
+; CHECK-LABEL: abdu_const_both:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #2
+; CHECK-NEXT: ret
+ %sub = sub <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Both inputs are constant splats at the top of the unsigned i16 range
+; (65534 vs 65535); folds to a movi of #1.
+define <8 x i16> @abdu_const_bothhigh() {
+; CHECK-LABEL: abdu_const_bothhigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534> to <8 x i32>
+ %zextsrc2 = zext <8 x i16> <i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535> to <8 x i32>
+ %sub = sub <8 x i32> %zextsrc1, %zextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; One operand is undef, so the whole computation folds away and the function
+; compiles to a bare ret (result is whatever is already in v0).
+define <8 x i16> @abdu_undef(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_undef:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+ %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
+ %zextsrc2 = zext <8 x i16> undef to <8 x i32>
+ %sub = sub <8 x i32> %zextsrc1, %zextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+
+
+; Direct call of the aarch64.neon.uabd intrinsic; selects straight to uabd.
+define <8 x i16> @abdu_i_base(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: abdu_i_base:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with a constant splat second operand: materialized with a
+; movi and fed into uabd.
+define <8 x i16> @abdu_i_const(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_const:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #1
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %src1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with the constant as the first operand; operand order is
+; preserved in the emitted uabd.
+define <8 x i16> @abdu_i_const_lhs(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_const_lhs:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #1
+; CHECK-NEXT: uabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with a zero first operand. The unused float %t argument
+; occupies v0/s0 so %src1 arrives in v1; the zero operand is not folded away.
+define <8 x i16> @abdu_i_const_zero(float %t, <8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_const_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with both operands constant; per the CHECK lines this is not
+; constant-folded — both splats are materialized and uabd executes at runtime.
+define <8 x i16> @abdu_i_const_both() {
+; CHECK-LABEL: abdu_i_const_both:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: movi v1.8h, #3
+; CHECK-NEXT: uabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with both operands at the top of the unsigned i16 range
+; (65534 vs 65535); materialized via movi/mvni, still not constant-folded.
+define <8 x i16> @abdu_i_const_bothhigh() {
+; CHECK-LABEL: abdu_i_const_bothhigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT: mvni v1.8h, #1
+; CHECK-NEXT: uabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>, <8 x i16> <i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535>)
+ ret <8 x i16> %result
+}
+
+; One large constant splat (32766) that cannot be encoded as a movi immediate,
+; so it is built with mov+dup before the uabd.
+define <8 x i16> @abdu_i_const_onehigh() {
+; CHECK-LABEL: abdu_i_const_onehigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: dup v1.8h, w8
+; CHECK-NEXT: uabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; Constants 32766 and 65534 (the latter encodable as mvni #1); unsigned abd of
+; the two constant splats, still computed at runtime.
+define <8 x i16> @abdu_i_const_oneneg() {
+; CHECK-LABEL: abdu_i_const_oneneg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: mvni v1.8h, #1
+; CHECK-NEXT: dup v0.8h, w8
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with a zero first operand; the unused %t argument puts %src1
+; in v1. The intrinsic is not simplified to a plain move of %src1.
+define <8 x i16> @abdu_i_zero(<8 x i16> %t, <8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; uabd intrinsic with an undef first operand; unlike the IR-pattern version,
+; the call is not folded away — uabd runs on whatever is in v0.
+define <8 x i16> @abdu_i_undef(<8 x i16> %t, <8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_undef:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> undef, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; Two chained uabd intrinsic calls with constant operands; the CHECK lines
+; show they are emitted back-to-back, not reassociated or combined.
+define <8 x i16> @abdu_i_reassoc(<8 x i16> %src1) {
+; CHECK-LABEL: abdu_i_reassoc:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #3
+; CHECK-NEXT: movi v2.8h, #1
+; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: uabd v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: ret
+ %r1 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %src1, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
+ %result = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %r1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+
+
+
+
+; Canonical signed abd pattern: sext both operands to i32, subtract, abs,
+; truncate back to i16. Per the CHECK lines this selects to a single sabd.
+define <8 x i16> @abds_base(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: abds_base:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
+ %sextsrc2 = sext <8 x i16> %src2 to <8 x i32>
+ %sub = sub <8 x i32> %sextsrc1, %sextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Signed abd pattern with a constant splat RHS. The CHECK lines show this is
+; currently lowered as widen/sub/abs/uzp1 rather than being matched to sabd.
+define <8 x i16> @abds_const(<8 x i16> %src1) {
+; CHECK-LABEL: abds_const:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: sshll2 v2.4s, v0.8h, #0
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: sub v2.4s, v2.4s, v1.4s
+; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: abs v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> %sextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Constant splat on the LHS of the sub; currently lowered with ssubw/ssubw2
+; plus abs instead of a single sabd.
+define <8 x i16> @abds_const_lhs(<8 x i16> %src1) {
+; CHECK-LABEL: abds_const_lhs:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.4s, #1
+; CHECK-NEXT: ssubw2 v2.4s, v1.4s, v0.8h
+; CHECK-NEXT: ssubw v0.4s, v1.4s, v0.4h
+; CHECK-NEXT: abs v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %sextsrc1
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Subtraction of a sign-extended value from zero; CHECK shows a neg/abs
+; expansion rather than a single sabd.
+define <8 x i16> @abds_const_zero(<8 x i16> %src1) {
+; CHECK-LABEL: abds_const_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: sshll v2.4s, v0.4h, #0
+; CHECK-NEXT: ssubw2 v0.4s, v1.4s, v0.8h
+; CHECK-NEXT: neg v1.4s, v2.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: abs v1.4s, v1.4s
+; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
+ %sub = sub <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, %sextsrc1
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Both operands are constant splats (3 - 1); the whole computation
+; constant-folds to a movi of #2.
+define <8 x i16> @abds_const_both() {
+; CHECK-LABEL: abds_const_both:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #2
+; CHECK-NEXT: ret
+ %sub = sub <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; Both inputs are constant splats of 65534/65535 (sign-extended to -2/-1);
+; the computation folds to a movi of #1.
+define <8 x i16> @abds_const_bothhigh() {
+; CHECK-LABEL: abds_const_bothhigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534> to <8 x i32>
+ %sextsrc2 = sext <8 x i16> <i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535, i16 65535> to <8 x i32>
+ %sub = sub <8 x i32> %sextsrc1, %sextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+; One operand is undef. Unlike abdu_undef, this is not folded away entirely:
+; per the CHECK lines abs(sext(%src1) - undef) is lowered as sshll/abs/uzp1.
+define <8 x i16> @abds_undef(<8 x i16> %src1) {
+; CHECK-LABEL: abds_undef:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshll2 v1.4s, v0.8h, #0
+; CHECK-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-NEXT: abs v1.4s, v1.4s
+; CHECK-NEXT: abs v0.4s, v0.4s
+; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %sextsrc1 = sext <8 x i16> %src1 to <8 x i32>
+ %sextsrc2 = sext <8 x i16> undef to <8 x i32>
+ %sub = sub <8 x i32> %sextsrc1, %sextsrc2
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
+ %result = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %result
+}
+
+
+
+; Direct call of the aarch64.neon.sabd intrinsic; selects straight to sabd.
+define <8 x i16> @abds_i_base(<8 x i16> %src1, <8 x i16> %src2) {
+; CHECK-LABEL: abds_i_base:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with a constant splat second operand: materialized with a
+; movi and fed into sabd.
+define <8 x i16> @abds_i_const(<8 x i16> %src1) {
+; CHECK-LABEL: abds_i_const:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #1
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %src1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with the constant as the first operand; operand order is
+; preserved in the emitted sabd.
+define <8 x i16> @abds_i_const_lhs(<8 x i16> %src1) {
+; CHECK-LABEL: abds_i_const_lhs:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #1
+; CHECK-NEXT: sabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with a zero first operand; the zero is materialized and the
+; intrinsic is not simplified away.
+define <8 x i16> @abds_i_const_zero(<8 x i16> %src1) {
+; CHECK-LABEL: abds_i_const_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: sabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with both operands constant; per the CHECK lines this is not
+; constant-folded — both splats are materialized and sabd executes at runtime.
+define <8 x i16> @abds_i_const_both() {
+; CHECK-LABEL: abds_i_const_both:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: movi v1.8h, #3
+; CHECK-NEXT: sabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with both operands at the top of the signed i16 range
+; (32766 vs 32767); materialized via mov/dup and mvni, not constant-folded.
+define <8 x i16> @abds_i_const_bothhigh() {
+; CHECK-LABEL: abds_i_const_bothhigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: mvni v1.8h, #128, lsl #8
+; CHECK-NEXT: dup v0.8h, w8
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>)
+ ret <8 x i16> %result
+}
+
+; One large constant splat (32766) that cannot be encoded as a movi immediate,
+; so it is built with mov+dup before the sabd.
+define <8 x i16> @abds_i_const_onehigh() {
+; CHECK-LABEL: abds_i_const_onehigh:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: movi v0.8h, #1
+; CHECK-NEXT: dup v1.8h, w8
+; CHECK-NEXT: sabd v0.8h, v1.8h, v0.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+; Constants 32766 and 65534 (-2 when interpreted signed, encodable as
+; mvni #1); signed abd of the two constant splats, computed at runtime.
+define <8 x i16> @abds_i_const_oneneg() {
+; CHECK-LABEL: abds_i_const_oneneg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #32766
+; CHECK-NEXT: mvni v1.8h, #1
+; CHECK-NEXT: dup v0.8h, w8
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>, <8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with a zero first operand; the unused %t argument puts %src1
+; in v1. The intrinsic is not simplified away.
+define <8 x i16> @abds_i_zero(<8 x i16> %t, <8 x i16> %src1) {
+; CHECK-LABEL: abds_i_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; sabd intrinsic with an undef first operand; the call is not folded away —
+; sabd runs on whatever is in v0.
+define <8 x i16> @abds_i_undef(<8 x i16> %t, <8 x i16> %src1) {
+; CHECK-LABEL: abds_i_undef:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> undef, <8 x i16> %src1)
+ ret <8 x i16> %result
+}
+
+; Two chained sabd intrinsic calls with constant operands; the CHECK lines
+; show they are emitted back-to-back, not reassociated or combined.
+define <8 x i16> @abds_i_reassoc(<8 x i16> %src1) {
+; CHECK-LABEL: abds_i_reassoc:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v1.8h, #3
+; CHECK-NEXT: movi v2.8h, #1
+; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: sabd v0.8h, v0.8h, v2.8h
+; CHECK-NEXT: ret
+ %r1 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %src1, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
+ %result = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %r1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %result
+}
+
+
+declare <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)