liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v16i8
- ; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
- ; ALL: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[ADD]](<16 x s8>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[ADD]](<16 x s8>)
+ ; ALL-NEXT: RET 0
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_ADD %0, %1
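+# Note: 128-bit vector G_ADD is already legal with SSE2, so every configuration
+# sharing the ALL prefix leaves the operation unchanged.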
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v8i16
- ; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
- ; ALL: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[ADD]](<8 x s16>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[ADD]](<8 x s16>)
+ ; ALL-NEXT: RET 0
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_ADD %0, %1
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v4i32
- ; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
- ; ALL: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[ADD]](<4 x s32>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[ADD]](<4 x s32>)
+ ; ALL-NEXT: RET 0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_ADD %0, %1
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v2i64
- ; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
- ; ALL: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[ADD]](<2 x s64>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[ADD]](<2 x s64>)
+ ; ALL-NEXT: RET 0
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_ADD %0, %1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX2
+# TODO: add tests for additional configurations once the legalization is supported
+
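+# The checks below document the expected narrowing: without AVX2, a 256-bit
+# G_ADD is split into two 128-bit G_ADDs via G_UNMERGE_VALUES and the halves
+# are rejoined with G_CONCAT_VECTORS; with AVX2 the 256-bit operation is legal.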
--- |
define void @test_add_v32i8() {
%ret = add <32 x i8> undef, undef
body: |
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
+
; SSE2-LABEL: name: test_add_v32i8
- ; SSE2: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; SSE2: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; SSE2: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
- ; SSE2: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
- ; SSE2: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
- ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
- ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
- ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
- ; SSE2: RET 0
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
+ ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
+ ; SSE2-NEXT: RET 0
; AVX1-LABEL: name: test_add_v32i8
- ; AVX1: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
- ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
- ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
+ ; AVX1-NEXT: RET 0
; AVX2-LABEL: name: test_add_v32i8
- ; AVX2: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; AVX2: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
- ; AVX2: RET 0
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX2-NEXT: RET 0
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
+
; SSE2-LABEL: name: test_add_v16i16
- ; SSE2: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; SSE2: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; SSE2: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
- ; SSE2: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
- ; SSE2: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
- ; SSE2: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
- ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
- ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
- ; SSE2: RET 0
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
+ ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
+ ; SSE2-NEXT: RET 0
; AVX1-LABEL: name: test_add_v16i16
- ; AVX1: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
- ; AVX1: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
- ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
+ ; AVX1-NEXT: RET 0
; AVX2-LABEL: name: test_add_v16i16
- ; AVX2: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; AVX2: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
- ; AVX2: RET 0
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<16 x s16>)
+ ; AVX2-NEXT: RET 0
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
+
; SSE2-LABEL: name: test_add_v8i32
- ; SSE2: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; SSE2: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; SSE2: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
- ; SSE2: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
- ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
- ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
- ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
- ; SSE2: RET 0
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
+ ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+ ; SSE2-NEXT: RET 0
; AVX1-LABEL: name: test_add_v8i32
- ; AVX1: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
- ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+ ; AVX1-NEXT: RET 0
; AVX2-LABEL: name: test_add_v8i32
- ; AVX2: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; AVX2: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
- ; AVX2: RET 0
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<8 x s32>)
+ ; AVX2-NEXT: RET 0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
+
; SSE2-LABEL: name: test_add_v4i64
- ; SSE2: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; SSE2: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; SSE2: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
- ; SSE2: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
- ; SSE2: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
- ; SSE2: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
- ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
- ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
- ; SSE2: RET 0
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
+ ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
+ ; SSE2-NEXT: RET 0
; AVX1-LABEL: name: test_add_v4i64
- ; AVX1: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
- ; AVX1: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
- ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
+ ; AVX1-NEXT: RET 0
; AVX2-LABEL: name: test_add_v4i64
- ; AVX2: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; AVX2: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
- ; AVX2: RET 0
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<4 x s64>)
+ ; AVX2-NEXT: RET 0
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_ADD %0, %1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX512BW
+# TODO: add tests for additional configurations once the legalization is supported
+
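+# Expected legalization of 512-bit G_ADD: AVX1 narrows to four 128-bit ops,
+# AVX512F narrows the byte and word cases to two 256-bit ops (dword and qword
+# stay legal), and AVX512BW handles every element type natively.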
--- |
define void @test_add_v64i8() {
%ret = add <64 x i8> undef, undef
body: |
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
+
; AVX1-LABEL: name: test_add_v64i8
- ; AVX1: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
- ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
- ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
- ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
+ ; AVX1-NEXT: RET 0
; AVX512F-LABEL: name: test_add_v64i8
- ; AVX512F: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX512F: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
- ; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
- ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
- ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
- ; AVX512F: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
- ; AVX512F: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
- ; AVX512F: RET 0
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
+ ; AVX512F-NEXT: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
+ ; AVX512F-NEXT: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
+ ; AVX512F-NEXT: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX512F-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
+ ; AVX512F-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
+ ; AVX512F-NEXT: RET 0
; AVX512BW-LABEL: name: test_add_v64i8
- ; AVX512BW: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX512BW: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>)
- ; AVX512BW: RET 0
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[ADD]](<64 x s8>)
+ ; AVX512BW-NEXT: RET 0
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
+
; AVX1-LABEL: name: test_add_v32i16
- ; AVX1: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>), [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
- ; AVX1: [[UV4:%[0-9]+]]:_(<8 x s16>), [[UV5:%[0-9]+]]:_(<8 x s16>), [[UV6:%[0-9]+]]:_(<8 x s16>), [[UV7:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV4]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV5]]
- ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
- ; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
- ; AVX1: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>), [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<8 x s16>), [[UV5:%[0-9]+]]:_(<8 x s16>), [[UV6:%[0-9]+]]:_(<8 x s16>), [[UV7:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
+ ; AVX1-NEXT: RET 0
; AVX512F-LABEL: name: test_add_v32i16
- ; AVX512F: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX512F: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
- ; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
- ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
- ; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
- ; AVX512F: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
- ; AVX512F: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
- ; AVX512F: RET 0
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
+ ; AVX512F-NEXT: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
+ ; AVX512F-NEXT: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
+ ; AVX512F-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
+ ; AVX512F-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
+ ; AVX512F-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
+ ; AVX512F-NEXT: RET 0
; AVX512BW-LABEL: name: test_add_v32i16
- ; AVX512BW: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX512BW: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>)
- ; AVX512BW: RET 0
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[ADD]](<32 x s16>)
+ ; AVX512BW-NEXT: RET 0
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
+
; AVX1-LABEL: name: test_add_v16i32
- ; AVX1: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
- ; AVX1: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV4]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV5]]
- ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
- ; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
- ; AVX1: $zmm0 = COPY [[CONCAT_VECTORS]](<16 x s32>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+ ; AVX1-NEXT: RET 0
; AVX512F-LABEL: name: test_add_v16i32
- ; AVX512F: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX512F: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>)
- ; AVX512F: RET 0
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512F-NEXT: $zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512F-NEXT: RET 0
; AVX512BW-LABEL: name: test_add_v16i32
- ; AVX512BW: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX512BW: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>)
- ; AVX512BW: RET 0
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512BW-NEXT: RET 0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_ADD %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
+
; AVX1-LABEL: name: test_add_v8i64
- ; AVX1: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX1: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>), [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
- ; AVX1: [[UV4:%[0-9]+]]:_(<2 x s64>), [[UV5:%[0-9]+]]:_(<2 x s64>), [[UV6:%[0-9]+]]:_(<2 x s64>), [[UV7:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV4]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV5]]
- ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
- ; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
- ; AVX1: $zmm0 = COPY [[CONCAT_VECTORS]](<8 x s64>)
- ; AVX1: RET 0
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>), [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<2 x s64>), [[UV5:%[0-9]+]]:_(<2 x s64>), [[UV6:%[0-9]+]]:_(<2 x s64>), [[UV7:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<8 x s64>)
+ ; AVX1-NEXT: RET 0
; AVX512F-LABEL: name: test_add_v8i64
- ; AVX512F: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX512F: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>)
- ; AVX512F: RET 0
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512F-NEXT: $zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512F-NEXT: RET 0
; AVX512BW-LABEL: name: test_add_v8i64
- ; AVX512BW: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX512BW: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>)
- ; AVX512BW: RET 0
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512BW-NEXT: RET 0
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_ADD %0, %1
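+# test_add_v64i8_2 receives its <64 x s8> operands as pairs of 256-bit registers:
+# AVX1 splits each half into 128-bit adds, AVX512F adds the 256-bit halves
+# directly, and AVX512BW concatenates them so a single 512-bit G_ADD suffices.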
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1, $ymm2, $ymm3
; AVX1-LABEL: name: test_add_v64i8_2
- ; AVX1: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
- ; AVX1: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
- ; AVX1: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
- ; AVX1: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
- ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
- ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
- ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
- ; AVX1: [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY3]](<32 x s8>)
- ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
- ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
- ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
- ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
- ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
- ; AVX1: [[CONCAT_VECTORS1:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
- ; AVX1: $ymm1 = COPY [[CONCAT_VECTORS1]](<32 x s8>)
- ; AVX1: RET 0, implicit $ymm0, implicit $ymm1
+ ; AVX1: liveins: $ymm0, $ymm1, $ymm2, $ymm3
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
+ ; AVX1-NEXT: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
+ ; AVX1-NEXT: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
+ ; AVX1-NEXT: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
+ ; AVX1-NEXT: [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY3]](<32 x s8>)
+ ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
+ ; AVX1-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
+ ; AVX1-NEXT: $ymm1 = COPY [[CONCAT_VECTORS1]](<32 x s8>)
+ ; AVX1-NEXT: RET 0, implicit $ymm0, implicit $ymm1
; AVX512F-LABEL: name: test_add_v64i8_2
- ; AVX512F: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
- ; AVX512F: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
- ; AVX512F: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
- ; AVX512F: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
- ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
- ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
- ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>)
- ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>)
- ; AVX512F: RET 0, implicit $ymm0, implicit $ymm1
+ ; AVX512F: liveins: $ymm0, $ymm1, $ymm2, $ymm3
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
+ ; AVX512F-NEXT: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
+ ; AVX512F-NEXT: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
+ ; AVX512F-NEXT: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
+ ; AVX512F-NEXT: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
+ ; AVX512F-NEXT: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
+ ; AVX512F-NEXT: $ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX512F-NEXT: $ymm1 = COPY [[ADD1]](<32 x s8>)
+ ; AVX512F-NEXT: RET 0, implicit $ymm0, implicit $ymm1
; AVX512BW-LABEL: name: test_add_v64i8_2
- ; AVX512BW: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
- ; AVX512BW: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
- ; AVX512BW: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
- ; AVX512BW: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
- ; AVX512BW: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
- ; AVX512BW: [[CONCAT_VECTORS1:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
- ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
- ; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
- ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>)
- ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>)
- ; AVX512BW: RET 0, implicit $ymm0, implicit $ymm1
+ ; AVX512BW: liveins: $ymm0, $ymm1, $ymm2, $ymm3
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
+ ; AVX512BW-NEXT: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
+ ; AVX512BW-NEXT: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
+ ; AVX512BW-NEXT: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
+ ; AVX512BW-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
+ ; AVX512BW-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
+ ; AVX512BW-NEXT: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
+ ; AVX512BW-NEXT: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
+ ; AVX512BW-NEXT: $ymm0 = COPY [[UV]](<32 x s8>)
+ ; AVX512BW-NEXT: $ymm1 = COPY [[UV1]](<32 x s8>)
+ ; AVX512BW-NEXT: RET 0, implicit $ymm0, implicit $ymm1
%2(<32 x s8>) = COPY $ymm0
%3(<32 x s8>) = COPY $ymm1
%4(<32 x s8>) = COPY $ymm2
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v16i8
- ; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<16 x s8>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[SUB]](<16 x s8>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[SUB:%[0-9]+]]:_(<16 x s8>) = G_SUB [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[SUB]](<16 x s8>)
+ ; ALL-NEXT: RET 0
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_SUB %0, %1
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v8i16
- ; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[SUB]](<8 x s16>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[SUB]](<8 x s16>)
+ ; ALL-NEXT: RET 0
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_SUB %0, %1
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v4i32
- ; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[SUB]](<4 x s32>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[SUB]](<4 x s32>)
+ ; ALL-NEXT: RET 0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_SUB %0, %1
liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v2i64
- ; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<2 x s64>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $xmm0 = COPY [[SUB]](<2 x s64>)
- ; ALL: RET 0
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL-NEXT: {{ $}}
+ ; ALL-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
+ ; ALL-NEXT: [[SUB:%[0-9]+]]:_(<2 x s64>) = G_SUB [[DEF]], [[DEF1]]
+ ; ALL-NEXT: $xmm0 = COPY [[SUB]](<2 x s64>)
+ ; ALL-NEXT: RET 0
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_SUB %0, %1
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=SSE2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX2
+
# TODO: add tests for additional configurations once the legalization is supported
+
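+# G_SUB follows the same narrowing scheme as G_ADD: SSE2 and AVX1 split
+# 256-bit subtractions into two 128-bit halves, while AVX2 keeps them legal.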
--- |
define void @test_sub_v32i8() {
%ret = sub <32 x i8> undef, undef
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
- ; ALL-LABEL: name: test_sub_v32i8
- ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<32 x s8>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $ymm0 = COPY [[SUB]](<32 x s8>)
- ; ALL: RET 0
+ ; SSE2-LABEL: name: test_sub_v32i8
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
+ ; SSE2-NEXT: [[SUB:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[SUB1:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[SUB]](<16 x s8>), [[SUB1]](<16 x s8>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
+ ; SSE2-NEXT: RET 0
+ ; AVX1-LABEL: name: test_sub_v32i8
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[SUB]](<16 x s8>), [[SUB1]](<16 x s8>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
+ ; AVX1-NEXT: RET 0
+ ; AVX2-LABEL: name: test_sub_v32i8
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[SUB:%[0-9]+]]:_(<32 x s8>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[SUB]](<32 x s8>)
+ ; AVX2-NEXT: RET 0
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
- ; ALL-LABEL: name: test_sub_v16i16
- ; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<16 x s16>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $ymm0 = COPY [[SUB]](<16 x s16>)
- ; ALL: RET 0
+ ; SSE2-LABEL: name: test_sub_v16i16
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
+ ; SSE2-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[SUB1:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[SUB]](<8 x s16>), [[SUB1]](<8 x s16>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
+ ; SSE2-NEXT: RET 0
+ ; AVX1-LABEL: name: test_sub_v16i16
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[SUB]](<8 x s16>), [[SUB1]](<8 x s16>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
+ ; AVX1-NEXT: RET 0
+ ; AVX2-LABEL: name: test_sub_v16i16
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[SUB:%[0-9]+]]:_(<16 x s16>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[SUB]](<16 x s16>)
+ ; AVX2-NEXT: RET 0
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
- ; ALL-LABEL: name: test_sub_v8i32
- ; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<8 x s32>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $ymm0 = COPY [[SUB]](<8 x s32>)
- ; ALL: RET 0
+ ; SSE2-LABEL: name: test_sub_v8i32
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
+ ; SSE2-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[SUB1:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[SUB]](<4 x s32>), [[SUB1]](<4 x s32>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+ ; SSE2-NEXT: RET 0
+ ; AVX1-LABEL: name: test_sub_v8i32
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[SUB]](<4 x s32>), [[SUB1]](<4 x s32>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+ ; AVX1-NEXT: RET 0
+ ; AVX2-LABEL: name: test_sub_v8i32
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[SUB:%[0-9]+]]:_(<8 x s32>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[SUB]](<8 x s32>)
+ ; AVX2-NEXT: RET 0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $ymm0, $ymm1
- ; ALL-LABEL: name: test_sub_v4i64
- ; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<4 x s64>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $ymm0 = COPY [[SUB]](<4 x s64>)
- ; ALL: RET 0
+ ; SSE2-LABEL: name: test_sub_v4i64
+ ; SSE2: liveins: $ymm0, $ymm1
+ ; SSE2-NEXT: {{ $}}
+ ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
+ ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
+ ; SSE2-NEXT: [[SUB:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV]], [[UV2]]
+ ; SSE2-NEXT: [[SUB1:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV1]], [[UV3]]
+ ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[SUB]](<2 x s64>), [[SUB1]](<2 x s64>)
+ ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
+ ; SSE2-NEXT: RET 0
+ ; AVX1-LABEL: name: test_sub_v4i64
+ ; AVX1: liveins: $ymm0, $ymm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
+ ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV]], [[UV2]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[SUB]](<2 x s64>), [[SUB1]](<2 x s64>)
+ ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
+ ; AVX1-NEXT: RET 0
+ ; AVX2-LABEL: name: test_sub_v4i64
+ ; AVX2: liveins: $ymm0, $ymm1
+ ; AVX2-NEXT: {{ $}}
+ ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
+ ; AVX2-NEXT: [[SUB:%[0-9]+]]:_(<4 x s64>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX2-NEXT: $ymm0 = COPY [[SUB]](<4 x s64>)
+ ; AVX2-NEXT: RET 0
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_SUB %0, %1
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX512BW
+
# TODO: add tests for additional configurations once the legalization is supported
+
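+# For 512-bit G_SUB, AVX1 narrows to four 128-bit ops, AVX512F splits only the
+# byte and word cases into 256-bit halves, and AVX512BW keeps all types legal.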
--- |
define void @test_sub_v64i8() {
%ret = sub <64 x i8> undef, undef
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
- ; ALL-LABEL: name: test_sub_v64i8
- ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<64 x s8>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $zmm0 = COPY [[SUB]](<64 x s8>)
- ; ALL: RET 0
+ ; AVX1-LABEL: name: test_sub_v64i8
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[SUB2:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[SUB3:%[0-9]+]]:_(<16 x s8>) = G_SUB [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[SUB]](<16 x s8>), [[SUB1]](<16 x s8>), [[SUB2]](<16 x s8>), [[SUB3]](<16 x s8>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
+ ; AVX1-NEXT: RET 0
+ ; AVX512F-LABEL: name: test_sub_v64i8
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
+ ; AVX512F-NEXT: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
+ ; AVX512F-NEXT: [[SUB:%[0-9]+]]:_(<32 x s8>) = G_SUB [[UV]], [[UV2]]
+ ; AVX512F-NEXT: [[SUB1:%[0-9]+]]:_(<32 x s8>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX512F-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[SUB]](<32 x s8>), [[SUB1]](<32 x s8>)
+ ; AVX512F-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<64 x s8>)
+ ; AVX512F-NEXT: RET 0
+ ; AVX512BW-LABEL: name: test_sub_v64i8
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[SUB:%[0-9]+]]:_(<64 x s8>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[SUB]](<64 x s8>)
+ ; AVX512BW-NEXT: RET 0
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
- ; ALL-LABEL: name: test_sub_v32i16
- ; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<32 x s16>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $zmm0 = COPY [[SUB]](<32 x s16>)
- ; ALL: RET 0
+ ; AVX1-LABEL: name: test_sub_v32i16
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>), [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<8 x s16>), [[UV5:%[0-9]+]]:_(<8 x s16>), [[UV6:%[0-9]+]]:_(<8 x s16>), [[UV7:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[SUB2:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[SUB3:%[0-9]+]]:_(<8 x s16>) = G_SUB [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[SUB]](<8 x s16>), [[SUB1]](<8 x s16>), [[SUB2]](<8 x s16>), [[SUB3]](<8 x s16>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
+ ; AVX1-NEXT: RET 0
+ ; AVX512F-LABEL: name: test_sub_v32i16
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
+ ; AVX512F-NEXT: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
+ ; AVX512F-NEXT: [[SUB:%[0-9]+]]:_(<16 x s16>) = G_SUB [[UV]], [[UV2]]
+ ; AVX512F-NEXT: [[SUB1:%[0-9]+]]:_(<16 x s16>) = G_SUB [[UV1]], [[UV3]]
+ ; AVX512F-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[SUB]](<16 x s16>), [[SUB1]](<16 x s16>)
+ ; AVX512F-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<32 x s16>)
+ ; AVX512F-NEXT: RET 0
+ ; AVX512BW-LABEL: name: test_sub_v32i16
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[SUB:%[0-9]+]]:_(<32 x s16>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[SUB]](<32 x s16>)
+ ; AVX512BW-NEXT: RET 0
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
- ; ALL-LABEL: name: test_sub_v16i32
- ; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<16 x s32>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $zmm0 = COPY [[SUB]](<16 x s32>)
- ; ALL: RET 0
+ ; AVX1-LABEL: name: test_sub_v16i32
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[SUB2:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[SUB3:%[0-9]+]]:_(<4 x s32>) = G_SUB [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[SUB]](<4 x s32>), [[SUB1]](<4 x s32>), [[SUB2]](<4 x s32>), [[SUB3]](<4 x s32>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+ ; AVX1-NEXT: RET 0
+ ; AVX512F-LABEL: name: test_sub_v16i32
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[SUB:%[0-9]+]]:_(<16 x s32>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512F-NEXT: $zmm0 = COPY [[SUB]](<16 x s32>)
+ ; AVX512F-NEXT: RET 0
+ ; AVX512BW-LABEL: name: test_sub_v16i32
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[SUB:%[0-9]+]]:_(<16 x s32>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[SUB]](<16 x s32>)
+ ; AVX512BW-NEXT: RET 0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_SUB %0, %1
bb.1 (%ir-block.0):
liveins: $zmm0, $zmm1
- ; ALL-LABEL: name: test_sub_v8i64
- ; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
- ; ALL: [[SUB:%[0-9]+]]:_(<8 x s64>) = G_SUB [[DEF]], [[DEF1]]
- ; ALL: $zmm0 = COPY [[SUB]](<8 x s64>)
- ; ALL: RET 0
+ ; AVX1-LABEL: name: test_sub_v8i64
+ ; AVX1: liveins: $zmm0, $zmm1
+ ; AVX1-NEXT: {{ $}}
+ ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>), [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
+ ; AVX1-NEXT: [[UV4:%[0-9]+]]:_(<2 x s64>), [[UV5:%[0-9]+]]:_(<2 x s64>), [[UV6:%[0-9]+]]:_(<2 x s64>), [[UV7:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
+ ; AVX1-NEXT: [[SUB:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV]], [[UV4]]
+ ; AVX1-NEXT: [[SUB1:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV1]], [[UV5]]
+ ; AVX1-NEXT: [[SUB2:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV2]], [[UV6]]
+ ; AVX1-NEXT: [[SUB3:%[0-9]+]]:_(<2 x s64>) = G_SUB [[UV3]], [[UV7]]
+ ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[SUB]](<2 x s64>), [[SUB1]](<2 x s64>), [[SUB2]](<2 x s64>), [[SUB3]](<2 x s64>)
+ ; AVX1-NEXT: $zmm0 = COPY [[CONCAT_VECTORS]](<8 x s64>)
+ ; AVX1-NEXT: RET 0
+ ; AVX512F-LABEL: name: test_sub_v8i64
+ ; AVX512F: liveins: $zmm0, $zmm1
+ ; AVX512F-NEXT: {{ $}}
+ ; AVX512F-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512F-NEXT: [[SUB:%[0-9]+]]:_(<8 x s64>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512F-NEXT: $zmm0 = COPY [[SUB]](<8 x s64>)
+ ; AVX512F-NEXT: RET 0
+ ; AVX512BW-LABEL: name: test_sub_v8i64
+ ; AVX512BW: liveins: $zmm0, $zmm1
+ ; AVX512BW-NEXT: {{ $}}
+ ; AVX512BW-NEXT: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
+ ; AVX512BW-NEXT: [[SUB:%[0-9]+]]:_(<8 x s64>) = G_SUB [[DEF]], [[DEF1]]
+ ; AVX512BW-NEXT: $zmm0 = COPY [[SUB]](<8 x s64>)
+ ; AVX512BW-NEXT: RET 0
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_SUB %0, %1