; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
; X32-LABEL: test_extrqi:
ret <2 x i64> %1
}
+; Verify that the load feeding the immediate-form EXTRQ stays in the integer
+; domain (movdqa / vmovdqa rather than a float-domain movaps), since EXTRQ is
+; an SSE4A integer instruction. Note AT&T operand order: the immediates appear
+; reversed vs. the intrinsic call below ($2 = index, $3 = length).
+; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — regenerate
+; rather than hand-editing them.
+define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_extrqi_domain:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: movdqa (%eax), %xmm0
+; X32-SSE-NEXT: extrq $2, $3, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: test_extrqi_domain:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vmovdqa (%eax), %xmm0
+; X32-AVX-NEXT: extrq $2, $3, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: test_extrqi_domain:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: movdqa (%rdi), %xmm0
+; X64-SSE-NEXT: extrq $2, $3, %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: test_extrqi_domain:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0
+; X64-AVX-NEXT: extrq $2, $3, %xmm0
+; X64-AVX-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
+ ret <2 x i64> %2
+}
+
declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
ret <2 x i64> %2
}
+; Verify that the load feeding the register-form EXTRQ uses an integer-domain
+; move (movdqa / vmovdqa, not movaps). The loaded value becomes the EXTRQ
+; destination (%xmm1), so a final move copies the result back to the return
+; register %xmm0.
+; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — regenerate
+; rather than hand-editing them.
+define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_extrq_domain:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: movdqa (%eax), %xmm1
+; X32-SSE-NEXT: extrq %xmm0, %xmm1
+; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: test_extrq_domain:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT: extrq %xmm0, %xmm1
+; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: test_extrq_domain:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1
+; X64-SSE-NEXT: extrq %xmm0, %xmm1
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: test_extrq_domain:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT: extrq %xmm0, %xmm1
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = bitcast <2 x i64> %y to <16 x i8>
+ %3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
+ ret <2 x i64> %3
+}
+
declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
ret <2 x i64> %1
}
+; Verify that the load feeding the immediate-form INSERTQ stays in the integer
+; domain (movdqa / vmovdqa rather than movaps). Note AT&T operand order: the
+; immediates appear reversed vs. the intrinsic call below ($6 = index,
+; $5 = length).
+; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — regenerate
+; rather than hand-editing them.
+define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_insertqi_domain:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: movdqa (%eax), %xmm1
+; X32-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
+; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: test_insertqi_domain:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
+; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: test_insertqi_domain:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1
+; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: test_insertqi_domain:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
+ ret <2 x i64> %2
+}
+
declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
ret <2 x i64> %1
}
+; Verify that the load feeding the register-form INSERTQ uses an integer-domain
+; move (movdqa / vmovdqa, not movaps). The loaded value becomes the INSERTQ
+; destination (%xmm1), so a final move copies the result back to the return
+; register %xmm0.
+; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — regenerate
+; rather than hand-editing them.
+define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_insertq_domain:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: movdqa (%eax), %xmm1
+; X32-SSE-NEXT: insertq %xmm0, %xmm1
+; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: test_insertq_domain:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT: insertq %xmm0, %xmm1
+; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: test_insertq_domain:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1
+; X64-SSE-NEXT: insertq %xmm0, %xmm1
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: test_insertq_domain:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT: insertq %xmm0, %xmm1
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT: retq
+ %1 = load <2 x i64>, <2 x i64> *%p
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
+ ret <2 x i64> %2
+}
+
declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind
define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
; BTVER1-LABEL: shuf_0zzzzzzz1zzzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movaps %xmm0, %xmm1
+; BTVER1-NEXT: movdqa %xmm0, %xmm1
; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
; BTVER1-LABEL: shuf_2zzzzzzz3zzzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movaps %xmm0, %xmm1
+; BTVER1-NEXT: movdqa %xmm0, %xmm1
; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
; BTVER1-LABEL: shuf_01zzzzzz23zzzzzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movaps %xmm0, %xmm1
+; BTVER1-NEXT: movdqa %xmm0, %xmm1
; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
; BTVER1-LABEL: shuf_0zzz1zzz:
; BTVER1: # BB#0:
-; BTVER1-NEXT: movaps %xmm0, %xmm1
+; BTVER1-NEXT: movdqa %xmm0, %xmm1
; BTVER1-NEXT: extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; BTVER1-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]