The VBROADCAST combines and SimplifyDemandedVectorElts improvements mean that we now more consistently use shorter (128-bit) X86vzload input operands.
Follow-up to D58053
llvm-svn: 354346
let Predicates = [HasAVX512] in {
// 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
- def : Pat<(v8i64 (X86VBroadcast (v8i64 (X86vzload addr:$src)))),
+ def : Pat<(v8i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
(VPBROADCASTQZm addr:$src)>;
}
// 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
(VPBROADCASTQZ128m addr:$src)>;
- def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
+ def : Pat<(v4i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
(VPBROADCASTQZ256m addr:$src)>;
}
let Predicates = [HasVLX, HasBWI] in {
// 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
(VPBROADCASTQrm addr:$src)>;
- def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
+ def : Pat<(v4i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
(VPBROADCASTQYrm addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
;
; X86_AVX512-LABEL: insert_subvector_into_undef:
; X86_AVX512: # %bb.0:
-; X86_AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86_AVX512-NEXT: vbroadcastsd %xmm0, %zmm0
+; X86_AVX512-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0
; X86_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: insert_subvector_into_undef:
define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) {
; X86-LABEL: combine_permd_insertion_as_broadcast_v4i64:
; X86: # %bb.0:
-; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: vbroadcastsd %xmm0, %ymm0
+; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_permd_insertion_as_broadcast_v4i64:
define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
; X86-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
; X86: # %bb.0:
-; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: vbroadcastsd %xmm0, %zmm0
+; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0
; X86-NEXT: retl
;
; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64: