The code change itself is simple: instead of attaching an anonymous SDLoc to splatted
vector constants, reuse the scalar constant's existing SDLoc, since that is what is
passed into getConstant() as a parameter. But this changes instruction scheduling,
so I'll explain why that happens.
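To see why the choice of SDLoc can matter at all, here is a simplified sketch of the
two SDLoc constructors involved (condensed from the general shape of LLVM's SDLoc
class; the real class stores things somewhat differently, but the IROrder behavior is
the point):

  class SDLoc {
    DebugLoc DL;
    int IROrder;                 // position of the originating IR instruction
  public:
    SDLoc() : IROrder(0) {}      // anonymous location: IROrder is just 0
    SDLoc(const SDNode *N)       // inherits the node's source position
        : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
  };

An anonymous SDLoc() therefore stamps the splatted BUILD_VECTOR with IROrder 0, while
reusing the caller's SDLoc preserves the scalar constant's position.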
The motivation for this patch starts near:
http://reviews.llvm.org/rL258833
...x86's getZeroVector() could be similarly cleaned up, and I thought the change would
be 'NFC' (no functional change). But when I made that change locally, several x86
codegen tests wiggled.
It turns out that the lack of SDLoc consistency in getConstant() changes the way
ScheduleDAGRRList behaves. This is because the SDLoc carries an 'IROrder' field, and
some DAG scheduler heuristics use IROrder as a tie-breaker; see the sketch below the
revision links.
Differential Revision: http://reviews.llvm.org/D16972
llvm-svn: 260582
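For reference, here is a minimal sketch of the kind of tie-breaking involved. This is
not the actual ScheduleDAGRRList code; the SchedUnit struct and comparator below are
hypothetical stand-ins that only illustrate how an IROrder of 0 from an anonymous
SDLoc() can flip an otherwise-equal comparison:

  struct SchedUnit {
    int Priority;      // primary key: latency, register pressure, etc.
    unsigned IROrder;  // source order, captured from the node's SDLoc
  };

  bool schedulesBefore(const SchedUnit &L, const SchedUnit &R) {
    if (L.Priority != R.Priority)
      return L.Priority > R.Priority;  // the normal heuristic decides first
    // Tie-break on source order. If one node was built with SDLoc()
    // (IROrder 0) and an equivalent node was built with a real location,
    // the two compare differently and the emitted schedule changes.
    return L.IROrder < R.IROrder;
  }

With consistent SDLocs, equivalent constants get equivalent IROrders, and the
tie-break becomes stable with respect to the source.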
     for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
       Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
-    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
-                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
-                                     Ops));
+    SDValue Result = getNode(ISD::BITCAST, DL, VT,
+                             getNode(ISD::BUILD_VECTOR, DL, ViaVecVT, Ops));
     return Result;
   }
   if (VT.isVector()) {
     SmallVector<SDValue, 8> Ops;
     Ops.assign(VT.getVectorNumElements(), Result);
-    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
+    Result = getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
   }
   return Result;
 }
   if (VT.isVector()) {
     SmallVector<SDValue, 8> Ops;
     Ops.assign(VT.getVectorNumElements(), Result);
-    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
+    Result = getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
   }
   return Result;
 }
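The net effect of all three hunks above is the same invariant (a hypothetical usage
sketch; 'DAG' and 'DL' stand for a SelectionDAG and an SDLoc already in scope):

  SDValue Splat = DAG.getConstant(42, DL, MVT::v4i32);
  // The splatted BUILD_VECTOR (and any BITCAST wrapped around it) now
  // reports Splat.getNode()->getIROrder() == DL.getIROrder() rather
  // than the 0 that an anonymous SDLoc() would have produced.

so the scheduler's tie-breaking no longer depends on which path materialized the
constant. The test diffs below show the resulting instruction reorderings.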
define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00X:
; X32: ## BB#0:
-; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
-; X32-NEXT: pxor %xmm0, %xmm0
-; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; X32-NEXT: pxor %xmm1, %xmm1
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X00X:
; X64: ## BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
-; X64-NEXT: pxor %xmm0, %xmm0
-; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
; CHECK-LABEL: prompop:
; CHECK: # BB#0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
+; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $1, %xmm1
; CHECK-NEXT: pand {{.*}}(%rip), %xmm1
; CHECK-NEXT: psubq %xmm1, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323]
-; CHECK-NEXT: movdqa %xmm0, %xmm2
-; CHECK-NEXT: pand %xmm1, %xmm2
+; CHECK-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NEXT: pand %xmm1, %xmm3
; CHECK-NEXT: psrlq $2, %xmm0
; CHECK-NEXT: pand %xmm1, %xmm0
-; CHECK-NEXT: paddq %xmm2, %xmm0
+; CHECK-NEXT: paddq %xmm3, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $4, %xmm1
; CHECK-NEXT: paddq %xmm0, %xmm1
; CHECK-NEXT: pand {{.*}}(%rip), %xmm1
-; CHECK-NEXT: pxor %xmm0, %xmm0
-; CHECK-NEXT: psadbw %xmm0, %xmm1
+; CHECK-NEXT: psadbw %xmm2, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%c = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
; AVX512F-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: movb $51, %al
-; AVX512F-NEXT: kmovw %eax, %k2
; AVX512F-NEXT: movq {{.*}}(%rip), %rax
+; AVX512F-NEXT: movb $51, %cl
+; AVX512F-NEXT: kmovw %ecx, %k2
; AVX512F-NEXT: vpbroadcastq %rax, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpbroadcastq %rax, %zmm1 {%k1} {z}
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]