 }
 }
-void DecodeVPERMVMask(const Constant *C, MVT VT,
+void DecodeVPERMVMask(const Constant *C, unsigned ElSize,
                       SmallVectorImpl<int> &ShuffleMask) {
   Type *MaskTy = C->getType();
-  if (MaskTy->isVectorTy()) {
-    unsigned NumElements = MaskTy->getVectorNumElements();
-    if (NumElements == VT.getVectorNumElements()) {
-      unsigned EltMaskSize = Log2_64(NumElements);
-      for (unsigned i = 0; i < NumElements; ++i) {
-        Constant *COp = C->getAggregateElement(i);
-        if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp))) {
-          ShuffleMask.clear();
-          return;
-        }
-        if (isa<UndefValue>(COp))
-          ShuffleMask.push_back(SM_SentinelUndef);
-        else {
-          APInt Element = cast<ConstantInt>(COp)->getValue();
-          Element = Element.getLoBits(EltMaskSize);
-          ShuffleMask.push_back(Element.getZExtValue());
-        }
-      }
-    }
+  unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+  (void)MaskTySize;
+  assert((MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512) &&
+         "Unexpected vector size.");
+  assert((ElSize == 8 || ElSize == 16 || ElSize == 32 || ElSize == 64) &&
+         "Unexpected vector element size.");
+
+  // The shuffle mask requires elements the same size as the target.
+  SmallBitVector UndefElts;
+  SmallVector<uint64_t, 8> RawMask;
+  if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
     return;
+
+  unsigned NumElts = RawMask.size();
+
+  for (unsigned i = 0; i != NumElts; ++i) {
+    if (UndefElts[i]) {
+      ShuffleMask.push_back(SM_SentinelUndef);
+      continue;
+    }
+    int Index = RawMask[i] & (NumElts - 1);
+    ShuffleMask.push_back(Index);
+  }
   }
-  // Scalar value; just broadcast it
-  if (!isa<ConstantInt>(C))
-    return;
-  uint64_t Element = cast<ConstantInt>(C)->getZExtValue();
-  int NumElements = VT.getVectorNumElements();
-  Element &= (1 << NumElements) - 1;
-  for (int i = 0; i < NumElements; ++i)
-    ShuffleMask.push_back(Element);
 }
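
For intuition, the decode step above can be exercised in isolation. The following is a minimal standalone sketch, not the patch's code; decodeVPermMask and kSentinelUndef are hypothetical stand-ins. It assumes NumElts is a power of two (guaranteed by the asserts above), so masking with NumElts - 1 keeps exactly the low log2(NumElts) index bits that VPERMV actually reads:

    #include <cstdint>
    #include <vector>

    static const int kSentinelUndef = -1; // stand-in for SM_SentinelUndef

    // Mirrors the new decode loop: undef lanes become the sentinel, and
    // defined lanes keep only their low log2(NumElts) bits, so out-of-range
    // indices wrap around instead of invalidating the whole mask.
    std::vector<int> decodeVPermMask(const std::vector<uint64_t> &RawMask,
                                     const std::vector<bool> &UndefElts) {
      std::vector<int> ShuffleMask;
      const unsigned NumElts = RawMask.size(); // assumed power of two
      for (unsigned i = 0; i != NumElts; ++i) {
        if (UndefElts[i]) {
          ShuffleMask.push_back(kSentinelUndef);
          continue;
        }
        ShuffleMask.push_back(static_cast<int>(RawMask[i] & (NumElts - 1)));
      }
      return ShuffleMask;
    }

Feeding it the raw values {7,14,5,12,3,10,1,8} from the identity test below yields {7,6,5,4,3,2,1,0}.
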
 void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize,
 define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
 ; X32-LABEL: combine_permvar_8f64_identity:
 ; X32:       # BB#0:
-; X32-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
-; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
-; X32-NEXT:    vmovapd {{.*#+}} zmm1 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8f64_identity:
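
Why the identity test now collapses to a bare retl: on X32 each 64-bit mask element prints as a pair of 32-bit halves, so the first deleted constant is really the permutation [7,6,5,4,3,2,1,0] and the second is [7,14,5,12,3,10,1,8], which the decoder's wrap (RawMask[i] & (NumElts - 1)) reduces to the same [7,6,5,4,3,2,1,0]. Applying that reversal twice returns every lane to its origin (lane 0 goes to lane 7 and back again), so the combiner can delete both variable permutes outright.
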
 define <8 x double> @combine_permvar_8f64_as_permpd(<8 x double> %x0, <8 x double> %x1) {
 ; X32-LABEL: combine_permvar_8f64_as_permpd:
 ; X32:       # BB#0:
-; X32-NEXT:    vmovapd {{.*#+}} zmm1 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
-; X32-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; X32-NEXT:    vpermpd {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_permvar_8f64_as_permpd:
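
The zmm0[3,2,1,0,7,6,5,4] form above is the immediate-encoded VPERMPD, which needs no constant-pool load and no spare register for the index vector. As a hedged sketch (this helper is hypothetical, not the combine's actual code): on ZMM the imm8 applies the same four 2-bit selectors to each 256-bit half, so a decoded mask is immediate-encodable only when no lane crosses halves and both halves agree on the pattern:

    #include <vector>

    // Hypothetical check: can an 8 x f64 shuffle mask be emitted as
    // "vpermpd $imm"? Returns the imm8 value, or -1 if not encodable.
    int encodePermPDImm(const std::vector<int> &Mask) {
      if (Mask.size() != 8)
        return -1;
      int Sel[4] = {-1, -1, -1, -1}; // selector per slot; -1 = unconstrained
      for (unsigned i = 0; i != 8; ++i) {
        int M = Mask[i];
        if (M < 0)
          continue;                             // undef lane constrains nothing
        if ((M & ~7) != 0 || ((M ^ (int)i) & 4) != 0)
          return -1;                            // out of range or crosses halves
        int S = M & 3, Slot = i & 3;
        if (Sel[Slot] >= 0 && Sel[Slot] != S)
          return -1;                            // the two halves disagree
        Sel[Slot] = S;
      }
      int Imm = 0;
      for (int Slot = 0; Slot != 4; ++Slot)
        if (Sel[Slot] > 0)
          Imm |= Sel[Slot] << (2 * Slot);
      return Imm;
    }

For the mask above, encodePermPDImm({3,2,1,0,7,6,5,4}) returns 27 (0b00011011), i.e. this test's new output is vpermpd $27 in disguise.
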
 ; X32:       # BB#0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    kmovd %eax, %k1
-; X32-NEXT:    vmovapd {{.*#+}} zmm2 = <3,0,2,0,1,0,u,u,u,u,6,0,5,0,4,0>
-; X32-NEXT:    vpermpd %zmm0, %zmm2, %zmm1 {%k1}
+; X32-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,2,1,0,7,6,5,4]
 ; X32-NEXT:    vmovapd %zmm1, %zmm0
 ; X32-NEXT:    retl
 ;
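
The masked variant exercises the same fold under a writemask: the constant-pool load into zmm2 disappears, while the immediate-form vpermpd still merges into zmm1 under {%k1} before the result is copied to zmm0, so the predication semantics are preserved across the combine.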