  return CGF.Builder.CreateMaskedLoad(Ops[0], Align, MaskVec, Ops[1]);
}
+static Value *EmitX86SubVectorBroadcast(CodeGenFunction &CGF,
+                                        SmallVectorImpl<Value *> &Ops,
+                                        llvm::Type *DstTy,
+                                        unsigned SrcSizeInBits,
+                                        unsigned Align) {
+  // Load the subvector.
+  Ops[0] = CGF.Builder.CreateAlignedLoad(Ops[0], Align);
+
+  // Create broadcast mask.
+  unsigned NumDstElts = DstTy->getVectorNumElements();
+  unsigned NumSrcElts = SrcSizeInBits / DstTy->getScalarSizeInBits();
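+  // e.g. for the vbroadcastf128_pd256 case below, DstTy is <4 x double>, so
+  // NumDstElts = 4 and NumSrcElts = 128 / 64 = 2.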
+
+  SmallVector<uint32_t, 8> Mask;
+  for (unsigned i = 0; i != NumDstElts; i += NumSrcElts)
+    for (unsigned j = 0; j != NumSrcElts; ++j)
+      Mask.push_back(j);
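+  // The mask repeats the source indices until the destination is filled,
+  // e.g. <0, 1, 0, 1> when broadcasting a 2-element subvector to 4 elements.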
+
+  return CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], Mask, "subvecbcst");
+}
+
static Value *EmitX86Select(CodeGenFunction &CGF,
                            Value *Mask, Value *Op0, Value *Op1) {
      getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
    return EmitX86MaskedLoad(*this, Ops, Align);
  }
+
+  case X86::BI__builtin_ia32_vbroadcastf128_pd256:
+  case X86::BI__builtin_ia32_vbroadcastf128_ps256: {
+    llvm::Type *DstTy = ConvertType(E->getType());
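+    // Load the 128-bit subvector at 16-byte alignment, then splat it across
+    // the 256-bit destination.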
+    return EmitX86SubVectorBroadcast(*this, Ops, DstTy, 128, 16);
+  }
+
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
__m256d test_mm256_broadcast_pd(__m128d* A) {
  // CHECK-LABEL: test_mm256_broadcast_pd
-  // CHECK: call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %{{.*}})
+  // CHECK: load <2 x double>, <2 x double>* %{{.*}}, align 16
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
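+  // The <0, 1, 0, 1> shuffle mask copies both source elements into each 128-bit lane.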
  return _mm256_broadcast_pd(A);
}
__m256 test_mm256_broadcast_ps(__m128* A) {
  // CHECK-LABEL: test_mm256_broadcast_ps
-  // CHECK: call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %{{.*}})
+  // CHECK: load <4 x float>, <4 x float>* %{{.*}}, align 16
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
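+  // Likewise, repeating indices 0-3 splats the four source floats into both 128-bit lanes.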
  return _mm256_broadcast_ps(A);
}