; CHECK-NEXT: ret <4 x float> %1
}
+; Try doubles.
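+; The x86 maskload intrinsics read only the sign bit of each mask element, so
+; the constant mask <i64 -1, i64 0> is expected to become <i1 true, i1 false>
+; in the generic llvm.masked.load call checked below.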
+
+define <2 x double> @mload_one_one_double(i8* %f) {
+ %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
+ ret <2 x double> %ld
+
+; CHECK-LABEL: @mload_one_one_double(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <2 x double>*
+; CHECK-NEXT: %1 = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> undef)
+; CHECK-NEXT: ret <2 x double> %1
+}
+
+; Try 256-bit FP ops.
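+; The same transform should apply to the 256-bit variants; in each test only a
+; single mask element has its sign bit set, so only that lane should be loaded.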
+
+define <8 x float> @mload_v8f32(i8* %f) {
+ %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
+ ret <8 x float> %ld
+
+; CHECK-LABEL: @mload_v8f32(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <8 x float>*
+; CHECK-NEXT: %1 = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> undef)
+; CHECK-NEXT: ret <8 x float> %1
+}
+
+define <4 x double> @mload_v4f64(i8* %f) {
+ %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+ ret <4 x double> %ld
+
+; CHECK-LABEL: @mload_v4f64(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <4 x double>*
+; CHECK-NEXT: %1 = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> undef)
+; CHECK-NEXT: ret <4 x double> %1
+}
+
+; Try the AVX2 variants.
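+; The AVX2 integer maskloads should be converted the same way; the element
+; type of the loaded vector carries over to the generic masked load.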
+
+define <4 x i32> @mload_v4i32(i8* %f) {
+ %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
+ ret <4 x i32> %ld
+
+; CHECK-LABEL: @mload_v4i32(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <4 x i32>*
+; CHECK-NEXT: %1 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> undef)
+; CHECK-NEXT: ret <4 x i32> %1
+}
+
+define <2 x i64> @mload_v2i64(i8* %f) {
+ %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
+ ret <2 x i64> %ld
+
+; CHECK-LABEL: @mload_v2i64(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <2 x i64>*
+; CHECK-NEXT: %1 = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> undef)
+; CHECK-NEXT: ret <2 x i64> %1
+}
+
+define <8 x i32> @mload_v8i32(i8* %f) {
+ %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
+ ret <8 x i32> %ld
+
+; CHECK-LABEL: @mload_v8i32(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <8 x i32>*
+; CHECK-NEXT: %1 = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> undef)
+; CHECK-NEXT: ret <8 x i32> %1
+}
+
+define <4 x i64> @mload_v4i64(i8* %f) {
+ %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+ ret <4 x i64> %ld
+
+; CHECK-LABEL: @mload_v4i64(
+; CHECK-NEXT: %castvec = bitcast i8* %f to <4 x i64>*
+; CHECK-NEXT: %1 = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef)
+; CHECK-NEXT: ret <4 x i64> %1
+}
+
+
;; MASKED STORES
; If the mask isn't constant, do nothing.
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)
+declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
+declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
+declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
+declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)
+
declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)