%2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
ret <4 x i32> %2
}
+
+;
+; known bits folding
+;
+
+define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
+; CHECK-LABEL: and_or_zext_v2i32:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: por {{.*}}(%rip), %xmm0
+; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
+  ; Known-bits: the zext guarantees bits 32-63 of each lane are zero, and the
+  ; 'or 1' can only set bit 0, so the AND mask 4294967296 (= 1 << 32) selects a
+  ; bit that is known zero -- the result is provably <2 x i64> zeroinitializer.
+  ; NOTE(review): the CHECK lines above still expect the full pblendw/por/pand
+  ; sequence, i.e. this test documents that the fold is not performed yet.
+  %1 = zext <2 x i32> %a0 to <2 x i64>
+  %2 = or <2 x i64> %1, <i64 1, i64 1>
+  %3 = and <2 x i64> %2, <i64 4294967296, i64 4294967296>
+  ret <2 x i64> %3
+}
+
+define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
+; CHECK-LABEL: and_or_zext_v4i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; CHECK-NEXT: por {{.*}}(%rip), %xmm0
+; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
+; CHECK-NEXT: retq
+  ; Narrower variant of and_or_zext_v2i32: the zext guarantees bits 16-31 of
+  ; each lane are zero and 'or 1' can only set bit 0, so the AND mask 65536
+  ; (= 1 << 16) selects a known-zero bit -- the result is provably
+  ; <4 x i32> zeroinitializer.
+  ; NOTE(review): the CHECK lines above still expect the full pblendw/por/pand
+  ; sequence, i.e. this test documents that the fold is not performed yet.
+  %1 = zext <4 x i16> %a0 to <4 x i32>
+  %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+  %3 = and <4 x i32> %2, <i32 65536, i32 65536, i32 65536, i32 65536>
+  ret <4 x i32> %3
+}