--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX2
+
+; https://bugs.llvm.org/show_bug.cgi?id=51615
+; We cannot replace a wide volatile load with a broadcast-from-memory,
+; because that would narrow the load, which is not legal for volatile accesses.
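+;
+; As a hypothetical sketch (not exercised by this test), folding the splat
+; into the 16-byte volatile load of @g0 below would replace
+;   %i = load volatile <2 x double>, <2 x double>* @g0, align 16
+; with something like the 8-byte access
+;   %lo = load volatile double, double* bitcast (<2 x double>* @g0 to double*)
+; which reads fewer bytes than the program requested.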
+
+@g0 = external dso_local global <2 x double>, align 16
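+; The 16-byte volatile load must be kept whole: the checks below load all of
+; @g0 with vmovaps and then splat from the register (vmovddup / vbroadcastsd
+; %xmm0) instead of broadcasting directly from memory.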
+define void @volatile_load_2_elts() {
+; AVX-LABEL: volatile_load_2_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps g0(%rip), %xmm0
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
+; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
+; AVX-NEXT: vmovapd %ymm0, (%rax)
+; AVX-NEXT: vmovapd %ymm1, (%rax)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: volatile_load_2_elts:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovaps g0(%rip), %xmm0
+; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-NEXT: vmovaps %ymm2, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %i = load volatile <2 x double>, <2 x double>* @g0, align 16
+ %i1 = shufflevector <2 x double> %i, <2 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
+ %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
+ store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+ ret void
+}
+
+@g1 = external dso_local global <1 x double>, align 16
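+; Here the volatile load is only 8 bytes wide to begin with, so a broadcast
+; directly from memory (vbroadcastsd g1(%rip)) reads exactly the requested
+; bytes and remains legal.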
+define void @volatile_load_1_elt() {
+; ALL-LABEL: volatile_load_1_elt:
+; ALL: # %bb.0:
+; ALL-NEXT: vbroadcastsd g1(%rip), %ymm0
+; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT: vmovaps %ymm0, (%rax)
+; ALL-NEXT: vmovaps %ymm2, (%rax)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %i = load volatile <1 x double>, <1 x double>* @g1, align 16
+ %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
+ %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
+ store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+ ret void
+}
+
+@g2 = external dso_local global <2 x float>, align 16
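+; The bitcast does not change the width of the access: the volatile
+; <2 x float> load still covers the same 8 bytes as the <1 x double> it is
+; reinterpreted as, so the broadcast-from-memory fold remains legal here too.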
+define void @volatile_load_2_elts_bitcast() {
+; ALL-LABEL: volatile_load_2_elts_bitcast:
+; ALL: # %bb.0:
+; ALL-NEXT: vbroadcastsd g2(%rip), %ymm0
+; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT: vmovaps %ymm0, (%rax)
+; ALL-NEXT: vmovaps %ymm2, (%rax)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %i0 = load volatile <2 x float>, <2 x float>* @g2, align 16
+ %i = bitcast <2 x float> %i0 to <1 x double>
+ %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
+ %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
+ store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+ ret void
+}