ret <8 x double> %z
}
+; FIXME: The IR-level FMF should propagate to the node.
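+; With nnan on the call, the NaN-handling cmp/blend sequence below should be
+; unnecessary and the lowering should collapse to a single max instruction
+; (not yet reflected in the generated checks).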
+
+define double @maxnum_intrinsic_nnan_fmf_f64(double %a, double %b) {
+; SSE-LABEL: maxnum_intrinsic_nnan_fmf_f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_fmf_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call nnan double @llvm.maxnum.f64(double %a, double %b)
+ ret double %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <4 x float> @maxnum_intrinsic_nnan_fmf_v4f32(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: maxnum_intrinsic_nnan_fmf_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: maxps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_fmf_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call nnan <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %r
+}
+
+; FIXME: The function-level NaN attribute (the current mechanism, though likely
+; legacy once per-instruction FMF is standard) should also enable the fold.
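+; Attribute #0 ("no-nans-fp-math"="true", defined below) tells the backend it
+; may assume no NaN operands, so these calls should get the same simplified
+; lowering even without fast-math flags on the call itself.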
+
+define float @maxnum_intrinsic_nnan_attr_f32(float %a, float %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call float @llvm.maxnum.f32(float %a, float %b)
+ ret float %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <2 x double> @maxnum_intrinsic_nnan_attr_v2f64(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm1, %xmm2
+; SSE-NEXT: maxpd %xmm0, %xmm2
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm2, %xmm0
+; SSE-NEXT: orpd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %r
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
+
%z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
ret <8 x double> %z
}
+
+; FIXME: The IR-level FMF should propagate to the node.
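+; As with the maxnum tests, once nnan reaches the DAG node the compare/blend
+; that guards against NaN inputs should go away, leaving a lone min instruction.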
+
+define float @minnum_intrinsic_nnan_fmf_f32(float %a, float %b) {
+; SSE-LABEL: minnum_intrinsic_nnan_fmf_f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: minss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_fmf_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vminss %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call nnan float @llvm.minnum.f32(float %a, float %b)
+ ret float %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x double> %b) {
+; SSE-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm1, %xmm2
+; SSE-NEXT: minpd %xmm0, %xmm2
+; SSE-NEXT: cmpunordpd %xmm0, %xmm0
+; SSE-NEXT: andpd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm2, %xmm0
+; SSE-NEXT: orpd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %r
+}
+
+; FIXME: The function-level NaN attribute (the current mechanism, though likely
+; legacy once per-instruction FMF is standard) should also enable the fold.
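+; The "no-nans-fp-math"="true" attribute is the function-level counterpart of
+; the nnan flag, so the same simplification is expected here.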
+
+define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: minsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vminsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call double @llvm.minnum.f64(double %a, double %b)
+ ret double %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <4 x float> @minnum_intrinsic_nnan_attr_v4f32(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: minps %xmm0, %xmm2
+; SSE-NEXT: cmpunordps %xmm0, %xmm0
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_attr_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vminps %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %r = tail call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %r
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
+