; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512
; Test all cmp predicates that can be used with SSE.
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oeq_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpeqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oeq float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oeq_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oeq double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ogt_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltss %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ogt float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ogt_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltsd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ogt double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oge_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpless %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oge float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_oge_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmplesd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oge double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_olt_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp olt float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_olt_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp olt double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ole_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpless %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ole float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ole_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmplesd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ole double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ord_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpordss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ord float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ord_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpordsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ord double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uno_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpunordss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uno float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uno_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uno double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ugt_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnless %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ugt_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnlesd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ugt double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uge_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnltss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uge float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_uge_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnltsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uge double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ult_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnless %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ult float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ult_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnlesd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ult double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ule_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnltss %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ule float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_ule_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpnltsd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ule double %a, %b
%2 = select i1 %1, double %c, double %d
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_une_f32:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpneqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp une float %a, %b
%2 = select i1 %1, float %c, float %d
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: select_fcmp_une_f64:
; AVX512: # BB#0:
; AVX512-NEXT: vcmpneqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp une double %a, %b
%2 = select i1 %1, double %c, double %d