define float @roundeven_f32(float %x) {
; SSE41-LABEL: roundeven_f32:
; SSE41: ## %bb.0:
-; SSE41-NEXT: jmp _roundevenf ## TAILCALL
+; SSE41-NEXT: roundss $8, %xmm0, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f32:
; AVX: ## %bb.0:
-; AVX-NEXT: jmp _roundevenf ## TAILCALL
+; AVX-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
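
; The $8 immediate encodes imm8 0b1000: bit 3 suppresses the precision
; (inexact) exception, bit 2 = 0 takes the rounding mode from imm8[1:0],
; and imm8[1:0] = 00 selects round-to-nearest-even, which is exactly the
; roundeven semantics that previously required the libcall.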

define double @roundeven_f64(double %x) {
; SSE41-LABEL: roundeven_f64:
; SSE41: ## %bb.0:
-; SSE41-NEXT: jmp _roundeven ## TAILCALL
+; SSE41-NEXT: roundsd $8, %xmm0, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f64:
; AVX: ## %bb.0:
-; AVX-NEXT: jmp _roundeven ## TAILCALL
+; AVX-NEXT: vroundsd $8, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
%a = call double @llvm.roundeven.f64(double %x)
ret double %a
}
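
; Same lowering for f64: ROUNDSD/VROUNDSD with the $8 immediate replaces
; the tail call to roundeven.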

define <4 x float> @roundeven_v4f32(<4 x float> %x) {
; SSE41-LABEL: roundeven_v4f32:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $40, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 48
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: addq $40, %rsp
+; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f32:
; AVX: ## %bb.0:
-; AVX-NEXT: subq $40, %rsp
-; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: vroundps $8, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
ret <4 x float> %a
}
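
; A single ROUNDPS/VROUNDPS rounds all four lanes at once, eliminating the
; four roundevenf libcalls and the insertps reassembly and 16-byte stack
; spills around them.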

define <2 x double> @roundeven_v2f64(<2 x double> %x) {
; SSE41-LABEL: roundeven_v2f64:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $40, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 48
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: addq $40, %rsp
+; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v2f64:
; AVX: ## %bb.0:
-; AVX-NEXT: subq $40, %rsp
-; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: vroundpd $8, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
ret <2 x double> %a
}
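
; Likewise one ROUNDPD/VROUNDPD covers both f64 lanes, so the movhlps /
; movlhps lane splitting and the call sequence disappear.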

define <8 x float> @roundeven_v8f32(<8 x float> %x) {
; SSE41-LABEL: roundeven_v8f32:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $56, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 64
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movshdup (%rsp), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: addq $56, %rsp
+; SSE41-NEXT: roundps $8, %xmm0, %xmm0
+; SSE41-NEXT: roundps $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v8f32:
; AVX: ## %bb.0:
-; AVX-NEXT: subq $88, %rsp
-; AVX-NEXT: .cfi_def_cfa_offset 96
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX-NEXT: callq _roundevenf
-; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX-NEXT: addq $88, %rsp
+; AVX-NEXT: vroundps $8, %ymm0, %ymm0
; AVX-NEXT: retq
%a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
ret <8 x float> %a
}
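
; For 256-bit vectors SSE4.1 operates on the two 128-bit halves (one
; ROUNDPS per xmm register), while AVX rounds the whole ymm register with
; a single VROUNDPS and needs no vextractf128/vzeroupper traffic.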

define <4 x double> @roundeven_v4f64(<4 x double> %x) {
; SSE41-LABEL: roundeven_v4f64:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $56, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 64
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: addq $56, %rsp
+; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
+; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f64:
; AVX: ## %bb.0:
-; AVX-NEXT: subq $88, %rsp
-; AVX-NEXT: .cfi_def_cfa_offset 96
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX-NEXT: ## xmm0 = mem[1,0]
-; AVX-NEXT: callq _roundeven
-; AVX-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX-NEXT: addq $88, %rsp
+; AVX-NEXT: vroundpd $8, %ymm0, %ymm0
; AVX-NEXT: retq
%a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
ret <4 x double> %a
}
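
; The v4f64 case follows the same pattern: two ROUNDPD on SSE4.1, one
; 256-bit VROUNDPD on AVX.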

define <16 x float> @roundeven_v16f32(<16 x float> %x) {
; SSE41-LABEL: roundeven_v16f32:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $88, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 96
-; SSE41-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movshdup (%rsp), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: callq _roundevenf
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
-; SSE41-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm0[0]
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
-; SSE41-NEXT: addq $88, %rsp
+; SSE41-NEXT: roundps $8, %xmm0, %xmm0
+; SSE41-NEXT: roundps $8, %xmm1, %xmm1
+; SSE41-NEXT: roundps $8, %xmm2, %xmm2
+; SSE41-NEXT: roundps $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v16f32:
; AVX1: ## %bb.0:
-; AVX1-NEXT: subq $152, %rsp
-; AVX1-NEXT: .cfi_def_cfa_offset 160
-; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX1-NEXT: callq _roundevenf
-; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 ## 16-byte Folded Reload
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: addq $152, %rsp
+; AVX1-NEXT: vroundps $8, %ymm0, %ymm0
+; AVX1-NEXT: vroundps $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v16f32:
; AVX512: ## %bb.0:
-; AVX512-NEXT: subq $184, %rsp
-; AVX512-NEXT: .cfi_def_cfa_offset 192
-; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 64-byte Spill
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
-; AVX512-NEXT: callq _roundevenf
-; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX512-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 ## 32-byte Folded Reload
-; AVX512-NEXT: addq $184, %rsp
+; AVX512-NEXT: vrndscaleps $8, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
ret <16 x float> %a
}
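
; There is no 512-bit VROUNDPS, so AVX512 selects VRNDSCALEPS instead; its
; low four immediate bits have the same meaning as for ROUNDPS (the upper
; four, zero here, give the scale), so $8 still means round-to-nearest-even
; with the inexact exception suppressed.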

define <8 x double> @roundeven_v8f64(<8 x double> %x) {
; SSE41-LABEL: roundeven_v8f64:
; SSE41: ## %bb.0:
-; SSE41-NEXT: subq $88, %rsp
-; SSE41-NEXT: .cfi_def_cfa_offset 96
-; SSE41-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE41-NEXT: callq _roundeven
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
-; SSE41-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE41-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
-; SSE41-NEXT: addq $88, %rsp
+; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
+; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
+; SSE41-NEXT: roundpd $8, %xmm2, %xmm2
+; SSE41-NEXT: roundpd $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v8f64:
; AVX1: ## %bb.0:
-; AVX1-NEXT: subq $120, %rsp
-; AVX1-NEXT: .cfi_def_cfa_offset 128
-; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX1-NEXT: ## xmm0 = mem[1,0]
-; AVX1-NEXT: callq _roundeven
-; AVX1-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 ## 16-byte Folded Reload
-; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
-; AVX1-NEXT: addq $120, %rsp
+; AVX1-NEXT: vroundpd $8, %ymm0, %ymm0
+; AVX1-NEXT: vroundpd $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v8f64:
; AVX512: ## %bb.0:
-; AVX512-NEXT: subq $184, %rsp
-; AVX512-NEXT: .cfi_def_cfa_offset 192
-; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 64-byte Spill
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX512-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vmovapd %xmm0, (%rsp) ## 16-byte Spill
-; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
-; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; AVX512-NEXT: ## xmm0 = mem[1,0]
-; AVX512-NEXT: callq _roundeven
-; AVX512-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 ## 16-byte Folded Reload
-; AVX512-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 ## 32-byte Folded Reload
-; AVX512-NEXT: addq $184, %rsp
+; AVX512-NEXT: vrndscalepd $8, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
ret <8 x double> %a
}
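
; The v8f64 lowering mirrors v16f32: four ROUNDPD on SSE4.1, two 256-bit
; VROUNDPD on AVX1, and a single VRNDSCALEPD on the full zmm for AVX512.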