assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation");
const TargetOptions &Options = DAG.getTarget().Options;
- bool AllowFusion =
- (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
- // Floating-point multiply-add with intermediate rounding.
- bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
+ // The transforms below are incorrect when x == 0 and y == inf, because the
+ // intermediate multiplication produces a NaN.
+ if (!Options.NoInfsFPMath)
+ return SDValue();
// Floating-point multiply-add without intermediate rounding.
bool HasFMA =
- AllowFusion && TLI.isFMAFasterThanFMulAndFAdd(VT) &&
+ (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
+ TLI.isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
+ // Floating-point multiply-add with intermediate rounding. This can produce
+ // a less precise result due to the changed rounding order.
+ bool HasFMAD = Options.UnsafeFPMath &&
+ (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
+
// No valid opcode, do not combine.
if (!HasFMAD && !HasFMA)
return SDValue();
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-FASTFMAF -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-SLOWFMAF -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-NOFMA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast -enable-no-infs-fp-math -mattr=+fp32-denormals < %s | FileCheck -check-prefix=SI-FMA -check-prefix=SI -check-prefix=FUNC %s
+
+; Note: The SI-FMA conversions of type x * (y + 1) --> x * y + x would be
+; beneficial even without fp32 denormals, but they do require no-infs-fp-math
+; for correctness.
declare i32 @llvm.amdgcn.workitem.id.x() #0
declare double @llvm.fabs.f64(double) #0
;
; FUNC-LABEL: {{^}}test_f32_mul_add_x_one_y:
-; SI: v_mac_f32_e32 [[VY:v[0-9]]], [[VY:v[0-9]]], [[VX:v[0-9]]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_add_x_one_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_add_x_one:
-; SI: v_mac_f32_e32 [[VY:v[0-9]]], [[VY:v[0-9]]], [[VX:v[0-9]]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_y_add_x_one(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_add_x_negone_y:
-; SI: v_mad_f32 [[VX:v[0-9]]], [[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_add_x_negone_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_add_x_negone:
-; SI: v_mad_f32 [[VX:v[0-9]]], [[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_y_add_x_negone(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_sub_one_x_y:
-; SI: v_mad_f32 [[VX:v[0-9]]], -[[VX]], [[VY:v[0-9]]], [[VY]]
+; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_sub_one_x_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_one_x:
-; SI: v_mad_f32 [[VX:v[0-9]]], -[[VX]], [[VY:v[0-9]]], [[VY]]
+; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_y_sub_one_x(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_sub_negone_x_y:
-; SI: v_mad_f32 [[VX:v[0-9]]], -[[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_sub_negone_x_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_negone_x:
-; SI: v_mad_f32 [[VX:v[0-9]]], -[[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_sub_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, -[[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_y_sub_negone_x(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_sub_x_one_y:
-; SI: v_mad_f32 [[VX:v[0-9]]], [[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_sub_x_one_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_x_one:
-; SI: v_mad_f32 [[VX:v[0-9]]], [[VX]], [[VY:v[0-9]]], -[[VY]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], -1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], -[[VY:v[0-9]]]
define void @test_f32_mul_y_sub_x_one(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_sub_x_negone_y:
-; SI: v_mac_f32_e32 [[VY:v[0-9]]], [[VY]], [[VX:v[0-9]]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VY:v[0-9]]], [[VS]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_sub_x_negone_y(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
}
; FUNC-LABEL: {{^}}test_f32_mul_y_sub_x_negone:
-; SI: v_mac_f32_e32 [[VY:v[0-9]]], [[VY]], [[VX:v[0-9]]]
+; SI-NOFMA: v_add_f32_e32 [[VS:v[0-9]]], 1.0, [[VX:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 {{v[0-9]}}, [[VS]], [[VY:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VY:v[0-9]]], [[VY:v[0-9]]]
define void @test_f32_mul_y_sub_x_negone(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
;
; FUNC-LABEL: {{^}}test_f32_interp:
-; SI: v_mad_f32 [[VR:v[0-9]]], -[[VT:v[0-9]]], [[VY:v[0-9]]], [[VY]]
-; SI: v_mac_f32_e32 [[VR]], [[VT]], [[VX:v[0-9]]]
+; SI-NOFMA: v_sub_f32_e32 [[VT1:v[0-9]]], 1.0, [[VT:v[0-9]]]
+; SI-NOFMA: v_mul_f32_e32 [[VTY:v[0-9]]], [[VT1]], [[VY:v[0-9]]]
+; SI-NOFMA: v_mac_f32_e32 [[VTY]], [[VT]], [[VX:v[0-9]]]
+;
+; SI-FMA: v_fma_f32 [[VR:v[0-9]]], -[[VT:v[0-9]]], [[VY:v[0-9]]], [[VY]]
+; SI-FMA: v_fma_f32 {{v[0-9]}}, [[VX:v[0-9]]], [[VT]], [[VR]]
define void @test_f32_interp(float addrspace(1)* %out,
float addrspace(1)* %in1,
float addrspace(1)* %in2,
}
; FUNC-LABEL: {{^}}test_f64_interp:
-; SI: v_fma_f64 [[VR:v\[[0-9]+:[0-9]+\]]], -[[VT:v\[[0-9]+:[0-9]+\]]], [[VY:v\[[0-9]+:[0-9]+\]]], [[VY]]
-; SI: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, [[VX:v\[[0-9]+:[0-9]+\]]], [[VT]], [[VR]]
+; SI-NOFMA: v_add_f64 [[VT1:v\[[0-9]+:[0-9]+\]]], -[[VT:v\[[0-9]+:[0-9]+\]]], 1.0
+; SI-NOFMA: v_mul_f64 [[VTY:v\[[0-9]+:[0-9]+\]]], [[VY:v\[[0-9]+:[0-9]+\]]], [[VT1]]
+; SI-NOFMA: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, [[VX:v\[[0-9]+:[0-9]+\]]], [[VT]], [[VTY]]
+;
+; SI-FMA: v_fma_f64 [[VR:v\[[0-9]+:[0-9]+\]]], -[[VT:v\[[0-9]+:[0-9]+\]]], [[VY:v\[[0-9]+:[0-9]+\]]], [[VY]]
+; SI-FMA: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, [[VX:v\[[0-9]+:[0-9]+\]]], [[VT]], [[VR]]
define void @test_f64_interp(double addrspace(1)* %out,
double addrspace(1)* %in1,
double addrspace(1)* %in2,
define <4 x float> @test_v4f32_mul_add_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_one_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_one_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_one_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <4 x float> %a, %y
define <4 x float> @test_v4f32_mul_y_add_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_one:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_one:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_one:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%a = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <4 x float> %y, %a
define <4 x float> @test_v4f32_mul_add_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_add_x_negone_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_add_x_negone_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_add_x_negone_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <4 x float> %a, %y
define <4 x float> @test_v4f32_mul_y_add_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_add_x_negone:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_add_x_negone:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_add_x_negone:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%a = fadd <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <4 x float> %y, %a
define <4 x float> @test_v4f32_mul_sub_one_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_one_x_y:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_one_x_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_one_x_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
%m = fmul <4 x float> %s, %y
define <4 x float> @test_v4f32_mul_y_sub_one_x(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_one_x:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_one_x:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_one_x:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
%m = fmul <4 x float> %y, %s
define <4 x float> @test_v4f32_mul_sub_negone_x_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_negone_x_y:
; FMA: # BB#0:
-; FMA-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_negone_x_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_negone_x_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul <4 x float> %s, %y
define <4 x float> @test_v4f32_mul_y_sub_negone_x(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_negone_x:
; FMA: # BB#0:
-; FMA-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_negone_x:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovaps {{.*#+}} xmm2 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_negone_x:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vsubps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> <float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul <4 x float> %y, %s
define <4 x float> @test_v4f32_mul_sub_x_one_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_one_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_one_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_one_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <4 x float> %s, %y
define <4 x float> @test_v4f32_mul_y_sub_x_one(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_one:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_one:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_one:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <4 x float> %y, %s
define <4 x float> @test_v4f32_mul_sub_x_negone_y(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_sub_x_negone_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_sub_x_negone_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_sub_x_negone_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <4 x float> %s, %y
define <4 x float> @test_v4f32_mul_y_sub_x_negone(<4 x float> %x, <4 x float> %y) {
; FMA-LABEL: test_v4f32_mul_y_sub_x_negone:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; FMA-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_mul_y_sub_x_negone:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %xmm1, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
+; FMA4-NEXT: vmulps %xmm0, %xmm1, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_mul_y_sub_x_negone:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %xmm1, %xmm1, %xmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%s = fsub <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <4 x float> %y, %s
define float @test_f32_interp(float %x, float %y, float %t) {
; FMA-LABEL: test_f32_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ss %xmm1, %xmm2, %xmm1
+; FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; FMA-NEXT: vsubss %xmm2, %xmm3, %xmm3
+; FMA-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213ss %xmm1, %xmm2, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f32_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddss %xmm1, %xmm1, %xmm2, %xmm1
+; FMA4-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; FMA4-NEXT: vsubss %xmm2, %xmm3, %xmm3
+; FMA4-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddss %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f32_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX512-NEXT: vsubss %xmm2, %xmm3, %xmm3
+; AVX512-NEXT: vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213ss %xmm1, %xmm2, %xmm0
; AVX512-NEXT: retq
%t1 = fsub float 1.0, %t
define <4 x float> @test_v4f32_interp(<4 x float> %x, <4 x float> %y, <4 x float> %t) {
; FMA-LABEL: test_v4f32_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %xmm1, %xmm2, %xmm1
+; FMA-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %xmm2, %xmm3, %xmm3
+; FMA-NEXT: vmulps %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213ps %xmm1, %xmm2, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f32_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %xmm1, %xmm1, %xmm2, %xmm1
+; FMA4-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %xmm2, %xmm3, %xmm3
+; FMA4-NEXT: vmulps %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddps %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f32_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm3
+; AVX512-NEXT: vsubps %xmm2, %xmm3, %xmm3
+; AVX512-NEXT: vmulps %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213ps %xmm1, %xmm2, %xmm0
; AVX512-NEXT: retq
%t1 = fsub <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %t
define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float> %t) {
; FMA-LABEL: test_v8f32_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %ymm1, %ymm2, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %ymm2, %ymm3, %ymm3
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: vfmadd213ps %ymm1, %ymm2, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f32_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %ymm1, %ymm1, %ymm2, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %ymm2, %ymm3, %ymm3
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vfmaddps %ymm1, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f32_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
+; AVX512-NEXT: vsubps %ymm2, %ymm3, %ymm3
+; AVX512-NEXT: vmulps %ymm3, %ymm1, %ymm1
; AVX512-NEXT: vfmadd213ps %ymm1, %ymm2, %ymm0
; AVX512-NEXT: retq
%t1 = fsub <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %t
define double @test_f64_interp(double %x, double %y, double %t) {
; FMA-LABEL: test_f64_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213sd %xmm1, %xmm2, %xmm1
+; FMA-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; FMA-NEXT: vsubsd %xmm2, %xmm3, %xmm3
+; FMA-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213sd %xmm1, %xmm2, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_f64_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddsd %xmm1, %xmm1, %xmm2, %xmm1
+; FMA4-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; FMA4-NEXT: vsubsd %xmm2, %xmm3, %xmm3
+; FMA4-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddsd %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_f64_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213sd %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-NEXT: vsubsd %xmm2, %xmm3, %xmm3
+; AVX512-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213sd %xmm1, %xmm2, %xmm0
; AVX512-NEXT: retq
%t1 = fsub double 1.0, %t
define <2 x double> @test_v2f64_interp(<2 x double> %x, <2 x double> %y, <2 x double> %t) {
; FMA-LABEL: test_v2f64_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213pd %xmm1, %xmm2, %xmm1
+; FMA-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubpd %xmm2, %xmm3, %xmm3
+; FMA-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; FMA-NEXT: vfmadd213pd %xmm1, %xmm2, %xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v2f64_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddpd %xmm1, %xmm1, %xmm2, %xmm1
+; FMA4-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubpd %xmm2, %xmm3, %xmm3
+; FMA4-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; FMA4-NEXT: vfmaddpd %xmm1, %xmm2, %xmm0, %xmm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v2f64_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213pd %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vmovapd {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00]
+; AVX512-NEXT: vsubpd %xmm2, %xmm3, %xmm3
+; AVX512-NEXT: vmulpd %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vfmadd213pd %xmm1, %xmm2, %xmm0
; AVX512-NEXT: retq
%t1 = fsub <2 x double> <double 1.0, double 1.0>, %t
define <4 x double> @test_v4f64_interp(<4 x double> %x, <4 x double> %y, <4 x double> %t) {
; FMA-LABEL: test_v4f64_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213pd %ymm1, %ymm2, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubpd %ymm2, %ymm3, %ymm3
+; FMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA-NEXT: vfmadd213pd %ymm1, %ymm2, %ymm0
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v4f64_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddpd %ymm1, %ymm1, %ymm2, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm2, %ymm3, %ymm3
+; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; FMA4-NEXT: vfmaddpd %ymm1, %ymm2, %ymm0, %ymm0
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v4f64_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213pd %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %ymm3
+; AVX512-NEXT: vsubpd %ymm2, %ymm3, %ymm3
+; AVX512-NEXT: vmulpd %ymm3, %ymm1, %ymm1
; AVX512-NEXT: vfmadd213pd %ymm1, %ymm2, %ymm0
; AVX512-NEXT: retq
%t1 = fsub <4 x double> <double 1.0, double 1.0, double 1.0, double 1.0>, %t
define <16 x float> @test_v16f32_mul_add_x_one_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_add_x_one_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmadd213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vaddps %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_add_x_one_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vaddps %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_add_x_one_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = fadd <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <16 x float> %a, %y
define <8 x double> @test_v8f64_mul_y_add_x_one(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_add_x_one:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmadd213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vaddpd %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_add_x_one:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vaddpd %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_add_x_one:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%a = fadd <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
%m = fmul <8 x double> %y, %a
define <16 x float> @test_v16f32_mul_add_x_negone_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_add_x_negone_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmsub213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vaddps %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_add_x_negone_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vaddps %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_add_x_negone_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = fadd <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <16 x float> %a, %y
define <8 x double> @test_v8f64_mul_y_add_x_negone(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_add_x_negone:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmsub213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vaddpd %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_add_x_negone:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vaddpd %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_add_x_negone:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%a = fadd <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
%m = fmul <8 x double> %y, %a
define <16 x float> @test_v16f32_mul_sub_one_x_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_sub_one_x_y:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfnmadd213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %ymm1, %ymm4, %ymm1
+; FMA-NEXT: vsubps %ymm0, %ymm4, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_sub_one_x_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfnmaddps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %ymm1, %ymm4, %ymm1
+; FMA4-NEXT: vsubps %ymm0, %ymm4, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_sub_one_x_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %zmm2
+; AVX512-NEXT: vsubps %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%s = fsub <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
%m = fmul <16 x float> %s, %y
define <8 x double> @test_v8f64_mul_y_sub_one_x(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_sub_one_x:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfnmadd213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubpd %ymm1, %ymm4, %ymm1
+; FMA-NEXT: vsubpd %ymm0, %ymm4, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_sub_one_x:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfnmaddpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm1, %ymm4, %ymm1
+; FMA4-NEXT: vsubpd %ymm0, %ymm4, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_sub_one_x:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %zmm2
+; AVX512-NEXT: vsubpd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%s = fsub <8 x double> <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>, %x
%m = fmul <8 x double> %y, %s
define <16 x float> @test_v16f32_mul_sub_negone_x_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_sub_negone_x_y:
; FMA: # BB#0:
-; FMA-NEXT: vfnmsub213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfnmsub213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubps %ymm1, %ymm4, %ymm1
+; FMA-NEXT: vsubps %ymm0, %ymm4, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_sub_negone_x_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmsubps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfnmsubps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubps %ymm1, %ymm4, %ymm1
+; FMA4-NEXT: vsubps %ymm0, %ymm4, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_sub_negone_x_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmsub213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %zmm2
+; AVX512-NEXT: vsubps %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
 %s = fsub <16 x float> <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>, %x
%m = fmul <16 x float> %s, %y
define <8 x double> @test_v8f64_mul_y_sub_negone_x(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_sub_negone_x:
; FMA: # BB#0:
-; FMA-NEXT: vfnmsub213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfnmsub213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubpd %ymm1, %ymm4, %ymm1
+; FMA-NEXT: vsubpd %ymm0, %ymm4, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_sub_negone_x:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmsubpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfnmsubpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm1, %ymm4, %ymm1
+; FMA4-NEXT: vsubpd %ymm0, %ymm4, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_sub_negone_x:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmsub213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %zmm2
+; AVX512-NEXT: vsubpd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%s = fsub <8 x double> <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>, %x
%m = fmul <8 x double> %y, %s
define <16 x float> @test_v16f32_mul_sub_x_one_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_sub_x_one_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmsub213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vsubps %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_sub_x_one_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmsubps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vsubps %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_sub_x_one_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%s = fsub <16 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
%m = fmul <16 x float> %s, %y
define <8 x double> @test_v8f64_mul_y_sub_x_one(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_sub_x_one:
; FMA: # BB#0:
-; FMA-NEXT: vfmsub213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmsub213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubpd %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vsubpd %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_sub_x_one:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmsubpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmsubpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vsubpd %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_sub_x_one:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmsub213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%s = fsub <8 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
%m = fmul <8 x double> %y, %s
define <16 x float> @test_v16f32_mul_sub_x_negone_y(<16 x float> %x, <16 x float> %y) {
; FMA-LABEL: test_v16f32_mul_sub_x_negone_y:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213ps %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmadd213ps %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubps %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vsubps %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_mul_sub_x_negone_y:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddps %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmaddps %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovaps {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubps %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vsubps %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_mul_sub_x_negone_y:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213ps %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vsubps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%s = fsub <16 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0, float -1.0>
%m = fmul <16 x float> %s, %y
define <8 x double> @test_v8f64_mul_y_sub_x_negone(<8 x double> %x, <8 x double> %y) {
; FMA-LABEL: test_v8f64_mul_y_sub_x_negone:
; FMA: # BB#0:
-; FMA-NEXT: vfmadd213pd %ymm2, %ymm2, %ymm0
-; FMA-NEXT: vfmadd213pd %ymm3, %ymm3, %ymm1
+; FMA-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA-NEXT: vsubpd %ymm4, %ymm1, %ymm1
+; FMA-NEXT: vsubpd %ymm4, %ymm0, %ymm0
+; FMA-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_mul_y_sub_x_negone:
; FMA4: # BB#0:
-; FMA4-NEXT: vfmaddpd %ymm2, %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vfmaddpd %ymm3, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmovapd {{.*#+}} ymm4 = [-1.000000e+00,-1.000000e+00,-1.000000e+00,-1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm4, %ymm1, %ymm1
+; FMA4-NEXT: vsubpd %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vmulpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT: vmulpd %ymm1, %ymm3, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_mul_y_sub_x_negone:
; AVX512: # BB#0:
-; AVX512-NEXT: vfmadd213pd %zmm1, %zmm1, %zmm0
+; AVX512-NEXT: vsubpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%s = fsub <8 x double> %x, <double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0, double -1.0>
%m = fmul <8 x double> %y, %s
define <16 x float> @test_v16f32_interp(<16 x float> %x, <16 x float> %y, <16 x float> %t) {
; FMA-LABEL: test_v16f32_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213ps %ymm3, %ymm5, %ymm3
-; FMA-NEXT: vfnmadd213ps %ymm2, %ymm4, %ymm2
+; FMA-NEXT: vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubps %ymm4, %ymm6, %ymm7
+; FMA-NEXT: vsubps %ymm5, %ymm6, %ymm6
+; FMA-NEXT: vmulps %ymm6, %ymm3, %ymm3
+; FMA-NEXT: vmulps %ymm7, %ymm2, %ymm2
; FMA-NEXT: vfmadd213ps %ymm2, %ymm4, %ymm0
; FMA-NEXT: vfmadd213ps %ymm3, %ymm5, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v16f32_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddps %ymm3, %ymm3, %ymm5, %ymm3
-; FMA4-NEXT: vfnmaddps %ymm2, %ymm2, %ymm4, %ymm2
+; FMA4-NEXT: vmovaps {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubps %ymm4, %ymm6, %ymm7
+; FMA4-NEXT: vsubps %ymm5, %ymm6, %ymm6
+; FMA4-NEXT: vmulps %ymm6, %ymm3, %ymm3
+; FMA4-NEXT: vmulps %ymm7, %ymm2, %ymm2
; FMA4-NEXT: vfmaddps %ymm2, %ymm4, %ymm0, %ymm0
; FMA4-NEXT: vfmaddps %ymm3, %ymm5, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v16f32_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213ps %zmm1, %zmm2, %zmm1
+; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %zmm3
+; AVX512-NEXT: vsubps %zmm2, %zmm3, %zmm3
+; AVX512-NEXT: vmulps %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vfmadd213ps %zmm1, %zmm2, %zmm0
; AVX512-NEXT: retq
%t1 = fsub <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %t
define <8 x double> @test_v8f64_interp(<8 x double> %x, <8 x double> %y, <8 x double> %t) {
; FMA-LABEL: test_v8f64_interp:
; FMA: # BB#0:
-; FMA-NEXT: vfnmadd213pd %ymm3, %ymm5, %ymm3
-; FMA-NEXT: vfnmadd213pd %ymm2, %ymm4, %ymm2
+; FMA-NEXT: vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA-NEXT: vsubpd %ymm4, %ymm6, %ymm7
+; FMA-NEXT: vsubpd %ymm5, %ymm6, %ymm6
+; FMA-NEXT: vmulpd %ymm6, %ymm3, %ymm3
+; FMA-NEXT: vmulpd %ymm7, %ymm2, %ymm2
; FMA-NEXT: vfmadd213pd %ymm2, %ymm4, %ymm0
; FMA-NEXT: vfmadd213pd %ymm3, %ymm5, %ymm1
; FMA-NEXT: retq
;
; FMA4-LABEL: test_v8f64_interp:
; FMA4: # BB#0:
-; FMA4-NEXT: vfnmaddpd %ymm3, %ymm3, %ymm5, %ymm3
-; FMA4-NEXT: vfnmaddpd %ymm2, %ymm2, %ymm4, %ymm2
+; FMA4-NEXT: vmovapd {{.*#+}} ymm6 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; FMA4-NEXT: vsubpd %ymm4, %ymm6, %ymm7
+; FMA4-NEXT: vsubpd %ymm5, %ymm6, %ymm6
+; FMA4-NEXT: vmulpd %ymm6, %ymm3, %ymm3
+; FMA4-NEXT: vmulpd %ymm7, %ymm2, %ymm2
; FMA4-NEXT: vfmaddpd %ymm2, %ymm4, %ymm0, %ymm0
; FMA4-NEXT: vfmaddpd %ymm3, %ymm5, %ymm1, %ymm1
; FMA4-NEXT: retq
;
; AVX512-LABEL: test_v8f64_interp:
; AVX512: # BB#0:
-; AVX512-NEXT: vfnmadd213pd %zmm1, %zmm2, %zmm1
+; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %zmm3
+; AVX512-NEXT: vsubpd %zmm2, %zmm3, %zmm3
+; AVX512-NEXT: vmulpd %zmm3, %zmm1, %zmm1
; AVX512-NEXT: vfmadd213pd %zmm1, %zmm2, %zmm0
; AVX512-NEXT: retq
%t1 = fsub <8 x double> <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>, %t