From c9d7eb1bc349c26ee94a560fe53def29626c63ea Mon Sep 17 00:00:00 2001 From: Tanner Gooding Date: Wed, 24 Jan 2018 10:56:41 -0800 Subject: [PATCH] Listing the instruction mappings for the FMA intrinsics. --- .../Intrinsics/X86/Fma.PlatformNotSupported.cs | 66 +++++++++++----------- .../src/System/Runtime/Intrinsics/X86/Fma.cs | 64 ++++++++++----------- 2 files changed, 65 insertions(+), 65 deletions(-) diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs index 2a6c3c9..b562a26 100644 --- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs +++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs @@ -15,140 +15,140 @@ namespace System.Runtime.Intrinsics.X86 public static bool IsSupported { get { return false; } } /// - /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c); VFMADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAdd(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c); VFMADDPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAdd(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c); VFMADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAdd(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c); VFMADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAdd(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c); VFMADDSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplyAddScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c); VFMADDSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplyAddScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c); VFMADDSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddSubtract(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c); VFMADDSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddSubtract(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c); VFMADDSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddSubtract(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmaddsub_pd 
(__m256d a, __m256d b, __m256d c); VFMADDSUBPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddSubtract(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c); VFMSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtract(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c); VFMSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtract(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c); VFMSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtract(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c); VFMSUBPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtract(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c); VFMSUBSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplySubtractScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c); VFMSUBSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplySubtractScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c); VFMSUBADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractAdd(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c); VFMSUBADDPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractAdd(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c); VFMSUBADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractAdd(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c); VFMSUBADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractAdd(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c); VFNMADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddNegated(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c); VFNMADDPD xmm, xmm, xmm/m128 /// public 
static Vector128 MultiplyAddNegated(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c); VFNMADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddNegated(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c); VFNMADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddNegated(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } - /// - /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c) + /// + /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c); VFNMADDSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplyAddNegatedScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c); VFNMADDSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplyAddNegatedScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c); VFNMSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractNegated(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c); VFNMSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractNegated(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c); VFNMSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractNegated(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c); VFNMSUBPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractNegated(Vector256 a, Vector256 b, Vector256 c) { throw new PlatformNotSupportedException(); } /// - /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c); VFNMSUBSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplySubtractNegatedScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } /// - /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c); VFNMSUBSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplySubtractNegatedScalar(Vector128 a, Vector128 b, Vector128 c) { throw new PlatformNotSupportedException(); } } diff --git a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs index 25561bd..12f3772 100644 --- a/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs +++ b/src/mscorlib/src/System/Runtime/Intrinsics/X86/Fma.cs @@ -15,140 +15,140 @@ namespace System.Runtime.Intrinsics.X86 public static bool IsSupported { get => IsSupported; } /// - /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmadd_ps 
(__m128 a, __m128 b, __m128 c); VFMADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAdd(Vector128 a, Vector128 b, Vector128 c) => MultiplyAdd(a, b, c); /// - /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c); VFMADDPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAdd(Vector128 a, Vector128 b, Vector128 c) => MultiplyAdd(a, b, c); /// - /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c); VFMADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAdd(Vector256 a, Vector256 b, Vector256 c) => MultiplyAdd(a, b, c); /// - /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c); VFMADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAdd(Vector256 a, Vector256 b, Vector256 c) => MultiplyAdd(a, b, c); /// - /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c); VFMADDSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplyAddScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddScalar(a, b, c); /// - /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c); VFMADDSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplyAddScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddScalar(a, b, c); /// - /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c); VFMADDSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddSubtract(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddSubtract(a, b, c); /// - /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c); VFMADDSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddSubtract(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddSubtract(a, b, c); /// - /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c); VFMADDSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddSubtract(Vector256 a, Vector256 b, Vector256 c) => MultiplyAddSubtract(a, b, c); /// - /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c); VFMADDSUBPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddSubtract(Vector256 a, Vector256 b, Vector256 c) => MultiplyAddSubtract(a, b, c); /// - /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c); VFMSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtract(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtract(a, b, c); /// - /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c); VFMSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtract(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtract(a, b, c); /// - /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c); VFMSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtract(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtract(a, b, c); /// - /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c); VFMSUBPD ymm, 
ymm, ymm/m256 /// public static Vector256 MultiplySubtract(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtract(a, b, c); /// - /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c); VFMSUBSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplySubtractScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractScalar(a, b, c); /// - /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c); VFMSUBSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplySubtractScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractScalar(a, b, c); /// - /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c); VFMSUBADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractAdd(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractAdd(a, b, c); /// - /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c); VFMSUBADDPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractAdd(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractAdd(a, b, c); /// - /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c); VFMSUBADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractAdd(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtractAdd(a, b, c); /// - /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c); VFMSUBADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractAdd(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtractAdd(a, b, c); /// - /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c); VFNMADDPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddNegated(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddNegated(a, b, c); /// - /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c); VFNMADDPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplyAddNegated(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddNegated(a, b, c); /// - /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c); VFNMADDPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddNegated(Vector256 a, Vector256 b, Vector256 c) => MultiplyAddNegated(a, b, c); /// - /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c); VFNMADDPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplyAddNegated(Vector256 a, Vector256 b, Vector256 c) => MultiplyAddNegated(a, b, c); /// - /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c); VFNMADDSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplyAddNegatedScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddNegatedScalar(a, b, c); /// - /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c); VFNMADDSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplyAddNegatedScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplyAddNegatedScalar(a, b, c); /// - /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c) + /// __m128 
_mm_fnmsub_ps (__m128 a, __m128 b, __m128 c); VFNMSUBPS xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractNegated(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractNegated(a, b, c); /// - /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c); VFNMSUBPD xmm, xmm, xmm/m128 /// public static Vector128 MultiplySubtractNegated(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractNegated(a, b, c); /// - /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c) + /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c); VFNMSUBPS ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractNegated(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtractNegated(a, b, c); /// - /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c) + /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c); VFNMSUBPD ymm, ymm, ymm/m256 /// public static Vector256 MultiplySubtractNegated(Vector256 a, Vector256 b, Vector256 c) => MultiplySubtractNegated(a, b, c); /// - /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c) + /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c); VFNMSUBSS xmm, xmm, xmm/m32 /// public static Vector128 MultiplySubtractNegatedScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractNegatedScalar(a, b, c); /// - /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c) + /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c); VFNMSUBSD xmm, xmm, xmm/m64 /// public static Vector128 MultiplySubtractNegatedScalar(Vector128 a, Vector128 b, Vector128 c) => MultiplySubtractNegatedScalar(a, b, c); } -- 2.7.4
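Note (not part of the patch itself): a minimal consumption sketch of the mapping documented above, assuming the publicly shipped System.Runtime.Intrinsics surface (Vector128.Create is assumed here and is not introduced by this change). The XML remarks list the legacy-style mnemonic; the JIT actually selects one of the VFMADD132/213/231 encodings.

    using System;
    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    internal static class FmaSample
    {
        private static void Main()
        {
            if (!Fma.IsSupported)
            {
                // Hardware without FMA3 takes this path; callers must guard on IsSupported.
                Console.WriteLine("FMA is not supported on this CPU.");
                return;
            }

            Vector128<float> a = Vector128.Create(1.0f, 2.0f, 3.0f, 4.0f);
            Vector128<float> b = Vector128.Create(5.0f, 6.0f, 7.0f, 8.0f);
            Vector128<float> c = Vector128.Create(9.0f, 10.0f, 11.0f, 12.0f);

            // Maps to the VFMADDPS xmm, xmm, xmm/m128 form documented above:
            // computes (a * b) + c element-wise with a single rounding step.
            Vector128<float> result = Fma.MultiplyAdd(a, b, c);

            Console.WriteLine(result); // <14, 22, 32, 44>
        }
    }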