From 5d91a9a2a00770f8bfe57fd19e01cfcf4072deed Mon Sep 17 00:00:00 2001 From: Tanner Gooding Date: Tue, 22 Oct 2019 09:17:21 -0700 Subject: [PATCH] Adding back more arm intrinsics that existing before refactoring (dotnet/coreclr#27153) * Adding AdvSimd.And * Adding AdvSimd.AndNot * Adding AdvSimd.Or * Adding AdvSimd.OrNot * Adding AdvSimd.Xor * Adding AdvSimd.Not * Removing a trailing whitespace from AdvSimd.PlatformNotSupported.cs * Adding AdvSimd.Subtract Commit migrated from https://github.com/dotnet/coreclr/commit/04d2a2292e51f85f8192998684fd0d44da73e28f --- .../Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs | 1009 ++++++++++++++++++++ .../src/System/Runtime/Intrinsics/Arm/AdvSimd.cs | 1009 ++++++++++++++++++++ 2 files changed, 2018 insertions(+) diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs index 0268c8b..64e0ff4 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs @@ -45,6 +45,12 @@ namespace System.Runtime.Intrinsics.Arm /// A64: FADD Vd.2D, Vn.2D, Vm.2D /// public static Vector128 Add(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b) + /// A64: FSUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } } /// @@ -256,6 +262,294 @@ namespace System.Runtime.Intrinsics.Arm public static Vector64 AddScalar(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } /// + /// uint8x8_t vand_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, 
Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vand_f64 (float64x1_t a, float64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + // /// + // public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vand_s16 (int16x4_t a, int16x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vand_s32(int32x2_t a, int32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vand_s64 (int64x1_t a, int64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// + // public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vand_s8 (int8x8_t a, int8x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vand_f32 (float32x2_t a, float32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vand_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vand_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vand_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// + // public static Vector64 And(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vand_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vand_f64 (float64x2_t a, float64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vand_s16 (int16x8_t a, int16x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vand_s32(int32x4_t a, int32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vand_s64 (int64x2_t a, int64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vand_s8 (int8x16_t a, int8x16_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vand_f32 (float32x4_t a, float32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vand_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vand_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vand_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t vbic_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vbic_f64 (float64x1_t a, float64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vbic_s16 (int16x4_t a, int16x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vbic_s32(int32x2_t a, int32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vbic_s64 (int64x1_t a, int64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vbic_s8 (int8x8_t a, int8x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vbic_f32 (float32x2_t a, float32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vbic_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vbic_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vbic_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vbic_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vbic_f64 (float64x2_t a, float64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vbic_s16 (int16x8_t a, int16x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vbic_s32(int32x4_t a, int32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vbic_s64 (int64x2_t a, int64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vbic_s8 (int8x16_t a, int8x16_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vbic_f32 (float32x4_t a, float32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vbic_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vbic_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vbic_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// /// uint8x8_t vld1_u8 (uint8_t const * ptr) /// A32: VLD1.8 Dd, [Rn] /// A64: LD1 Vt.8B, [Xn] @@ -373,5 +667,720 @@ namespace System.Runtime.Intrinsics.Arm /// A64: LD1 Vt.2D, [Xn] /// public static unsafe Vector128 LoadVector128(ulong* address) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t vmvn_u8 (uint8x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vmvn_f64 (float64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vmvn_s16 (int16x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vmvn_s32(int32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vmvn_s64 (int64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// + // public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vmvn_s8 (int8x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vmvn_f32 (float32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vmvn_u16 (uint16x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vmvn_u32 (uint32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vmvn_u64 (uint64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// + // public static Vector64 Not(Vector64 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vmvn_u8 (uint8x16_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vmvn_f64 (float64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vmvn_s16 (int16x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vmvn_s32(int32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vmvn_s64 (int64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vmvn_s8 (int8x16_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vmvn_f32 (float32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vmvn_u16 (uint16x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vmvn_u32 (uint32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vmvn_u64 (uint64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t vorr_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vorr_f64 (float64x1_t a, float64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vorr_s16 (int16x4_t a, int16x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vorr_s32(int32x2_t a, int32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vorr_s64 (int64x1_t a, int64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// + // public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vorr_s8 (int8x8_t a, int8x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vorr_f32 (float32x2_t a, float32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vorr_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vorr_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vorr_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// + // public static Vector64 Or(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vorr_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vorr_f64 (float64x2_t a, float64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vorr_s16 (int16x8_t a, int16x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vorr_s32(int32x4_t a, int32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vorr_s64 (int64x2_t a, int64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vorr_s8 (int8x16_t a, int8x16_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vorr_f32 (float32x4_t a, float32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vorr_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vorr_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vorr_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t vorn_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vorn_f64 (float64x1_t a, float64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vorn_s16 (int16x4_t a, int16x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vorn_s32(int32x2_t a, int32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vorn_s64 (int64x1_t a, int64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vorn_s8 (int8x8_t a, int8x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vorn_f32 (float32x2_t a, float32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vorn_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vorn_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vorn_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vorn_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t vorn_f64 (float64x2_t a, float64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vorn_s16 (int16x8_t a, int16x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vorn_s32(int32x4_t a, int32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vorn_s64 (int64x2_t a, int64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vorn_s8 (int8x16_t a, int8x16_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vorn_f32 (float32x4_t a, float32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vorn_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vorn_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vorn_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VSUB.I8 Dd, Dn, Dm + /// A64: SUB Vd.8B, Vn.8B, Vm.8B + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t vsub_s16 (int16x4_t a, int16x4_t b) + /// A32: VSUB.I16 Dd, Dn, Dm + /// A64: SUB Vd.4H, Vn.4H, Vm.4H + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t vsub_s32 (int32x2_t a, int32x2_t b) + /// A32: VSUB.I32 Dd, Dn, Dm + /// A64: SUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t vsub_s8 (int8x8_t a, int8x8_t b) + /// A32: VSUB.I8 Dd, Dn, Dm + /// A64: SUB Vd.8B, Vn.8B, Vm.8B + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t vsub_f32 (float32x2_t a, float32x2_t b) + /// A32: VSUB.F32 Dd, Dn, Dm + /// A64: FSUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { 
throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t vsub_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VSUB.I16 Dd, Dn, Dm + /// A64: SUB Vd.4H, Vn.4H, Vm.4H + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t vsub_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VSUB.I32 Dd, Dn, Dm + /// A64: SUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vsubq_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VSUB.I8 Qd, Qn, Qm + /// A64: SUB Vd.16B, Vn.16B, Vm.16B + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vsubq_s16 (int16x8_t a, int16x8_t b) + /// A32: VSUB.I16 Qd, Qn, Qm + /// A64: SUB Vd.8H, Vn.8H, Vm.8H + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vsubq_s32 (int32x4_t a, int32x4_t b) + /// A32: VSUB.I32 Qd, Qn, Qm + /// A64: SUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vsubq_s64 (int64x2_t a, int64x2_t b) + /// A32: VSUB.I64 Qd, Qn, Qm + /// A64: SUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t vsubq_s8 (int8x16_t a, int8x16_t b) + /// A32: VSUB.I8 Qd, Qn, Qm + /// A64: SUB Vd.16B, Vn.16B, Vm.16B + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t vsubq_f32 (float32x4_t a, float32x4_t b) + /// A32: VSUB.F32 Qd, Qn, Qm + /// A64: FSUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw 
new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vsubq_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VSUB.I16 Qd, Qn, Qm + /// A64: SUB Vd.8H, Vn.8H, Vm.8H + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vsubq_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VSUB.I32 Qd, Qn, Qm + /// A64: SUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vsubq_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VSUB.I64 Qd, Qn, Qm + /// A64: SUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// float64x1_t vsub_f64 (float64x1_t a, float64x1_t b) + // /// A32: VSUB.F64 Dd, Dn, Dm + // /// A64: FSUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t vsub_s64 (int64x1_t a, int64x1_t b) + // /// A32: VSUB.I64 Dd, Dn, Dm + // /// A64: SUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t vsub_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VSUB.I64 Dd, Dn, Dm + // /// A64: SUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// A32: VSUB.F32 Sd, Sn, Sm + /// A64: + /// + public static Vector64 SubtractScalar(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x8_t veor_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// 
float64x1_t veor_f64 (float64x1_t a, float64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x4_t veor_s16 (int16x4_t a, int16x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x2_t veor_s32(int32x2_t a, int32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// int64x1_t veor_s64 (int64x1_t a, int64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x8_t veor_s8 (int8x8_t a, int8x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x2_t veor_f32 (float32x2_t a, float32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x4_t veor_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x2_t veor_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + // /// + // /// uint64x1_t veor_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t veor_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float64x2_t veor_f64 (float64x2_t a, float64x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t veor_s16 (int16x8_t a, int16x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t veor_s32(int32x4_t a, int32x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t veor_s64 (int64x2_t a, int64x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// int8x16_t veor_s8 (int8x16_t a, int8x16_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// float32x4_t veor_f32 (float32x4_t a, float32x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t veor_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t veor_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t veor_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) { throw new PlatformNotSupportedException(); } } } diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs index 6e2a332..8d0fbd5 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs @@ -47,6 +47,12 @@ namespace System.Runtime.Intrinsics.Arm /// A64: FADD Vd.2D, Vn.2D, Vm.2D /// public static Vector128 Add(Vector128 left, Vector128 right) => Add(left, right); + + /// + /// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b) + /// A64: FSUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); } /// @@ -258,6 +264,294 @@ namespace System.Runtime.Intrinsics.Arm public static Vector64 AddScalar(Vector64 left, Vector64 right) => AddScalar(left, right); /// + /// uint8x8_t vand_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + // /// + // /// float64x1_t vand_f64 (float64x1_t a, 
float64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + // /// + // public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// int16x4_t vand_s16 (int16x4_t a, int16x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// int32x2_t vand_s32(int32x2_t a, int32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + // /// + // /// int64x1_t vand_s64 (int64x1_t a, int64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// + // public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// int8x8_t vand_s8 (int8x8_t a, int8x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// float32x2_t vand_f32 (float32x2_t a, float32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// uint16x4_t vand_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// uint32x2_t vand_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + // /// + // /// uint64x1_t vand_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VAND Dd, Dn, Dm + // /// A64: AND Vd, Vn, Vm + // /// + // public static Vector64 And(Vector64 left, Vector64 right) => And(left, right); + + /// + /// uint8x16_t vand_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// float64x2_t vand_f64 (float64x2_t a, float64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// int16x8_t vand_s16 (int16x8_t a, int16x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// int32x4_t vand_s32(int32x4_t a, int32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// int64x2_t vand_s64 (int64x2_t a, int64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// int8x16_t vand_s8 (int8x16_t a, int8x16_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// float32x4_t vand_f32 (float32x4_t a, float32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// uint16x8_t vand_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// uint32x4_t vand_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// uint64x2_t vand_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VAND Dd, Dn, Dm + /// A64: AND Vd, Vn, Vm + /// + public static Vector128 And(Vector128 left, Vector128 right) => And(left, right); + + /// + /// uint8x8_t vbic_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + // /// + // /// float64x1_t vbic_f64 (float64x1_t a, float64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// int16x4_t vbic_s16 (int16x4_t a, int16x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// int32x2_t vbic_s32(int32x2_t a, int32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + // /// + // /// int64x1_t vbic_s64 (int64x1_t a, int64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// int8x8_t vbic_s8 (int8x8_t a, int8x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// float32x2_t vbic_f32 (float32x2_t a, float32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// uint16x4_t vbic_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// uint32x2_t vbic_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + // /// + // /// uint64x1_t vbic_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VBIC Dd, Dn, Dm + // /// A64: BIC Vd, Vn, Vm + // /// + // public static Vector64 AndNot(Vector64 left, Vector64 right) => AndNot(left, right); + + /// + /// uint8x16_t vbic_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// float64x2_t vbic_f64 (float64x2_t a, float64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// int16x8_t vbic_s16 (int16x8_t a, int16x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// int32x4_t vbic_s32(int32x4_t a, int32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// int64x2_t vbic_s64 (int64x2_t a, int64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// int8x16_t vbic_s8 (int8x16_t a, int8x16_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// float32x4_t vbic_f32 (float32x4_t a, float32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// uint16x8_t vbic_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// uint32x4_t vbic_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// + /// uint64x2_t vbic_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VBIC Dd, Dn, Dm + /// A64: BIC Vd, Vn, Vm + /// + public static Vector128 AndNot(Vector128 left, Vector128 right) => AndNot(left, right); + + /// /// uint8x8_t vld1_u8 (uint8_t const * ptr) /// A32: VLD1.8 Dd, [Rn] /// A64: LD1 Vt.8B, [Xn] @@ -375,5 +669,720 @@ namespace System.Runtime.Intrinsics.Arm /// A64: LD1 Vt.2D, [Xn] /// public static unsafe Vector128 LoadVector128(ulong* address) => LoadVector128(address); + + /// + /// uint8x8_t vmvn_u8 (uint8x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + // /// + // /// float64x1_t vmvn_f64 (float64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// int16x4_t vmvn_s16 (int16x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// int32x2_t vmvn_s32(int32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + // /// + // /// int64x1_t vmvn_s64 (int64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// + // public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// int8x8_t vmvn_s8 (int8x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// float32x2_t vmvn_f32 (float32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + /// + public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// uint16x4_t vmvn_u16 (uint16x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// uint32x2_t vmvn_u32 (uint32x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector64 Not(Vector64 value) => Not(value); + + // /// + // /// uint64x1_t vmvn_u64 (uint64x1_t a) + // /// A32: VMVN Dd, Dn, Dm + // /// A64: MVN Vd, Vn, Vm + // /// + // public static Vector64 Not(Vector64 value) => Not(value); + + /// + /// uint8x16_t vmvn_u8 (uint8x16_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// float64x2_t vmvn_f64 (float64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// int16x8_t vmvn_s16 (int16x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// int32x4_t vmvn_s32(int32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// int64x2_t vmvn_s64 (int64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// int8x16_t vmvn_s8 (int8x16_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// float32x4_t vmvn_f32 (float32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// uint16x8_t vmvn_u16 (uint16x8_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// uint32x4_t vmvn_u32 (uint32x4_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// uint64x2_t vmvn_u64 (uint64x2_t a) + /// A32: VMVN Dd, Dn, Dm + /// A64: MVN Vd, Vn, Vm + /// + public static Vector128 Not(Vector128 value) => Not(value); + + /// + /// uint8x8_t vorr_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + // /// + // /// float64x1_t vorr_f64 (float64x1_t a, float64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// The above native signature does not exist. 
We provide this additional overload for consistency with the other scalar APIs. + // /// + // public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// int16x4_t vorr_s16 (int16x4_t a, int16x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// int32x2_t vorr_s32(int32x2_t a, int32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + // /// + // /// int64x1_t vorr_s64 (int64x1_t a, int64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// + // public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// int8x8_t vorr_s8 (int8x8_t a, int8x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// float32x2_t vorr_f32 (float32x2_t a, float32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// uint16x4_t vorr_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// uint32x2_t vorr_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + // /// + // /// uint64x1_t vorr_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VORR Dd, Dn, Dm + // /// A64: ORR Vd, Vn, Vm + // /// + // public static Vector64 Or(Vector64 left, Vector64 right) => Or(left, right); + + /// + /// uint8x16_t vorr_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// float64x2_t vorr_f64 (float64x2_t a, float64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// int16x8_t vorr_s16 (int16x8_t a, int16x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// int32x4_t vorr_s32(int32x4_t a, int32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// int64x2_t vorr_s64 (int64x2_t a, int64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// int8x16_t vorr_s8 (int8x16_t a, int8x16_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// float32x4_t vorr_f32 (float32x4_t a, float32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// uint16x8_t vorr_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// uint32x4_t vorr_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// uint64x2_t vorr_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VORR Dd, Dn, Dm + /// A64: ORR Vd, Vn, Vm + /// + public static Vector128 Or(Vector128 left, Vector128 right) => Or(left, right); + + /// + /// uint8x8_t vorn_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + // /// + // /// float64x1_t vorn_f64 (float64x1_t a, float64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// int16x4_t vorn_s16 (int16x4_t a, int16x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// int32x2_t vorn_s32(int32x2_t a, int32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + // /// + // /// int64x1_t vorn_s64 (int64x1_t a, int64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// int8x8_t vorn_s8 (int8x8_t a, int8x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// float32x2_t vorn_f32 (float32x2_t a, float32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// uint16x4_t vorn_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// uint32x2_t vorn_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + // /// + // /// uint64x1_t vorn_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VORN Dd, Dn, Dm + // /// A64: ORN Vd, Vn, Vm + // /// + // public static Vector64 OrNot(Vector64 left, Vector64 right) => OrNot(left, right); + + /// + /// uint8x16_t vorn_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// float64x2_t vorn_f64 (float64x2_t a, float64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// int16x8_t vorn_s16 (int16x8_t a, int16x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// int32x4_t vorn_s32(int32x4_t a, int32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// int64x2_t vorn_s64 (int64x2_t a, int64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// int8x16_t vorn_s8 (int8x16_t a, int8x16_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// float32x4_t vorn_f32 (float32x4_t a, float32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// uint16x8_t vorn_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// uint32x4_t vorn_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// uint64x2_t vorn_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VORN Dd, Dn, Dm + /// A64: ORN Vd, Vn, Vm + /// + public static Vector128 OrNot(Vector128 left, Vector128 right) => OrNot(left, right); + + /// + /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VSUB.I8 Dd, Dn, Dm + /// A64: SUB Vd.8B, Vn.8B, Vm.8B + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// int16x4_t vsub_s16 (int16x4_t a, int16x4_t b) + /// A32: VSUB.I16 Dd, Dn, Dm + /// A64: SUB Vd.4H, Vn.4H, Vm.4H + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// int32x2_t vsub_s32 (int32x2_t a, int32x2_t b) + /// A32: VSUB.I32 Dd, Dn, Dm + /// A64: SUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// int8x8_t vsub_s8 (int8x8_t a, int8x8_t b) + /// A32: VSUB.I8 Dd, Dn, Dm + /// A64: SUB Vd.8B, Vn.8B, Vm.8B + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// float32x2_t vsub_f32 (float32x2_t a, float32x2_t b) + /// A32: VSUB.F32 Dd, Dn, Dm + /// A64: FSUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// uint16x4_t vsub_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VSUB.I16 Dd, Dn, Dm + /// A64: SUB Vd.4H, Vn.4H, Vm.4H + /// + public static Vector64 
Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// uint32x2_t vsub_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VSUB.I32 Dd, Dn, Dm + /// A64: SUB Vd.2S, Vn.2S, Vm.2S + /// + public static Vector64 Subtract(Vector64 left, Vector64 right) => Subtract(left, right); + + /// + /// uint8x16_t vsubq_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VSUB.I8 Qd, Qn, Qm + /// A64: SUB Vd.16B, Vn.16B, Vm.16B + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// int16x8_t vsubq_s16 (int16x8_t a, int16x8_t b) + /// A32: VSUB.I16 Qd, Qn, Qm + /// A64: SUB Vd.8H, Vn.8H, Vm.8H + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// int32x4_t vsubq_s32 (int32x4_t a, int32x4_t b) + /// A32: VSUB.I32 Qd, Qn, Qm + /// A64: SUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// int64x2_t vsubq_s64 (int64x2_t a, int64x2_t b) + /// A32: VSUB.I64 Qd, Qn, Qm + /// A64: SUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// int8x16_t vsubq_s8 (int8x16_t a, int8x16_t b) + /// A32: VSUB.I8 Qd, Qn, Qm + /// A64: SUB Vd.16B, Vn.16B, Vm.16B + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// float32x4_t vsubq_f32 (float32x4_t a, float32x4_t b) + /// A32: VSUB.F32 Qd, Qn, Qm + /// A64: FSUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// uint16x8_t vsubq_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VSUB.I16 Qd, Qn, Qm + /// A64: SUB Vd.8H, Vn.8H, Vm.8H + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// uint32x4_t vsubq_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VSUB.I32 Qd, Qn, Qm + /// 
A64: SUB Vd.4S, Vn.4S, Vm.4S + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + /// + /// uint64x2_t vsubq_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VSUB.I64 Qd, Qn, Qm + /// A64: SUB Vd.2D, Vn.2D, Vm.2D + /// + public static Vector128 Subtract(Vector128 left, Vector128 right) => Subtract(left, right); + + // /// + // /// float64x1_t vsub_f64 (float64x1_t a, float64x1_t b) + // /// A32: VSUB.F64 Dd, Dn, Dm + // /// A64: FSUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) => SubtractScalar(left, right); + + // /// + // /// int64x1_t vsub_s64 (int64x1_t a, int64x1_t b) + // /// A32: VSUB.I64 Dd, Dn, Dm + // /// A64: SUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) => SubtractScalar(left, right); + + // /// + // /// uint64x1_t vsub_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VSUB.I64 Dd, Dn, Dm + // /// A64: SUB Dd, Dn, Dm + // /// + // public static Vector64 SubtractScalar(Vector64 left, Vector64 right) => SubtractScalar(left, right); + + /// + /// A32: VSUB.F32 Sd, Sn, Sm + /// A64: FSUB Sd, Sn, Sm + /// + public static Vector64 SubtractScalar(Vector64 left, Vector64 right) => SubtractScalar(left, right); + + /// + /// uint8x8_t veor_u8 (uint8x8_t a, uint8x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + // /// + // /// float64x1_t veor_f64 (float64x1_t a, float64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// int16x4_t veor_s16 (int16x4_t a, int16x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// int32x2_t veor_s32(int32x2_t a, int32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + // /// + // /// int64x1_t veor_s64 (int64x1_t a, int64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// int8x8_t veor_s8 (int8x8_t a, int8x8_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// float32x2_t veor_f32 (float32x2_t a, float32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// uint16x4_t veor_u16 (uint16x4_t a, uint16x4_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// uint32x2_t veor_u32 (uint32x2_t a, uint32x2_t b) + /// A32: VEOR Dd, Dn, Dm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + // /// + // /// uint64x1_t veor_u64 (uint64x1_t a, uint64x1_t b) + // /// A32: VEOR Dd, Dn, Dm + // /// A64: EOR Vd, Vn, Vm + // /// + // public static Vector64 Xor(Vector64 left, Vector64 right) => Xor(left, right); + + /// + /// uint8x16_t veorq_u8 (uint8x16_t a, uint8x16_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// float64x2_t veorq_f64 (float64x2_t a, float64x2_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
+ /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// int16x8_t veorq_s16 (int16x8_t a, int16x8_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// int32x4_t veorq_s32(int32x4_t a, int32x4_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// int64x2_t veorq_s64 (int64x2_t a, int64x2_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// int8x16_t veorq_s8 (int8x16_t a, int8x16_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// float32x4_t veorq_f32 (float32x4_t a, float32x4_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// uint16x8_t veorq_u16 (uint16x8_t a, uint16x8_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// uint32x4_t veorq_u32 (uint32x4_t a, uint32x4_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); + + /// + /// uint64x2_t veorq_u64 (uint64x2_t a, uint64x2_t b) + /// A32: VEOR Qd, Qn, Qm + /// A64: EOR Vd, Vn, Vm + /// + public static Vector128 Xor(Vector128 left, Vector128 right) => Xor(left, right); } } -- 2.7.4