CORJIT_FLAG_HAS_ARM64_LRCPC = 52, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
CORJIT_FLAG_HAS_ARM64_PMULL = 53, // ID_AA64ISAR0_EL1.AES is 2 or better
CORJIT_FLAG_HAS_ARM64_SHA1 = 54, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SHA2 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
+ CORJIT_FLAG_HAS_ARM64_SHA256 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
CORJIT_FLAG_HAS_ARM64_SHA512 = 56, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
CORJIT_FLAG_HAS_ARM64_SHA3 = 57, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
CORJIT_FLAG_HAS_ARM64_SIMD = 58, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
fmt = IF_DR_2J;
break;
+ case INS_sha256su0:
case INS_sha1su1:
assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
fmt = IF_LS_3D;
break;
+ case INS_sha256h:
+ case INS_sha256h2:
+ case INS_sha256su1:
case INS_sha1su0:
case INS_sha1c:
case INS_sha1p:
emitDispReg(id->idReg2(), EA_4BYTE, true);
emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
}
+ else if ((ins == INS_sha256h) || (ins == INS_sha256h2))
+ {
+ // Qd Qn Vm (vector)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
+ }
else
{
emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_LRCPC , Lrcpc )
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_PMULL , Pmull )
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SHA1 , Sha1 )
-HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SHA2 , Sha2 )
+HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SHA256 , Sha256 )
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SHA512 , Sha512 )
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SHA3 , Sha3 )
HARDWARE_INTRINSIC_CLASS(JIT_FLAG_HAS_ARM64_SIMD , Simd )
HARDWARE_INTRINSIC(NI_ARM64_Sha1SchedulePart1, Sha1, SchedulePart1, SimdTernaryRMWOp, INS_invalid, INS_invalid, INS_sha1su0, None )
HARDWARE_INTRINSIC(NI_ARM64_Sha1SchedulePart2, Sha1, SchedulePart2, SimdBinaryRMWOp, INS_invalid, INS_invalid, INS_sha1su1, None )
+//Sha256
+HARDWARE_INTRINSIC(NI_ARM64_Sha256HashLower, Sha256, HashLower, SimdTernaryRMWOp, INS_invalid, INS_invalid, INS_sha256h, None )
+HARDWARE_INTRINSIC(NI_ARM64_Sha256HashUpper, Sha256, HashUpper, SimdTernaryRMWOp, INS_invalid, INS_invalid, INS_sha256h2, None )
+HARDWARE_INTRINSIC(NI_ARM64_Sha256SchedulePart1, Sha256, SchedulePart1, SimdBinaryRMWOp, INS_invalid, INS_invalid, INS_sha256su0, None )
+HARDWARE_INTRINSIC(NI_ARM64_Sha256SchedulePart2, Sha256, SchedulePart2, SimdTernaryRMWOp, INS_invalid, INS_invalid, INS_sha256su1, None )
#endif
InstructionSet_Lrcpc, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
InstructionSet_Pmull, // ID_AA64ISAR0_EL1.AES is 2 or better
InstructionSet_Sha1, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- InstructionSet_Sha2, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
+ InstructionSet_Sha256, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
InstructionSet_Sha512, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
InstructionSet_Sha3, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
InstructionSet_Simd, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
INST1(sha1su1, "sha1su1", 0, 0, IF_DV_2P, 0x5E281800)
// sha1su1 Vd.4S, Vn.4S DV_2P 0101111000101000 000110nnnnnddddd 5E28 1800 Vd.4S Vn.4S (vector)
+
+INST1(sha256h, "sha256h", 0, 0, IF_DV_3F, 0x5E004000)
+ // sha256h Qd,Qn,Vm.4S DV_3F 01011110000mmmmm 010000nnnnnddddd 5E00 4000 Qd Qn Vm.4S (vector)
+
+INST1(sha256h2, "sha256h2", 0, 0, IF_DV_3F, 0x5E005000)
+ // sha256h2 Qd,Qn,Vm.4S DV_3F 01011110000mmmmm 010100nnnnnddddd 5E00 5000 Qd Qn Vm.4S (vector)
+
+INST1(sha256su0, "sha256su0", 0, 0, IF_DV_2P, 0x5E282800)
+ // sha256su0 Vd.4S,Vn.4S DV_2P 0101111000101000 001010nnnnnddddd 5E28 2800 Vd.4S Vn.4S (vector)
+
+INST1(sha256su1, "sha256su1", 0, 0, IF_DV_3F, 0x5E006000)
+ // sha256su1 Vd.4S,Vn.4S,Vm.4S DV_3F 01011110000mmmmm 011000nnnnnddddd 5E00 6000 Vd.4S Vn.4S Vm.4S (vector)
INST1(sbfm, "sbfm", 0, 0, IF_DI_2D, 0x13000000)
// sbfm Rd,Rn,imr,ims DI_2D X00100110Nrrrrrr ssssssnnnnnddddd 1300 0000 imr, ims
JIT_FLAG_HAS_ARM64_LRCPC = 52, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
JIT_FLAG_HAS_ARM64_PMULL = 53, // ID_AA64ISAR0_EL1.AES is 2 or better
JIT_FLAG_HAS_ARM64_SHA1 = 54, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- JIT_FLAG_HAS_ARM64_SHA2 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
+ JIT_FLAG_HAS_ARM64_SHA256 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
JIT_FLAG_HAS_ARM64_SHA512 = 56, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
JIT_FLAG_HAS_ARM64_SHA3 = 57, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
JIT_FLAG_HAS_ARM64_SIMD = 58, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Simd.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Aes.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Sha1.cs" />
+ <Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Sha256.cs" />
</ItemGroup>
<ItemGroup Condition="'$(Platform)' != 'arm64'">
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Simd.PlatformNotSupported.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Aes.PlatformNotSupported.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Sha1.PlatformNotSupported.cs" />
+ <Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Sha256.PlatformNotSupported.cs" />
</ItemGroup>
<ItemGroup>
<Compile Include="$(BclSourcesRoot)\System\AppContext\AppContext.cs" />
--- /dev/null
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+
+namespace System.Runtime.Intrinsics.Arm.Arm64
+{
+    /// <summary>
+    /// This class provides access to the Arm64 SHA256 Crypto intrinsics
+    ///
+    /// Arm64 CPUs indicate support for this feature by setting
+    /// ID_AA64ISAR0_EL1.SHA2 to 1 or better
+    /// </summary>
+    [CLSCompliant(false)]
+    public static class Sha256
+    {
+        // Stub compiled for platforms other than arm64 (see the csproj
+        // Platform != 'arm64' item group): IsSupported is constant false
+        // and every method throws PlatformNotSupportedException.
+        public static bool IsSupported { get { return false; } }
+
+        /// <summary>
+        /// Performs SHA256 hash update (part 1).
+        /// vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk)
+        /// </summary>
+        public static Vector128<uint> HashLower(Vector128<uint> hash_abcd, Vector128<uint> hash_efgh, Vector128<uint> wk) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs SHA256 hash update (part 2).
+        /// vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk)
+        /// </summary>
+        public static Vector128<uint> HashUpper(Vector128<uint> hash_efgh, Vector128<uint> hash_abcd, Vector128<uint> wk) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs SHA256 schedule update 0
+        /// vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7)
+        /// </summary>
+        public static Vector128<uint> SchedulePart1(Vector128<uint> w0_3, Vector128<uint> w4_7) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs SHA256 schedule update 1
+        /// vsha256su1q_u32 (uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15)
+        /// </summary>
+        public static Vector128<uint> SchedulePart2(Vector128<uint> w0_3, Vector128<uint> w8_11, Vector128<uint> w12_15) { throw new PlatformNotSupportedException(); }
+    }
+}
--- /dev/null
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+
+namespace System.Runtime.Intrinsics.Arm.Arm64
+{
+    /// <summary>
+    /// This class provides access to the Arm64 SHA256 Crypto intrinsics
+    ///
+    /// Arm64 CPUs indicate support for this feature by setting
+    /// ID_AA64ISAR0_EL1.SHA2 to 1 or better
+    /// </summary>
+    [CLSCompliant(false)]
+    public static class Sha256
+    {
+        // NOTE: the recursive bodies below are intentional. These methods are
+        // hardware-intrinsic placeholders: the JIT recognizes them (see the
+        // NI_ARM64_Sha256* HARDWARE_INTRINSIC table entries) and replaces each
+        // call with the corresponding sha256* instruction, so the recursion is
+        // never actually executed on supported hardware.
+        public static bool IsSupported { get => IsSupported; }
+
+        /// <summary>
+        /// Performs SHA256 hash update (part 1).
+        /// vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk)
+        /// </summary>
+        public static Vector128<uint> HashLower(Vector128<uint> hash_abcd, Vector128<uint> hash_efgh, Vector128<uint> wk) => HashLower(hash_abcd, hash_efgh, wk);
+
+        /// <summary>
+        /// Performs SHA256 hash update (part 2).
+        /// vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk)
+        /// </summary>
+        public static Vector128<uint> HashUpper(Vector128<uint> hash_efgh, Vector128<uint> hash_abcd, Vector128<uint> wk) => HashUpper(hash_efgh, hash_abcd, wk);
+
+        /// <summary>
+        /// Performs SHA256 schedule update 0
+        /// vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7)
+        /// </summary>
+        public static Vector128<uint> SchedulePart1(Vector128<uint> w0_3, Vector128<uint> w4_7) => SchedulePart1(w0_3, w4_7);
+
+        /// <summary>
+        /// Performs SHA256 schedule update 1
+        /// vsha256su1q_u32 (uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15)
+        /// </summary>
+        public static Vector128<uint> SchedulePart2(Vector128<uint> w0_3, Vector128<uint> w8_11, Vector128<uint> w12_15) => SchedulePart2(w0_3, w8_11, w12_15);
+    }
+}
#endif
#ifdef HWCAP_SHA2
if (hwCap & HWCAP_SHA2)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA2);
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA256);
#endif
#ifdef HWCAP_SHA512
if (hwCap & HWCAP_SHA512)