case HWIntrinsicInfo::SimdUnaryOp:
genHWIntrinsicSimdUnaryOp(node);
break;
+ case HWIntrinsicInfo::SimdBinaryRMWOp:
+ genHWIntrinsicSimdBinaryRMWOp(node);
+ break;
default:
NYI("HWIntrinsic form not implemented");
}
genProduceReg(node);
}
+//------------------------------------------------------------------------
+// genHWIntrinsicSimdBinaryRMWOp:
+//
+// Produce code for a GT_HWIntrinsic node with form SimdBinaryRMWOp.
+//
+// Consumes two SIMD operands and produces a SIMD result.
+// First operand is both source and destination.
+//
+// Arguments:
+// node - the GT_HWIntrinsic node
+//
+// Return Value:
+// None.
+//
+void CodeGen::genHWIntrinsicSimdBinaryRMWOp(GenTreeHWIntrinsic* node)
+{
+    GenTree*  op1       = node->gtGetOp1();
+    GenTree*  op2       = node->gtGetOp2();
+    var_types baseType  = node->gtSIMDBaseType;
+    regNumber targetReg = node->gtRegNum;
+
+    assert(targetReg != REG_NA);
+
+    genConsumeOperands(node);
+
+    regNumber op1Reg = op1->gtRegNum;
+    regNumber op2Reg = op2->gtRegNum;
+
+    assert(genIsValidFloatReg(op1Reg));
+    assert(genIsValidFloatReg(op2Reg));
+    assert(genIsValidFloatReg(targetReg));
+
+    // The instruction is read-modify-write on its first source: Vd is both a
+    // source and the destination.  If targetReg differs from op1Reg we copy
+    // op1 into targetReg first; that copy must not clobber op2, so the
+    // register allocator is expected to keep op2 out of targetReg
+    // (delay-free).  Verify that constraint here.
+    assert((targetReg != op2Reg) || (targetReg == op1Reg));
+
+    instruction ins = getOpForHWIntrinsic(node, baseType);
+    assert(ins != INS_invalid);
+
+    bool     is16Byte = (node->gtSIMDSize > 8);
+    emitAttr attr     = is16Byte ? EA_16BYTE : EA_8BYTE;
+    insOpts  opt      = genGetSimdInsOpt(is16Byte, baseType);
+
+    // Materialize op1 into the destination register, then apply the RMW
+    // instruction with op2 as the second source.
+    if (targetReg != op1Reg)
+    {
+        getEmitter()->emitIns_R_R(INS_mov, attr, targetReg, op1Reg);
+    }
+    getEmitter()->emitIns_R_R(ins, attr, targetReg, op2Reg, opt);
+
+    genProduceReg(node);
+}
+
#endif // FEATURE_HW_INTRINSICS
/*****************************************************************************
void genHWIntrinsicSimdSelectOp(GenTreeHWIntrinsic* node);
void genHWIntrinsicSimdSetAllOp(GenTreeHWIntrinsic* node);
void genHWIntrinsicSimdUnaryOp(GenTreeHWIntrinsic* node);
+void genHWIntrinsicSimdBinaryRMWOp(GenTreeHWIntrinsic* node);
template <typename HWIntrinsicSwitchCaseBody>
void genHWIntrinsicSwitchTable(regNumber swReg, regNumber tmpReg, int swMax, HWIntrinsicSwitchCaseBody emitSwCase);
#endif // defined(_TARGET_XARCH_)
case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ case IF_DV_2P: // DV_2P ................ ......nnnnnddddd Vd Vn (aes*)
assert(isValidVectorDatasize(id->idOpSize()));
assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
assert(isVectorRegister(id->idReg1()));
case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ case IF_DV_2P: // DV_2P ................ ......nnnnnddddd Vd Vn (aes*) - Vd both source and dest
case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector)
case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
case IF_DV_2M:
case IF_DV_2N:
case IF_DV_2O:
+ case IF_DV_2P:
case IF_DV_3A:
case IF_DV_3AI:
case IF_DV_3B:
fmt = IF_DV_2G;
}
break;
+ case INS_aesd:
+ case INS_aese:
+ case INS_aesmc:
+ case INS_aesimc:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ elemsize = optGetElemsize(opt);
+ assert(elemsize == EA_1BYTE);
+ fmt = IF_DV_2P;
+ break;
default:
unreached();
dst += emitOutput_Instr(dst, code);
break;
+        case IF_DV_2P: // DV_2P ................ ......nnnnnddddd Vd Vn (aes*)
+ elemsize = optGetElemsize(id->idInsOpt());
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
+
case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
code = emitInsCode(ins, fmt);
elemsize = optGetElemsize(id->idInsOpt());
case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ case IF_DV_2P: // DV_2P ................ ......nnnnnddddd Vd Vn (aes*)
emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
emitDispVectorReg(id->idReg2(), id->idInsOpt(), false);
break;
IF_DEF(DV_2M, IS_NONE, NONE) // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
IF_DEF(DV_2N, IS_NONE, NONE) // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
IF_DEF(DV_2O, IS_NONE, NONE) // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
+IF_DEF(DV_2P, IS_NONE, NONE) // DV_2P ................ ......nnnnnddddd Vd Vn (Vd used as both source and destination)
IF_DEF(DV_3A, IS_NONE, NONE) // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
IF_DEF(DV_3AI, IS_NONE, NONE) // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
case HWIntrinsicInfo::SimdSelectOp:
case HWIntrinsicInfo::SimdSetAllOp:
case HWIntrinsicInfo::SimdUnaryOp:
+ case HWIntrinsicInfo::SimdBinaryRMWOp:
simdClass = sig->retTypeClass;
break;
case HWIntrinsicInfo::SimdExtractOp:
return impUnsupportedHWIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
case HWIntrinsicInfo::SimdBinaryOp:
+ case HWIntrinsicInfo::SimdBinaryRMWOp:
// op1 is the first operand
// op2 is the second operand
op2 = impSIMDPopStack(simdType);
UnaryOp, // Non SIMD intrinsics which take a single argument
CrcOp, // Crc intrinsics.
// SIMD common forms
- SimdBinaryOp, // SIMD intrinsics which take two vector operands and return a vector
- SimdUnaryOp, // SIMD intrinsics which take one vector operand and return a vector
+ SimdBinaryOp, // SIMD intrinsics which take two vector operands and return a vector
+ SimdUnaryOp, // SIMD intrinsics which take one vector operand and return a vector
+    SimdBinaryRMWOp, // Same as SimdBinaryOp, but the first source vector is also used as the destination vector.
// SIMD custom forms
SimdExtractOp, // SIMD intrinsics which take one vector operand and a lane index and return an element
SimdInsertOp, // SIMD intrinsics which take one vector operand and a lane index and value and return a vector
HARDWARE_INTRINSIC(NI_ARM64_SIMD_SetItem, Simd, Insert, SimdInsertOp, INS_mov, INS_mov, INS_mov, None )
HARDWARE_INTRINSIC(NI_ARM64_SIMD_SetAllVector64, Simd, SetAllVector64, SimdSetAllOp, INS_dup, INS_dup, INS_dup, None )
HARDWARE_INTRINSIC(NI_ARM64_SIMD_SetAllVector128, Simd, SetAllVector128, SimdSetAllOp, INS_dup, INS_dup, INS_dup, None )
+//Aes
+HARDWARE_INTRINSIC(NI_ARM64_AesEncrypt, Aes, Encrypt, SimdBinaryRMWOp, INS_invalid, INS_invalid, INS_aese, None )
+HARDWARE_INTRINSIC(NI_ARM64_AesDecrypt, Aes, Decrypt, SimdBinaryRMWOp, INS_invalid, INS_invalid, INS_aesd, None )
+HARDWARE_INTRINSIC(NI_ARM64_AesMixColumns, Aes, MixColumns, SimdUnaryOp, INS_invalid, INS_invalid, INS_aesmc, None )
+HARDWARE_INTRINSIC(NI_ARM64_AesInvMixColumns, Aes, InverseMixColumns, SimdUnaryOp, INS_invalid, INS_invalid, INS_aesimc, None )
#endif
INST1(csetm, "csetm", 0, 0, IF_DR_1D, 0x5A9F03E0)
// csetm Rd,cond DR_1D X101101010011111 cccc0011111ddddd 5A9F 03E0 Rd cond
+INST1(aese, "aese", 0, 0, IF_DV_2P, 0x4E284800)
+ // aese Vd.16B,Vn.16B DV_2P 0100111000101000 010010nnnnnddddd 4E28 4800 Vd.16B Vn.16B (vector)
+
+INST1(aesd, "aesd", 0, 0, IF_DV_2P, 0x4E285800)
+ // aesd Vd.16B,Vn.16B DV_2P 0100111000101000 010110nnnnnddddd 4E28 5800 Vd.16B Vn.16B (vector)
+
+INST1(aesmc, "aesmc", 0, 0, IF_DV_2P, 0x4E286800)
+ // aesmc Vd.16B,Vn.16B DV_2P 0100111000101000 011010nnnnnddddd 4E28 6800 Vd.16B Vn.16B (vector)
+
+INST1(aesimc, "aesimc", 0, 0, IF_DV_2P, 0x4E287800)
+ // aesimc Vd.16B,Vn.16B DV_2P 0100111000101000 011110nnnnnddddd 4E28 7800 Vd.16B Vn.16B (vector)
+
INST1(rev, "rev", 0, 0, IF_DR_2G, 0x5AC00800)
// rev Rd,Rm DR_2G X101101011000000 00001Xnnnnnddddd 5AC0 0800 Rd Rn
</ItemGroup>
<ItemGroup Condition="'$(Platform)' == 'arm64'">
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Simd.cs" />
+ <Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Aes.cs" />
</ItemGroup>
<ItemGroup Condition="'$(Platform)' != 'arm64'">
<Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Simd.PlatformNotSupported.cs" />
+ <Compile Include="$(BclSourcesRoot)\System\Runtime\Intrinsics\Arm\Arm64\Aes.PlatformNotSupported.cs" />
</ItemGroup>
<ItemGroup>
<Compile Include="$(BclSourcesRoot)\System\AppContext\AppContext.cs" />
</ItemGroup>
<Import Project="GenerateCompilerResponseFile.targets" />
-</Project>
\ No newline at end of file
+</Project>
--- /dev/null
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+
+namespace System.Runtime.Intrinsics.Arm.Arm64
+{
+    /// <summary>
+    /// This class provides access to the Arm64 AES Crypto intrinsics
+    ///
+    /// An Arm64 CPU indicates support for this feature by setting
+    /// ID_AA64ISAR0_EL1.AES to 1 or better
+    /// </summary>
+    [CLSCompliant(false)]
+    public static class Aes
+    {
+        // This is the PlatformNotSupported variant: IsSupported is always
+        // false and every method unconditionally throws.
+        public static bool IsSupported { get { return false; } }
+        /// <summary>
+        /// Performs AES single round decryption
+        /// vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+        /// </summary>
+        public static Vector128<byte> Decrypt(Vector128<byte> value, Vector128<byte> roundKey) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs AES single round encryption
+        /// vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+        /// </summary>
+        public static Vector128<byte> Encrypt(Vector128<byte> value, Vector128<byte> roundKey) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs AES Mix Columns
+        /// vaesmcq_u8 (uint8x16_t data)
+        /// </summary>
+        public static Vector128<byte> MixColumns(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// Performs AES inverse mix columns
+        /// vaesimcq_u8 (uint8x16_t data)
+        /// </summary>
+        public static Vector128<byte> InverseMixColumns(Vector128<byte> value) { throw new PlatformNotSupportedException(); }
+    }
+}
--- /dev/null
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+
+namespace System.Runtime.Intrinsics.Arm.Arm64
+{
+    /// <summary>
+    /// This class provides access to the Arm64 AES Crypto intrinsics
+    ///
+    /// An Arm64 CPU indicates support for this feature by setting
+    /// ID_AA64ISAR0_EL1.AES to 1 or better
+    /// </summary>
+    [CLSCompliant(false)]
+    public static class Aes
+    {
+        // NOTE: the self-referential bodies below follow the hardware
+        // intrinsic pattern — presumably the JIT replaces each call with the
+        // corresponding instruction, so the recursion is never executed at
+        // runtime when the feature is supported.  Verify against the JIT's
+        // intrinsic expansion before relying on this.
+        public static bool IsSupported { get => IsSupported; }
+        /// <summary>
+        /// Performs AES single round decryption
+        /// vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+        /// </summary>
+        public static Vector128<byte> Decrypt(Vector128<byte> value, Vector128<byte> roundKey) => Decrypt(value, roundKey);
+
+        /// <summary>
+        /// Performs AES single round encryption
+        /// vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+        /// </summary>
+        public static Vector128<byte> Encrypt(Vector128<byte> value, Vector128<byte> roundKey) => Encrypt(value, roundKey);
+
+        /// <summary>
+        /// Performs AES Mix Columns
+        /// vaesmcq_u8 (uint8x16_t data)
+        /// </summary>
+        public static Vector128<byte> MixColumns(Vector128<byte> value) => MixColumns(value);
+
+        /// <summary>
+        /// Performs AES inverse mix columns
+        /// vaesimcq_u8 (uint8x16_t data)
+        /// </summary>
+        public static Vector128<byte> InverseMixColumns(Vector128<byte> value) => InverseMixColumns(value);
+    }
+}