if (CLR_CMAKE_TARGET_ARCH_AMD64 OR (CLR_CMAKE_TARGET_ARCH_I386 AND NOT CLR_CMAKE_PLATFORM_UNIX))
add_definitions(-DFEATURE_SIMD)
- add_definitions(-DFEATURE_AVX_SUPPORT)
endif ()
# JIT_BUILD disables certain PAL_TRY debugging features
//
void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
{
-#ifdef FEATURE_AVX_SUPPORT
bool emitVzeroUpper = false;
if (check256bitOnly)
{
assert(compiler->getSIMDInstructionSet() == InstructionSet_AVX);
instGen(INS_vzeroupper);
}
-#endif
}
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
}
#endif // defined(_TARGET_X86_)
-#ifdef FEATURE_AVX_SUPPORT
// When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here
// if the function contains 256-bit AVX instructions. This avoids the AVX-256 to legacy SSE
// transition penalty, on the assumption that the user function contains legacy SSE instructions.
assert(compiler->getSIMDInstructionSet() == InstructionSet_AVX);
instGen(INS_vzeroupper);
}
-#endif
if (target != nullptr)
{
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ALL_XARCH_EMITTER_UNIT_TESTS
-#ifdef FEATURE_AVX_SUPPORT
genDefineTempLabel(genCreateTempLabel());
// vcvtss2sd xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vcvtsd2ss xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_cvtsd2ss, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
-#endif // FEATURE_AVX_SUPPORT
#endif // ALL_XARCH_EMITTER_UNIT_TESTS
printf("*************** End of genAmd64EmitterUnitTests()\n");
}
}
}
-#ifdef FEATURE_AVX_SUPPORT
// COMPlus_EnableAVX can be used to disable using AVX if available on a target machine.
opts.compCanUseAVX = false;
if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2))
opts.compCanUseAVX = true;
}
}
-#endif // FEATURE_AVX_SUPPORT
if (!compIsForInlining())
{
-#ifdef FEATURE_AVX_SUPPORT
if (opts.compCanUseAVX)
{
codeGen->getEmitter()->SetUseAVX(true);
codeGen->getEmitter()->SetContainsAVX(false);
codeGen->getEmitter()->SetContains256bitAVX(false);
}
- else
-#endif // FEATURE_AVX_SUPPORT
- if (opts.compCanUseSSE3_4)
+ else if (opts.compCanUseSSE3_4)
{
codeGen->getEmitter()->SetUseSSE3_4(true);
}
/*****************************************************************************/
#ifdef FEATURE_SIMD
-#ifdef FEATURE_AVX_SUPPORT
const unsigned TEMP_MAX_SIZE = YMM_REGSIZE_BYTES;
-#else // !FEATURE_AVX_SUPPORT
-const unsigned TEMP_MAX_SIZE = XMM_REGSIZE_BYTES;
-#endif // !FEATURE_AVX_SUPPORT
#else // !FEATURE_SIMD
const unsigned TEMP_MAX_SIZE = sizeof(double);
#endif // !FEATURE_SIMD
return emitTypeSize(TYP_SIMD8);
}
-#ifdef FEATURE_AVX_SUPPORT
- // (maxPossibleSIMDStructBytes is for use in a context that requires a compile-time constant.)
- static const unsigned maxPossibleSIMDStructBytes = 32;
-#else // !FEATURE_AVX_SUPPORT
- static const unsigned maxPossibleSIMDStructBytes = 16;
-#endif // !FEATURE_AVX_SUPPORT
-
// Returns the codegen type for a given SIMD size.
var_types getSIMDTypeForSize(unsigned size)
{
{
simdType = TYP_SIMD16;
}
-#ifdef FEATURE_AVX_SUPPORT
else if (size == 32)
{
simdType = TYP_SIMD32;
}
-#endif // FEATURE_AVX_SUPPORT
else
{
noway_assert(!"Unexpected size for SIMD type");
bool canUseAVX() const
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifdef _TARGET_XARCH_
return opts.compCanUseAVX;
#else
return false;
#ifdef _TARGET_XARCH_
bool compCanUseSSE2; // Allow CodeGen to use "movq XMM" instructions
bool compCanUseSSE3_4; // Allow CodeGen to use SSE3, SSSE3, SSE4.1 and SSE4.2 instructions
-
-#ifdef FEATURE_AVX_SUPPORT
- bool compCanUseAVX; // Allow CodeGen to use AVX 256-bit vectors for SIMD operations
-#endif // FEATURE_AVX_SUPPORT
-#endif // _TARGET_XARCH_
+ bool compCanUseAVX; // Allow CodeGen to use AVX 256-bit vectors for SIMD operations
+#endif // _TARGET_XARCH_
#ifdef _TARGET_XARCH_
uint64_t compSupportsISA;
<LinkModuleDefinitionFile>$(OutputName).def</LinkModuleDefinitionFile>
- <ClDefines Condition="'$(BuildArchitecture)' == 'amd64'">$(ClDefines);FEATURE_SIMD;FEATURE_AVX_SUPPORT</ClDefines>
+ <ClDefines Condition="'$(BuildArchitecture)' == 'amd64'">$(ClDefines);FEATURE_SIMD</ClDefines>
<Win32DllLibs>$(SdkLibPath)\kernel32.lib;$(SdkLibPath)\user32.lib;$(SdkLibPath)\advapi32.lib;$(SdkLibPath)\oleaut32.lib;$(SdkLibPath)\uuid.lib</Win32DllLibs>
<Win32DllLibs>$(Win32DllLibs);$(ClrLibPath)\utilcode.lib</Win32DllLibs>
#ifdef FEATURE_SIMD
#ifdef _TARGET_XARCH_
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD) &&
jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2))
{
return 32;
}
}
-#endif // FEATURE_AVX_SUPPORT
+#endif // !LEGACY_BACKEND
if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr)
{
JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 16\n");
#ifdef _TARGET_XARCH_
SetUseSSE3_4(false);
-#endif // _TARGET_XARCH_
-
-#ifdef FEATURE_AVX_SUPPORT
SetUseAVX(false);
-#endif // FEATURE_AVX_SUPPORT
+#endif // _TARGET_XARCH_
}
#include "emitpub.h"
regNumber _idReg3 : REGNUM_BITS;
regNumber _idReg4 : REGNUM_BITS;
};
-#elif defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
+#elif defined(_TARGET_XARCH_)
struct
{
regNumber _idReg3 : REGNUM_BITS;
};
-#endif // defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
+#endif // defined(_TARGET_XARCH_)
} _idAddrUnion;
assert(reg == _idReg2);
}
-#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
+#if defined(_TARGET_XARCH_)
regNumber idReg3() const
{
assert(!idIsTiny());
idAddr()->_idReg3 = reg;
assert(reg == idAddr()->_idReg3);
}
-#endif // defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
+#endif // defined(_TARGET_XARCH_)
#ifdef _TARGET_ARMARCH_
insOpts idInsOpt() const
{
bool IsSSEOrAVXInstruction(instruction ins)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
return (ins >= INS_FIRST_SSE2_INSTRUCTION && ins <= INS_LAST_AVX_INSTRUCTION);
-#else // !FEATURE_AVX_SUPPORT
+#else // !LEGACY_BACKEND
return IsSSE2Instruction(ins);
-#endif // !FEATURE_AVX_SUPPORT
+#endif // LEGACY_BACKEND
}
bool IsAVXOnlyInstruction(instruction ins)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
return (ins >= INS_FIRST_AVX_INSTRUCTION && ins <= INS_LAST_AVX_INSTRUCTION);
#else
return false;
bool emitter::IsAVXInstruction(instruction ins)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
return (UseAVX() && IsSSEOrAVXInstruction(ins));
#else
return false;
#endif
}
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
// Returns true if the AVX instruction is a binary operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the destination
// as a source.
{
return UseAVX() && (IsSSE4Instruction(ins) || IsAVXOnlyInstruction(ins)) && EncodedBySSE38orSSE3A(ins);
}
-#endif // FEATURE_AVX_SUPPORT
+#endif // !LEGACY_BACKEND
// -------------------------------------------------------------------
// Is4ByteSSE4Instruction: Returns true if the SSE4 instruction
#endif
}
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
// Returns true if this instruction requires a VEX prefix
// All AVX instructions require a VEX prefix
bool emitter::TakesVexPrefix(instruction ins)
return code;
}
-#endif // FEATURE_AVX_SUPPORT
+#endif // !LEGACY_BACKEND
// Returns true if this instruction, for the given EA_SIZE(attr), will require a REX.W prefix
bool TakesRexWPrefix(instruction ins, emitAttr attr)
// Outputs VEX prefix (in case of AVX instructions) and REX.R/X/W/B otherwise.
unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, code_t& code)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
if (hasVexPrefix(code))
{
// Only AVX instructions should have a VEX prefix
emitOutputByte(dst + 2, vexPrefix & 0xFF);
return 3;
}
-#endif // FEATURE_AVX_SUPPORT
+#endif // !LEGACY_BACKEND
#ifdef _TARGET_AMD64_
if (code > 0x00FFFFFFFFLL)
//=opcodeSize + vexPrefixAdjustedSize
unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, code_t code)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
if (IsAVXInstruction(ins))
{
unsigned vexPrefixAdjustedSize = emitGetVexPrefixSize(ins, attr);
return vexPrefixAdjustedSize;
}
-#endif // FEATURE_AVX_SUPPORT
-
+#endif // !LEGACY_BACKEND
return 0;
}
*/
inline emitter::code_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, code_t code)
{
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
assert(reg < REG_STK);
assert(IsAVXInstruction(ins));
assert(hasVexPrefix(code));
dispIns(id);
emitCurIGsize += sz;
}
-#ifdef FEATURE_AVX_SUPPORT
+
/*****************************************************************************
*
* Add an instruction with three register operands.
emitCurIGsize += sz;
}
-#endif
/*****************************************************************************
*
* Add an instruction with a register + static member operands.
/* Display the instruction name */
sstr = codeGen->genInsName(ins);
-#ifdef FEATURE_AVX_SUPPORT
+
if (IsAVXInstruction(ins))
{
printf(" v%-8s", sstr);
}
else
-#endif // FEATURE_AVX_SUPPORT
{
printf(" %-9s", sstr);
}
printf(" %s", emitRegName(id->idReg2(), attr));
break;
-#ifdef FEATURE_AVX_SUPPORT
case IF_RWR_RRD_RRD:
assert(IsAVXInstruction(ins));
assert(IsThreeOperandAVXInstruction(ins));
val = emitGetInsSC(id);
goto PRINT_CONSTANT;
break;
-#endif
case IF_RRW_RRW_CNS:
printf("%s,", emitRegName(id->idReg1(), attr));
printf(" %s", emitRegName(id->idReg2(), attr));
return dst;
}
-#ifdef FEATURE_AVX_SUPPORT
BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
{
code_t code;
return dst;
}
-#endif
/*****************************************************************************
*
sz = emitSizeOfInsDsc(id);
break;
-#ifdef FEATURE_AVX_SUPPORT
case IF_RWR_RRD_RRD:
dst = emitOutputRRR(dst, id);
sz = emitSizeOfInsDsc(id);
sz = emitSizeOfInsDsc(id);
dst += emitOutputByte(dst, emitGetInsSC(id));
break;
-#endif
case IF_RRW_RRW_CNS:
assert(id->idGCref() == GCT_NONE);
}
assert(code & 0x00FF0000);
-#ifdef FEATURE_AVX_SUPPORT
if (TakesRexWPrefix(ins, size))
{
code = AddRexWPrefix(ins, code);
code = insEncodeReg3456(ins, id->idReg2(), size, code);
}
}
-#endif // FEATURE_AVX_SUPPORT
regcode = (insEncodeReg345(ins, rReg, size, &code) | insEncodeReg012(ins, mReg, size, &code)) << 8;
case IF_MWR_RRD:
case IF_MRW_RRD:
code = insCodeMR(ins);
-#ifdef FEATURE_AVX_SUPPORT
code = AddVexPrefixIfNeeded(ins, code, size);
// In case of AVX instructions that take 3 operands, encode reg1 as first source.
// encode source operand reg in 'vvvv' bits in 1's complement form
code = insEncodeReg3456(ins, id->idReg1(), size, code);
}
-#endif // FEATURE_AVX_SUPPORT
regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
dst = emitOutputCV(dst, id, code | regcode | 0x0500);
BYTE* emitOutputRR(BYTE* dst, instrDesc* id);
BYTE* emitOutputIV(BYTE* dst, instrDesc* id);
-#ifdef FEATURE_AVX_SUPPORT
BYTE* emitOutputRRR(BYTE* dst, instrDesc* id);
-#endif
BYTE* emitOutputLJ(BYTE* dst, instrDesc* id);
#endif // !_TARGET_AMD64_
}
-#ifdef FEATURE_AVX_SUPPORT
+#ifndef LEGACY_BACKEND
// 3-byte VEX prefix starts with byte 0xC4
#define VEX_PREFIX_MASK_3BYTE 0xFF000000000000ULL
return (IsDstDstSrcAVXInstruction(ins) || IsDstSrcSrcAVXInstruction(ins));
}
bool Is4ByteAVXInstruction(instruction ins);
-#else // !FEATURE_AVX_SUPPORT
+#else // LEGACY_BACKEND
bool UseAVX()
{
return false;
}
+void SetUseAVX(bool value)
+{
+}
bool ContainsAVX()
{
return false;
}
+void SetContainsAVX(bool value)
+{
+}
bool Contains256bitAVX()
{
return false;
}
+void SetContains256bitAVX(bool value)
+{
+}
bool hasVexPrefix(code_t code)
{
return false;
{
return code;
}
-#endif // !FEATURE_AVX_SUPPORT
+#endif // LEGACY_BACKEND
/************************************************************************/
/* Debug-only routines to display instructions */
void emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival);
-#ifdef FEATURE_AVX_SUPPORT
void emitIns_R_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3);
void emitIns_R_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, int ival);
-#endif
void emitIns_S(instruction ins, emitAttr attr, int varx, int offs);
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
-#elif defined(_TARGET_XARCH_) && defined(FEATURE_AVX_SUPPORT)
+#elif defined(_TARGET_XARCH_)
getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3);
#else
NYI("inst_RV_RV_RV");
# No SIMD in legacy back-end.
remove_definitions(-DFEATURE_SIMD)
-remove_definitions(-DFEATURE_AVX_SUPPORT)
if(WIN32)
add_definitions(-DFX_VER_INTERNALNAME_STR=legacyjit.dll)
remove_definitions(-DFEATURE_MERGE_JIT_AND_ENGINE)
remove_definitions(-DFEATURE_SIMD)
-remove_definitions(-DFEATURE_AVX_SUPPORT)
add_definitions(-DLEGACY_BACKEND)
if (CLR_CMAKE_PLATFORM_ARCH_I386)
remove_definitions(-DFEATURE_SIMD)
- remove_definitions(-DFEATURE_AVX_SUPPORT)
add_definitions(-DUNIX_X86_ABI)
set(JIT_ARCH_ALTJIT_SOURCES ${JIT_I386_SOURCES})
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
//
void LinearScan::SetContainsAVXFlags(bool isFloatingPointType /* = true */, unsigned sizeOfSIMDVector /* = 0*/)
{
-#ifdef FEATURE_AVX_SUPPORT
if (isFloatingPointType)
{
if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
compiler->getEmitter()->SetContains256bitAVX(true);
}
}
-#endif
}
#ifdef _TARGET_X86_
<LinkModuleDefinitionFile>$(OutputName).def</LinkModuleDefinitionFile>
<ClDefines>$(ClDefines);ALT_JIT</ClDefines>
- <ClDefines Condition="'$(BuildArchitecture)' == 'amd64'">$(ClDefines);FEATURE_SIMD;FEATURE_AVX_SUPPORT</ClDefines>
+ <ClDefines Condition="'$(BuildArchitecture)' == 'amd64'">$(ClDefines);FEATURE_SIMD</ClDefines>
<Win32DllLibs>$(SdkLibPath)\kernel32.lib;$(SdkLibPath)\user32.lib;$(SdkLibPath)\advapi32.lib;$(SdkLibPath)\oleaut32.lib;$(SdkLibPath)\uuid.lib</Win32DllLibs>
<Win32DllLibs>$(Win32DllLibs);$(ClrLibPath)\utilcode.lib</Win32DllLibs>
remove_definitions(-DFEATURE_MERGE_JIT_AND_ENGINE)
remove_definitions(-DFEATURE_SIMD)
-remove_definitions(-DFEATURE_AVX_SUPPORT)
if(FEATURE_READYTORUN)
add_definitions(-DFEATURE_READYTORUN_COMPILER)
var_types targetType, var_types baseType, regNumber targetReg, regNumber srcReg, SIMDScalarMoveType moveType)
{
assert(varTypeIsFloating(baseType));
-#ifdef FEATURE_AVX_SUPPORT
if (compiler->getSIMDInstructionSet() == InstructionSet_AVX)
{
switch (moveType)
}
}
else
-#endif // FEATURE_AVX_SUPPORT
{
// SSE
ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseOr, baseType);
inst_RV_RV(ins, targetReg, tmpReg, targetType, emitActualTypeSize(targetType));
-#ifdef FEATURE_AVX_SUPPORT
if (compiler->canUseAVX())
{
inst_RV_RV(INS_vpbroadcastq, targetReg, targetReg, TYP_SIMD32, emitTypeSize(TYP_SIMD32));
}
else
-#endif // FEATURE_AVX_SUPPORT
{
ins = getOpForSIMDIntrinsic(SIMDIntrinsicShuffleSSE2, baseType);
getEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(targetType), targetReg, targetReg, 0);
ins = getOpForSIMDIntrinsic(SIMDIntrinsicEqual, TYP_INT);
inst_RV_RV(ins, targetReg, targetReg, targetType, emitActualTypeSize(targetType));
}
-#ifdef FEATURE_AVX_SUPPORT
else
{
assert(iset == InstructionSet_AVX);
unreached();
}
}
-#endif // FEATURE_AVX_SUPPORT
}
else if (iset == InstructionSet_AVX && ((size == 32) || (size == 16)))
{
#ifdef FEATURE_SIMD
#define ALIGN_SIMD_TYPES 1 // whether SIMD type locals are to be aligned
-#if defined(UNIX_AMD64_ABI) || !defined(FEATURE_AVX_SUPPORT)
+#if defined(UNIX_AMD64_ABI)
#define FEATURE_PARTIAL_SIMD_CALLEE_SAVE 0 // Whether SIMD registers are partially saved at calls
-#else // !UNIX_AMD64_ABI && !FEATURE_AVX_SUPPORT
+#else // !UNIX_AMD64_ABI
#define FEATURE_PARTIAL_SIMD_CALLEE_SAVE 1 // Whether SIMD registers are partially saved at calls
#endif // !UNIX_AMD64_ABI
#endif
#define REGDEF(name, rnum, mask, sname) "x" sname,
#include "register.h"
};
-#ifdef FEATURE_AVX_SUPPORT
+#ifdef FEATURE_SIMD
static const char* regNamesYMM[] = {
#define REGDEF(name, rnum, mask, sname) "y" sname,
#include "register.h"
};
-#endif // FEATURE_AVX_SUPPORT
+#endif // FEATURE_SIMD
assert((unsigned)reg < ArrLen(regNamesFloat));
-#ifdef FEATURE_AVX_SUPPORT
+#ifdef FEATURE_SIMD
if (type == TYP_SIMD32)
{
return regNamesYMM[reg];
}
-#endif // FEATURE_AVX_SUPPORT
+#endif // FEATURE_SIMD
return regNamesFloat[reg];
#endif
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
-#ifdef FEATURE_AVX_SUPPORT
case TYP_SIMD32:
-#endif // FEATURE_AVX_SUPPORT
return true;
default:
return false;