Eliminate `FEATURE_UNIX_AMD64_STRUCT_PASSING`, replacing its standalone uses with `UNIX_AMD64_ABI`. Today the two defines are always set together; it is highly unlikely one would ever be defined without the other, and keeping both significantly clutters the code, especially the JIT.
Also, fix the altjit support (now `UNIX_AMD64_ABI_ITF`) to *not* call `ClassifyEightBytes` if the struct is too large. Otherwise it asserts.
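For context, the altjit fix amounts to a size guard in front of the classifier: structs larger than two eightbytes are reported as stack-passed without ever reaching `ClassifyEightBytes`, which is the call that otherwise asserts. The sketch below is illustrative only, with simplified stand-in types and helper names rather than the real CoreCLR implementation:

```cpp
// Hedged sketch (not the actual CoreCLR code) of the UNIX_AMD64_ABI_ITF fix:
// consult the struct size before calling the SysV classifier, because the
// classifier asserts when the struct cannot fit in two eightbytes.
#include <cassert>
#include <cstdio>

constexpr unsigned kMaxEightBytes     = 2;                           // SysV AMD64: at most two eightbytes in registers
constexpr unsigned kEightByteSize     = 8;
constexpr unsigned kMaxRegPassedBytes = kMaxEightBytes * kEightByteSize;  // 16 bytes

struct StructPassingDescriptor        // stand-in for SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR
{
    bool     passedInRegisters = false;
    unsigned eightByteCount    = 0;
};

// Stand-in for the real classifier: it only knows how to handle <= 16-byte structs.
void ClassifyEightBytes(StructPassingDescriptor* desc, unsigned structSize)
{
    assert(structSize <= kMaxRegPassedBytes);            // the assert the fix avoids
    desc->eightByteCount    = (structSize + kEightByteSize - 1) / kEightByteSize;
    desc->passedInRegisters = true;                      // simplified; real classification is field-based
}

// Fixed entry point: large structs are reported as stack-passed without
// ever calling the classifier.
void GetStructPassingDescriptor(StructPassingDescriptor* desc, unsigned structSize)
{
    if (structSize > kMaxRegPassedBytes)
    {
        desc->passedInRegisters = false;
        return;
    }
    ClassifyEightBytes(desc, structSize);
}

int main()
{
    StructPassingDescriptor small, large;
    GetStructPassingDescriptor(&small, 12);   // classified: two eightbytes, passed in registers
    GetStructPassingDescriptor(&large, 32);   // guarded: stack-passed, classifier never called
    std::printf("small: inRegs=%d eightbytes=%u\n", small.passedInRegisters, small.eightByteCount);
    std::printf("large: inRegs=%d\n", large.passedInRegisters);
    return 0;
}
```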
add_definitions(-DFEATURE_TIERED_COMPILATION)
if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
# Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit.
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+ add_definitions(-DUNIX_AMD64_ABI_ITF)
endif (CLR_CMAKE_PLATFORM_ARCH_AMD64)
if(CLR_CMAKE_PLATFORM_UNIX_AMD64)
add_definitions(-DFEATURE_MULTIREG_RETURN)
+ add_definitions(-DUNIX_AMD64_ABI)
endif (CLR_CMAKE_PLATFORM_UNIX_AMD64)
if(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
+ add_definitions(-DUNIX_AMD64_ABI)
endif(CLR_CMAKE_PLATFORM_UNIX AND CLR_CMAKE_TARGET_ARCH_AMD64)
add_definitions(-DFEATURE_USE_ASM_GC_WRITE_BARRIERS)
if(CLR_CMAKE_PLATFORM_ARCH_AMD64 OR (CLR_CMAKE_PLATFORM_ARCH_ARM64 AND NOT WIN32))
#if defined(FEATURE_HFA)
MTFLAG_ENTRY(IsHFA),
#endif // FEATURE_HFA
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
MTFLAG_ENTRY(IsRegStructPassed),
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
MTFLAG_ENTRY(IsByRefLike),
MTFLAG_ENTRY(UNUSED_ComponentSize_5),
MTFLAG_ENTRY(UNUSED_ComponentSize_6),
if ((CLR_CMAKE_PLATFORM_ARCH_I386 OR CLR_CMAKE_PLATFORM_ARCH_AMD64) AND WIN32)
# On Windows, build altjit that targets the Linux ABI:
# On x86, build Linux/x86 altjit. This enables UNIX_X86_ABI.
- # On amd64, build Linux/AMD64 altjit. This enables UNIX_AMD64_ABI and FEATURE_UNIX_AMD64_STRUCT_PASSING.
+ # On amd64, build Linux/AMD64 altjit. This enables UNIX_AMD64_ABI.
add_subdirectory(linuxnonjit)
endif ()
remove_definitions(-DUNIX_X86_ABI)
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
remove_definitions(-DUNIX_AMD64_ABI)
- remove_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
else()
clr_unknown_arch()
endif()
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
void genEnregisterIncomingStackArgs();
void genCheckUseBlockInit();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(_TARGET_ARM64_)
bool genInstrWithConstant(instruction ins,
struct regArgElem
{
unsigned varNum; // index into compiler->lvaTable[] for this register argument
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
var_types type; // the Jit type of this regArgTab entry
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
// That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
// argument register number 'x'. Only used when circular = true.
bool processed; // true after we've processed the argument (and it is in its final location)
bool circular; // true if this register participates in a circular dependency loop.
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// For UNIX AMD64 struct passing, the type of the register argument slot can differ from
// the type of the lclVar in ways that are not ascertainable from lvType.
return type; // UNIX_AMD64 implementation
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// In other cases, we simply use the type of the lclVar to determine the type of the register.
var_types getRegType(Compiler* compiler)
return varDsc.lvType;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
} regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {};
unsigned varNum;
regType = varDsc->GetHfaType();
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (!varTypeIsStruct(regType))
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// A struct might be passed partially in XMM register for System V calls.
// So a single arg might use both register files.
int slots = 0;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
regArgNum = firstRegSlot;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// Bingo - add it to our table
regArgNum = genMapRegNumToRegArgNum(varDsc->lvArgReg, regType);
// register)
noway_assert(regArgTab[regArgNum].slot == 0);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Set the register type.
regArgTab[regArgNum].type = regType;
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
regArgTab[regArgNum].varNum = varNum;
regArgTab[regArgNum].slot = 1;
regType = regArgTab[regArgNum + i].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(regArgNum + i, regType);
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
// lvArgReg could be INT or FLOAT reg. So the following assertion doesn't hold.
// The type of the register depends on the classification of the first eightbyte
// of the struct. For information on classification refer to the System V x86_64 ABI at:
// http://www.x86-64.org/documentation/abi.pdf
assert((i > 0) || (regNum == varDsc->lvArgReg));
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// Is the arg dead on entry to the method ?
if ((regArgMaskLive & genRegMask(regNum)) == 0)
{
emitAttr size;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// If this is the wrong register file, just continue.
if (regArgTab[argNum].type == TYP_UNDEF)
{
// The next register file processing will process it.
continue;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// If the arg is dead on entry to the method, skip it
if (regArgTab[argNum].processed)
// Must be <= MAX_PASS_MULTIREG_BYTES or else it wouldn't be passed in registers
noway_assert(varDsc->lvSize() <= MAX_PASS_MULTIREG_BYTES);
#endif // FEATURE_MULTIREG_ARGS
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
storeType = regArgTab[argNum].type;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (varDsc->lvIsHfaRegArg())
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// Check if we are writing past the end of the struct
if (varTypeIsStruct(varDsc))
{
assert(varDsc->lvSize() >= baseOffset + (unsigned)size);
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (regArgTab[argNum].slot == 1)
{
if (doingFloat)
{
-#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
insCopy = ins_Copy(TYP_DOUBLE);
// Compute xtraReg here when we have a float argument
assert(xtraReg == REG_NA);
#if defined(FEATURE_HFA)
fpAvailMask &= RBM_ALLDOUBLE;
#else
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
#error Error. Wrong architecture.
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
#endif // defined(FEATURE_HFA)
if (fpAvailMask == RBM_NONE)
#if defined(FEATURE_HFA)
fpAvailMask &= RBM_ALLDOUBLE;
#else
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
#error Error. Wrong architecture.
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
#endif // defined(FEATURE_HFA)
}
var_types regType = regArgTab[argNum].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (regType == TYP_UNDEF)
{
// This could happen if the reg in regArgTab[argNum] is of the other register file -
regArgMaskLive &= ~genRegMask(regNum);
continue;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
#ifndef _TARGET_64BIT_
destRegNum = REG_NEXT(varDsc->lvRegNum);
}
#endif // !_TARGET_64BIT_
-#if (defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
+#if (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
else
{
assert(regArgTab[argNum].slot == 2);
noway_assert(regNum != destRegNum);
continue;
}
-#endif // (defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
+#endif // (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD)
noway_assert(destRegNum != REG_NA);
if (destRegNum != regNum)
{
argRegCount = 2;
}
#endif
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
if (varTypeIsStruct(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
argRegCount = 2;
// but mark argNum as processed and clear regNum from the live mask.
destRegNum = regNum;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#if defined(_TARGET_ARM64_) && defined(FEATURE_SIMD)
if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
getEmitter()->emitMarkPrologEnd();
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
// The unused bits of Vector3 arguments must be cleared
// since native compiler doesn't initize the upper bits to zeros.
//
// genFnPrologCalleeRegArgs() for argument registers and
// genEnregisterIncomingStackArgs() for stack arguments.
genClearStackVec3ArgUpperBits();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+#endif // UNIX_AMD64_ABI && FEATURE_SIMD
/*-----------------------------------------------------------------------------
* Take care of register arguments first
//
unsigned CodeGen::getFirstArgWithStackSlot()
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARMARCH_)
+#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARMARCH_)
unsigned baseVarNum = 0;
#if defined(FEATURE_UNIX_AMR64_STRUCT_PASSING)
baseVarNum = compiler->lvaFirstStackIncomingArgNum;
return false;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
return varTypeIsStruct(treeNode);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
assert(!varTypeIsStruct(treeNode));
return false;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
//------------------------------------------------------------------------
assert(treeNode->OperGet() == GT_RETURN);
GenTree* op1 = treeNode->gtGetOp1();
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
{
assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Structs of size >=9 and <=16 are returned in two return registers on x64 Unix.
assert(varTypeIsStruct(treeNode));
}
varDsc->lvRegNum = REG_STK;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
+#else // !UNIX_AMD64_ABI && !_TARGET_X86_
assert(!"Unreached");
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
+#endif // !UNIX_AMD64_ABI && !_TARGET_X86_
}
//------------------------------------------------------------------------
// must be cleared to zeroes. The native compiler doesn't clear the upper bits
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of Vector argument on stack are always cleared to zero.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
}
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
+#endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
#endif // FEATURE_PUT_STRUCT_ARG_STK
// Generate code for CpObj nodes wich copy structs that have interleaved
continue;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
}
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
regNumber argReg = curArgTabEntry->regNum;
genConsumeReg(argNode);
#endif // FEATURE_VARARG
}
-#if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI)
// The call will pop its arguments.
// for each putarg_stk:
ssize_t stackArgBytes = 0;
}
args = args->gtOp.gtOp2;
}
-#endif // defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI)
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
continue;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(varDsc))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
}
}
else
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
LclVarDsc* varDsc = &(compiler->lvaTable[baseVarNum]);
assert(varDsc != nullptr);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#endif // !DEBUG
}
else
{
unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (varTypeIsStruct(targetType))
{
m_stkArgVarNum = BAD_VAR_NUM;
return;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
noway_assert(targetType != TYP_STRUCT);
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->gtRegNum;
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
assert(targetType != TYP_STRUCT);
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
}
assert(structSize > 0);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// An 8-byte struct may need to be passed in a floating point register
// So we always consult the struct "Classifier" routine
else // Not an HFA struct type
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The case of (structDesc.eightByteCount == 1) should have already been handled
if (structDesc.eightByteCount > 1)
}
assert(structSize > 0);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// An 8-byte struct may need to be returned in a floating point register
// So we always consult the struct "Classifier" routine
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef _TARGET_64BIT_
// Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled
else // Not an HFA struct type
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The case of (structDesc.eightByteCount == 1) should have already been handled
if (structDesc.eightByteCount > 1)
return result;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// GetTypeFromClassificationAndSizes:
// Returns the type of the eightbyte accounting for the classification and size of the eightbyte.
GetStructTypeOffset(structDesc, type0, type1, offset0, offset1);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
/*****************************************************************************/
/*****************************************************************************/
struct fgArgTabEntry
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
fgArgTabEntry()
{
otherRegNum = REG_NA;
isStruct = false; // is this a struct arg
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
GenTree* node; // Initially points at the Op1 field of 'parent', but if the argument is replaced with an GT_ASG or
// placeholder
bool isNonStandard : 1; // True if it is an arg that is passed in a reg other than a standard arg reg, or is forced
// to be on the stack despite its arg list position.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
bool isStruct : 1; // True if this is a struct arg
regNumber otherRegNum; // The (second) register to use when passing this argument.
fgArgTabEntry* AddRegArg(
unsigned argNum, GenTree* node, GenTree* parent, regNumber regNum, unsigned numRegs, unsigned alignment);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTree* parent,
const bool isStruct,
const regNumber otherRegNum = REG_NA,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTree* parent,
unsigned numSlots,
- unsigned alignment FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct));
+ unsigned alignment UNIX_AMD64_ABI_ONLY_ARG(const bool isStruct));
void RemorphReset();
fgArgTabEntry* RemorphRegArg(
unsigned short lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Only for AMD64 System V cache the first caller stack homed argument.
unsigned lvaFirstStackIncomingArgNum; // First argument with stack slot in the caller.
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
- GenTree* fgMakeTmpArgNode(
- unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters));
+ GenTree* fgMakeTmpArgNode(unsigned tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(const bool passedInRegisters));
// The following check for loops that don't execute calls
bool fgLoopCallMarked;
void fgMakeOutgoingStructArgCopy(GenTreeCall* call,
GenTree* args,
unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
+ CORINFO_CLASS_HANDLE copyBlkClass UNIX_AMD64_ABI_ONLY_ARG(
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structDescPtr));
void fgFixupStructReturn(GenTree* call);
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
static HelperCallProperties s_helperCallProperties;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
unsigned __int8* offset1);
void fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgument);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame)
{
#ifdef _TARGET_AMD64_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// On amd64, every param has a stack location, except on Unix-like systems.
assert(varDsc->lvIsParam);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#elif !defined(LEGACY_BACKEND)
// For !LEGACY_BACKEND on other targets, a stack parameter that is enregistered or prespilled
// for profiling on ARM will have a stack location.
// to accommodate irregular sized structs, they are passed byref
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
CORINFO_CLASS_HANDLE argClass;
CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
var_types argType = JITtype2varType(argTypeJit);
unsigned structSize = info.compCompHnd->getClassSize(argClass);
return structSize; // TODO: roundUp() needed here?
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return TARGET_POINTER_SIZE;
#else // !_TARGET_AMD64_
* ICorStaticInfo wrapper functions
*/
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
void Compiler::dumpSystemVClassificationType(SystemVClassificationType ct)
#endif // DEBUG
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
{
call->cdByrefRegs = (regMaskSmall)emitThisByrefRegs;
#if EMIT_TRACK_STACK_DEPTH
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
noway_assert(FitsIn<USHORT>(emitCurStackLvl / ((unsigned)sizeof(unsigned))));
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#endif
// Append the call descriptor to the list */
byrefRegs |= RBM_EAX;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// If is a multi-register return method is called, mark RDX appropriately (for System V AMD64).
if (id->idIsLargeCall())
{
byrefRegs |= RBM_RDX;
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// If the GC register set has changed, report the new set
if (gcrefRegs != emitThisGCrefRegs)
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
-#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
-#endif // defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(FEATURE_HFA) || defined(UNIX_AMD64_ABI)
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
// When FEATURE_MULTIREG_ARGS is defined we can get here with GT_OBJ tree.
// This happens when we have a struct that is passed in multiple registers.
//
-// Also note that when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined the GT_LDOBJ
+// Also note that when UNIX_AMD64_ABI is defined the GT_LDOBJ
// later gets converted to a GT_FIELD_LIST with two GT_LCL_FLDs in Lower/LowerXArch.
//
if (curArgTabEntry->numRegs >= 2)
{
regNumber otherRegNum;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
assert(curArgTabEntry->numRegs == 2);
otherRegNum = curArgTabEntry->otherRegNum;
#else
otherRegNum = (regNumber)(((unsigned)curArgTabEntry->regNum) + curArgTabEntry->numRegs - 1);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (listCount == -1)
{
{
assert(varTypeIsStruct(returnType));
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
regNumber resultReg = REG_NA;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
assert(OperGet() == from->OperGet());
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
#else // we have RyuJIT backend and FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// For UNIX ABI we currently only allow a GT_FIELD_LIST of GT_LCL_FLDs nodes
GenTree* gtListPtr = this;
while (gtListPtr != nullptr)
}
gtListPtr = gtListPtr->MoveNext();
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Note that for non-UNIX ABI the GT_FIELD_LIST may contain any node
//
GenTree* dest = nullptr;
unsigned destFlags = 0;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
// TODO-ARM-BUG: Does ARM need this?
// TODO-ARM64-BUG: Does ARM64 need this?
src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src));
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
src->gtOper == GT_COMMA ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
if (destAddr->OperGet() == GT_ADDR)
{
GenTree* destNode = destAddr->gtGetOp1();
// but that method has not been updadted to include ARM.
impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
lcl->gtFlags |= GTF_DONT_CSE;
-#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(UNIX_AMD64_ABI)
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
retTypeDesc->InitializeStructReturnType(this, retClsHnd);
#endif // FEATURE_MULTIREG_RET
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
}
-#else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // not UNIX_AMD64_ABI
// Check for TYP_STRUCT type that wraps a primitive type
// Such structs are returned using a single register
#endif // FEATURE_MULTIREG_RET
}
-#endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // not UNIX_AMD64_ABI
return call;
}
#if defined(_TARGET_XARCH_)
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// No VarArgs for CoreCLR on x64 Unix
assert(!info.compIsVarArgs);
return impAssignMultiRegTypeToVar(op, retClsHnd);
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
assert(info.compRetNativeType != TYP_STRUCT);
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
if (varTypeIsStruct(op1))
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
op1->AsCall()->gtCallType == CT_HELPER)
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
}
(unsigned)CHECK_SPILL_ALL);
}
-#if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
#if defined(_TARGET_ARM_)
// TODO-ARM64-NYI: HFA
// TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
if (IsHfa(retClsHnd))
{
// Same as !IsHfa but just don't bother with impAssignStructPtr.
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
if (fgNeedReturnSpillTemp())
{
{
#if defined(_TARGET_ARM_)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(UNIX_AMD64_ABI)
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
}
}
else
#define INDEBUG_LDISASM_COMMA(x)
#endif
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x) , x
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x) x
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x)
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND))
+#if defined(UNIX_AMD64_ABI)
+#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
+#define UNIX_AMD64_ABI_ONLY(x) x
+#else // !defined(UNIX_AMD64_ABI)
+#define UNIX_AMD64_ABI_ONLY_ARG(x)
+#define UNIX_AMD64_ABI_ONLY(x)
+#endif // defined(UNIX_AMD64_ABI)
+
+#if defined(UNIX_AMD64_ABI) || (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND))
#define FEATURE_PUT_STRUCT_ARG_STK 1
#define PUT_STRUCT_ARG_STK_ONLY_ARG(x) , x
#define PUT_STRUCT_ARG_STK_ONLY(x) x
-#else // !(defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
+#else // !(defined(UNIX_AMD64_ABI)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
#define PUT_STRUCT_ARG_STK_ONLY_ARG(x)
#define PUT_STRUCT_ARG_STK_ONLY(x)
-#endif // !(defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
+#endif // !(defined(UNIX_AMD64_ABI)|| (!defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)))
#if defined(UNIX_AMD64_ABI)
#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
#define MULTIREG_HAS_SECOND_GC_RET 1
#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x
#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
#define MULTIREG_HAS_SECOND_GC_RET 0
#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x)
#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x)
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// To get rid of warning 4701 : local variable may be used without being initialized
#define DUMMY_INIT(x) (x)
lvaSIMDInitTempVarNum = BAD_VAR_NUM;
#endif // FEATURE_SIMD
lvaCurEpoch = 0;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
lvaFirstStackIncomingArgNum = BAD_VAR_NUM;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
/*****************************************************************************/
}
}
#else // !_TARGET_ARM_
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
if (varTypeIsStruct(argType))
{
}
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#endif // !_TARGET_ARM_
// The final home for this incoming register might be our local stack frame.
bool canPassArgInRegisters = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argType))
{
canPassArgInRegisters = structDesc.passedInRegisters;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister);
}
varDsc->lvOtherArgReg = REG_NA;
#endif // FEATURE_MULTIREG_ARGS
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
unsigned secondAllocatedRegArgNum = 0;
var_types firstEightByteType = TYP_UNDEF;
var_types secondEightByteType = TYP_UNDEF;
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots);
}
#if FEATURE_MULTIREG_ARGS
if (varTypeIsStruct(argType))
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
varDsc->lvArgReg = genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType);
// If there is a second eightbyte, get a register for it too and map the arg to the reg number.
varDsc->addPrefReg(genRegMask(varDsc->lvOtherArgReg), this);
}
#endif // _TARGET_ARM64_
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
}
else
#endif // FEATURE_MULTIREG_ARGS
{
printf("Arg #%u passed in register(s) ", varDscInfo->varNum);
bool isFloat = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// In case of one eightbyte struct the type is already normalized earlier.
// The varTypeIsFloating(argType) is good for this case.
if (varTypeIsStruct(argType) && (structDesc.eightByteCount >= 1))
isFloat = varTypeIsFloating(firstEightByteType);
}
else
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else
{
isFloat = varTypeIsFloating(argType);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !UNIX_AMD64_ABI
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argType))
{
// Print both registers, just to be clear
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->lvArgReg, argType);
#endif // FEATURE_FASTTAILCALL
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The arg size is returning the number of bytes of the argument. For a struct it could return a size not a
// multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE,
// so round it up.
compArgSize += (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
compArgSize += argSize;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
if (info.compIsVarArgs || isHfaArg || isSoftFPPreSpill)
{
#if defined(_TARGET_X86_)
return true;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)
+#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)
if (howToPassStruct == SPK_ByValue)
{
assert(type == TYP_STRUCT);
#if defined(WINDOWS_AMD64_ABI)
// Structs are either passed by reference or can be passed by value using one pointer
stackSize = TARGET_POINTER_SIZE;
-#elif defined(_TARGET_ARM64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI)
// lvSize performs a roundup.
stackSize = this->lvSize();
}
#endif // defined(_TARGET_ARM64_)
-#else // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI
NYI("Unsupported target.");
unreached();
-#endif // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI
}
else
{
var_types type = TypeGet();
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (type == TYP_STRUCT)
{
NYI("lvaArgType");
}
-#else //! FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else //! UNIX_AMD64_ABI
if (type == TYP_STRUCT)
{
switch (lvExactSize)
break;
}
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if (type == TYP_STRUCT)
{
#endif // ASSERTION_PROP
bool allowStructs = false;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On System V the type of the var could be a struct type.
allowStructs = varTypeIsStruct(varDsc);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
/* Variables must be used as the same type throughout the method */
noway_assert(tiVerificationNeeded || varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
{
unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On the stack frame the homed arg always takes a full number of slots
// for proper stack alignment. Make sure the real struct size is properly rounded up.
argumentSize = (unsigned)roundUp(argumentSize, TARGET_POINTER_SIZE);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
argOffs =
lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
set(JIT_ARCH_ALTJIT_SOURCES ${JIT_I386_SOURCES})
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-DUNIX_AMD64_ABI)
- add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
set(JIT_ARCH_ALTJIT_SOURCES ${JIT_AMD64_SOURCES})
else()
clr_unknown_arch()
// call, arg, and info must be non-null.
//
// Notes:
-// For System V systems with native struct passing (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined)
+// For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
// this method allocates a single GT_PUTARG_REG for 1 eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
// for two eightbyte structs.
//
// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
-// (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined) this method also sets the GC pointers count and the pointers
+// (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
// layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
// (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
//
bool updateArgTable = true;
bool isOnStack = true;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (varTypeIsStruct(type))
{
isOnStack = !info->structDesc.passedInRegisters;
{
isOnStack = info->regNum == REG_STK;
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
isOnStack = info->regNum == REG_STK;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
#ifdef _TARGET_ARMARCH_
// Mark contained when we pass struct
{
if (!isOnStack)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (info->isStruct)
{
// The following code makes sure a register passed struct arg is moved to
}
}
else
-#else // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // not defined(UNIX_AMD64_ABI)
#if FEATURE_MULTIREG_ARGS
if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
{
}
else
#endif // FEATURE_MULTIREG_ARGS
-#endif // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // not defined(UNIX_AMD64_ABI)
{
putArg = comp->gtNewPutArgReg(type, arg, info->regNum);
}
void buildUpperVectorRestoreRefPositions(GenTree* tree, LsraLocation currentLoc, VARSET_VALARG_TP liveLargeVectors);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// For AMD64 on SystemV machines. This method
// is called as replacement for raUpdateRegStateForArg
// that is used on Windows. On System V systems a struct can be passed
// partially using registers from the 2 register files.
void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
// Update reg state for an incoming register argument
void updateRegStateForArg(LclVarDsc* argDsc);
}
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// unixAmd64UpdateRegStateForArg: Sets the register state for an argument of type STRUCT for System V systems.
//
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
//------------------------------------------------------------------------
// updateRegStateForArg: Updates rsCalleeRegArgMaskLiveIn for the appropriate
//
void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// For System V AMD64 calls the argDsc can have 2 registers (for structs.)
// Handle them here.
if (varTypeIsStruct(argDsc))
unixAmd64UpdateRegStateForArg(argDsc);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
RegState* intRegState = &compiler->codeGen->intRegState;
RegState* floatRegState = &compiler->codeGen->floatRegState;
HandleFloatVarArgs(call, argNode, &callHasFloatRegArgs);
appendLocationInfoToList(argNode);
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
else if (argNode->OperGet() == GT_FIELD_LIST)
{
for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
appendLocationInfoToList(entry->Current());
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef DEBUG
// In DEBUG only, check validity with respect to the arg table entry.
#endif // FEATURE_PUT_STRUCT_ARG_STK
continue;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (argNode->OperGet() == GT_FIELD_LIST)
{
assert(argNode->isContained());
}
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
const regNumber argReg = curArgTabEntry->regNum;
assert(argNode->gtRegNum == argReg);
return curArgTabEntry;
}
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTree* parent,
return curArgTabEntry;
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTree* parent,
unsigned numSlots,
- unsigned alignment FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct))
+ unsigned alignment UNIX_AMD64_ABI_ONLY_ARG(const bool isStruct))
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// The node of the ArgTabEntry could change after remorphing - it could be rewritten to a cpyblk or a
// PlaceHolder node (in case of needed late argument, for example.)
// This reqires using of an extra flag. At creation time the state is right, so
// and this assert enforces that.
assert((varTypeIsStruct(node) && isStruct) || (!varTypeIsStruct(node) && !isStruct));
curArgTabEntry->isStruct = isStruct; // is this a struct arg
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->argNum = argNum;
curArgTabEntry->node = node;
#endif
else // we have a register argument, next we look for a struct type.
{
- if (varTypeIsStruct(argx) FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(|| curArgTabEntry->isStruct))
+ if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
// Return Value:
// the newly created temp var tree.
-GenTree* Compiler::fgMakeTmpArgNode(
- unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters))
+GenTree* Compiler::fgMakeTmpArgNode(unsigned tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(const bool passedInRegisters))
{
LclVarDsc* varDsc = &lvaTable[tmpVarNum];
assert(varDsc->lvIsTemp);
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || (!defined(LEGACY_BACKEND) && defined(_TARGET_ARM_))
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
arg->gtFlags |= GTF_DONT_CSE;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
// Can this type be passed in a single register?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass by reference.
passedInRegisters = true;
type = structBaseType;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
}
else
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// TODO-Cleanup: Fix this - we should never have an address that is TYP_STRUCT.
var_types addrType = type;
#else
if (lvaIsMultiregStruct(varDsc))
{
// ToDo-ARM64: Consider using: arg->ChangeOper(GT_LCL_FLD);
- // as that is how FEATURE_UNIX_AMD64_STRUCT_PASSING works.
+ // as that is how UNIX_AMD64_ABI works.
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
{
// Create a copy of the temp to go into the late argument list
tmpVarNum = curArgTabEntry->tmpNum;
- defArg = compiler->fgMakeTmpArgNode(tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
- argTable[curInx]->structDesc.passedInRegisters));
+ defArg = compiler->fgMakeTmpArgNode(
+ tmpVarNum UNIX_AMD64_ABI_ONLY_ARG(argTable[curInx]->structDesc.passedInRegisters));
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
}
#endif
-#if defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
-#if defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
-#else // !defined(_TARGET_AMD64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(defArg))
{
}
}
-#endif // !(defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING))
+#endif // !(defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
bool callIsVararg = call->IsVarargs();
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// If fgMakeOutgoingStructArgCopy is called and copies are generated, hasStackArgCopy is set
// to make sure to call EvalArgsToTemp. fgMakeOutgoingStructArgCopy just marks the argument
// to need a temp variable, and EvalArgsToTemp actually creates the temp variable node.
/* this is a register argument - put it in the table */
call->fgArgInfo->AddRegArg(argIndex, argx, nullptr, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
,
false, REG_STK, nullptr
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
);
}
// this can't be a struct.
#endif // _TARGET_ARM_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
bool hasStructArgument = false; // @TODO-ARM64-UNIX: Remove this bool during a future refactoring
// hasMultiregStructArgs is true if there are any structs that are eligible for passing
if (reMorphing)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get the struct description for the already completed struct argument.
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, argx);
assert(fgEntryPtr != nullptr);
{
structDesc.CopyFrom(fgEntryPtr->structDesc);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
assert(argEntry != nullptr);
if (argEntry->IsBackFilled())
if (argx->IsArgPlaceHolderNode() || (!isStructArg))
{
#if defined(_TARGET_AMD64_)
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
hasMultiregStructArgs = true;
}
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if (isStructArg)
{
isStructArg = true;
}
#ifdef _TARGET_AMD64_
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (varTypeIsStruct(argx))
{
size = info.compCompHnd->getClassSize(impGetRefAnyClass());
eeGetSystemVAmd64PassStructInRegisterDescriptor(impGetRefAnyClass(), &structDesc);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
size = 1;
}
}
CORINFO_CLASS_HANDLE objClass = argObj->gtObj.gtClass;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
unsigned originalSize = info.compCompHnd->getClassSize(objClass);
originalSize = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
}
#endif // _TARGET_ARM64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// On System V OS-es a struct is never passed by reference.
// It is either passed by value on the stack or in registers.
bool passStructInRegisters = false;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
bool passStructByRef = false;
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !UNIX_AMD64_ABI
// The following if-then-else needs to be carefully refactored.
// Basically the else portion wants to turn a struct load (a GT_OBJ)
// into a GT_IND of the appropriate size.
// It can do this with structs sizes that are 1, 2, 4, or 8 bytes.
- // It can't do this when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined (Why?)
- // TODO-Cleanup: Remove the #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING below.
+ // It can't do this when UNIX_AMD64_ABI is defined (Why?)
+ // TODO-Cleanup: Remove the #ifndef UNIX_AMD64_ABI below.
// It also can't do this if we have a HFA arg,
// unless we have a 1-elem HFA in which case we want to do the optimization.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef _TARGET_X86_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// Check for struct argument with size 1, 2, 4 or 8 bytes
// As we can optimize these by turning them into a GT_IND of the correct type
//
!isPow2(originalSize) || // it is not a power of two (1, 2, 4 or 8)
(isHfaArg && (hfaSlots != 1))) // it is a one element HFA struct
#endif // !_TARGET_ARM_
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
// Normalize 'size' to the number of pointer sized items
// 'size' is the number of register slots that we will use to pass the argument
size = roundupSize / TARGET_POINTER_SIZE;
#if defined(_TARGET_AMD64_)
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
copyBlkClass = objClass;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
if (!structDesc.passedInRegisters)
{
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
copyBlkClass = objClass;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#elif defined(_TARGET_ARM64_)
if ((size > 2) && !isHfaArg)
{
#endif
#endif // _TARGET_ARM_
}
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
// TODO-Amd64-Unix: Since the else part below is disabled for UNIX_AMD64, copies are always
// generated for struct 1, 2, 4, or 8.
else // We have a struct argument with size 1, 2, 4 or 8 bytes
}
#endif
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#endif // not _TARGET_X86_
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (varTypeIsStruct(structBaseType) &&
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
!passStructInRegisters
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
!passStructByRef
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // !defined(UNIX_AMD64_ABI)
)
{
if (isHfaArg && passUsingFloatRegs)
//
if (isRegParamType(genActualType(argx->TypeGet()))
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#endif
)
#if defined(UNIX_AMD64_ABI)
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
if (passUsingFloatRegs)
{
if (isRegArg)
{
regNumber nextRegNum = REG_STK;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
}
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
}
#ifdef _TARGET_AMD64_
-#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
// This is a register argument - put it in the table
newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, argAlign
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
,
isStructArg, nextOtherRegNum, &structDesc
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
);
newArgEntry->SetIsHfaRegArg(passUsingFloatRegs &&
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
{
if (passUsingFloatRegs)
{
else
{
// This is a stack argument - put it in the table
- call->fgArgInfo->AddStkArg(argIndex, argx, args, size,
- argAlign FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(isStructArg));
+ call->fgArgInfo->AddStkArg(argIndex, argx, args, size, argAlign UNIX_AMD64_ABI_ONLY_ARG(isStructArg));
}
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
noway_assert(!reMorphing);
- fgMakeOutgoingStructArgCopy(call, args, argIndex,
- copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(&structDesc));
+ fgMakeOutgoingStructArgCopy(call, args, argIndex, copyBlkClass UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
hasStackArgCopy = true;
#endif
}
}
#endif // _TARGET_X86_ && !LEGACY_BACKEND
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (isStructArg && !isRegArg)
{
nonRegPassedStructSlots += size;
}
else
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
argSlots += size;
}
// all cases of fgMakeOutgoingStructArgCopy() being called. hasStackArgCopy
// is added to make sure to call EvalArgsToTemp.
if (!reMorphing && (call->fgArgInfo->HasRegArgs()
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
|| hasStackArgCopy
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
))
{
// This is the first time that we morph this call AND it has register arguments.
}
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Rewrite the struct args to be passed by value on stack or in registers.
fgMorphSystemVStructArgs(call, hasStructArgument);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !UNIX_AMD64_ABI
#ifndef LEGACY_BACKEND
// In the future we can migrate UNIX_AMD64 to use this
}
#endif // LEGACY_BACKEND
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef DEBUG
if (verbose)
#pragma warning(pop)
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// fgMorphSystemVStructArgs:
// Rewrite the struct args to be passed by value on stack or in registers.
//
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
GenTreeCall* call,
GenTree* args,
unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
- const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
+ CORINFO_CLASS_HANDLE copyBlkClass
+ UNIX_AMD64_ABI_ONLY_ARG(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
{
GenTree* argx = args->Current();
noway_assert(argx->gtOper != GT_MKREFANY);
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression
- GenTree* arg = fgMakeTmpArgNode(tmp FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(structDescPtr->passedInRegisters));
+ GenTree* arg = fgMakeTmpArgNode(tmp UNIX_AMD64_ABI_ONLY_ARG(structDescPtr->passedInRegisters));
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
assert(call->TypeGet() != TYP_STRUCT);
#endif
-#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(UNIX_AMD64_ABI)
// If it was a struct return, it has been transformed into a call
// with a return buffer (that returns TYP_VOID) or into a return
// of a primitive/enregisterable type
hasMultiByteStackArgs = hasMultiByteStackArgs ||
!VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), objClass, &typeSize, false);
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
assert(objClass != nullptr);
++calleeArgRegCount;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else
assert(!"Target platform ABI rules regarding passing struct type args in registers");
// This is a HFA, use float 0.
callType = TYP_FLOAT;
}
-#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#elif defined(UNIX_AMD64_ABI)
// Return a dummy node, as the return is already removed.
if (varTypeIsStruct(callType))
{
void Compiler::fgMarkImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
#ifdef DEBUG
if (verbose)
{
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
//------------------------------------------------------------------------
void Compiler::fgRetypeImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
#ifdef DEBUG
if (verbose)
{
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
//------------------------------------------------------------------------
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
-#if (defined(_TARGET_AMD64_) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
}
}
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
/*****************************************************************************
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
-#if (!defined(_TARGET_AMD64_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) && !defined(_TARGET_ARM64_)
+#if (!defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)) && !defined(_TARGET_ARM64_)
return false;
-#else // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#else // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
bool changed = false;
}
return changed;
-#endif // (_TARGET_AMD64_ && !FEATURE_UNIX_AMD64_STRUCT_PASSING) || _TARGET_ARM64_
+#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_
}
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
remove_definitions(-DUNIX_X86_ABI)
elseif(CLR_CMAKE_PLATFORM_ARCH_AMD64)
remove_definitions(-DUNIX_AMD64_ABI)
- remove_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
else()
clr_unknown_arch()
endif()
if (lclVarDsc1->lvIsRegArg)
{
bool isStructHandled = false;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
if (varTypeIsStruct(lclVarDsc1))
{
isStructHandled = true;
}
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
if (!isStructHandled)
{
#ifdef DEBUG
#define REG_LNGRET REG_EAX
#define RBM_LNGRET RBM_EAX
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#define REG_INTRET_1 REG_RDX
#define RBM_INTRET_1 RBM_RDX
#define REG_LNGRET_1 REG_RDX
#define RBM_LNGRET_1 RBM_RDX
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#define REG_FLOATRET REG_XMM0
#define REG_DOUBLERET REG_XMM0
#define RBM_DOUBLERET RBM_XMM0
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#define REG_FLOATRET_1 REG_XMM1
#define RBM_FLOATRET_1 RBM_XMM1
#define REG_DOUBLERET_1 REG_XMM1
#define RBM_DOUBLERET_1 RBM_XMM1
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#define REG_FPBASE REG_EBP
#define RBM_FPBASE RBM_EBP
#define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH
// The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper.
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// See vm\amd64\unixasmhelpers.S for more details.
//
// On Unix a struct of size >=9 and <=16 bytes in size is returned in two return registers.
cmp ecx, 8
je LOCAL_LABEL(ReturnsDouble)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Struct with two integer eightbytes
cmp ecx, 16
jne LOCAL_LABEL(NotTwoIntegerEightbytes)
jne LOCAL_LABEL(Epilog) // unexpected
movsd real8 ptr [rbx+CallDescrData__returnValue], xmm0
movsd real8 ptr [rbx+CallDescrData__returnValue + 8], xmm1
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
jmp LOCAL_LABEL(Epilog)
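The stub above dispatches on the return-value size; per its comment, a System V AMD64 struct of 9 to 16 bytes comes back split across two return registers, and the stub stores whichever register pair was used into CallDescrData__returnValue. A hedged illustration with hypothetical struct types (not taken from the change):

    // Two integer eightbytes -> returned in RAX:RDX.
    struct TwoInts    { long long a; long long b; };   // 16 bytes
    // Two SSE eightbytes -> returned in XMM0:XMM1 (the movsd pair above).
    struct TwoDoubles { double x; double y; };          // 16 bytes

    TwoInts    MakeInts()    { return { 1, 2 }; }
    TwoDoubles MakeDoubles() { return { 1.0, 2.0 }; }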
pRD->pCurrentContextPointers->Rdi = NULL;
#endif
pRD->pCurrentContextPointers->Rcx = NULL;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
pRD->pCurrentContextPointers->Rdx = (PULONG64)&m_Args->Rdx;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
pRD->pCurrentContextPointers->Rdx = NULL;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
pRD->pCurrentContextPointers->R8 = NULL;
pRD->pCurrentContextPointers->R9 = NULL;
pRD->pCurrentContextPointers->R10 = NULL;
m_argLocDescForStructInRegs(argLocDescForStructInRegs)
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
_ASSERTE((argLocDescForStructInRegs != NULL) || (offset != TransitionBlock::StructInRegsOffset));
#elif defined(_TARGET_ARM64_)
// This assert is not interesting on arm64. argLocDescForStructInRegs could be
#endif // !DACCESS_COMPILE
#endif // defined(_TARGET_ARM64_)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Returns true if the ArgDestination represents a struct passed in registers.
bool IsStructPassedInRegs()
_ASSERTE(remainingBytes == 0);
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
};
#ifdef _DEBUG
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// Validate that the return value is not too big for the buffer passed
if (m_pMD->GetMethodTable()->IsRegPassedStruct())
{
_ASSERTE(cbReturnValue >= thReturnValueType.GetSize());
}
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// The metasig should be reset
_ASSERTE(m_methodSig.GetArgNum() == 0);
// We need to pass in a pointer, but be careful of the ARG_SLOT calling convention. We might already have a pointer in the ARG_SLOT.
PVOID pSrc = stackSize > sizeof(ARG_SLOT) ? (LPVOID)ArgSlotToPtr(pArguments[arg]) : (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest.IsStructPassedInRegs())
{
TypeHandle th;
argDest.CopyStructToRegisters(pSrc, th.AsMethodTable()->GetNumInstanceFieldBytes(), 0);
}
else
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
{
PVOID pDest = argDest.GetDestinationAddress();
int m_idxStack; // First stack slot used (or -1)
int m_cStack; // Count of stack slots used (or 0)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
EEClass* m_eeClass; // For structs passed in register, it points to the EEClass of the struct
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#if defined(_TARGET_ARM64_)
bool m_isSinglePrecision; // For determining if HFA is single or double
#if defined(_TARGET_ARM64_)
m_isSinglePrecision = FALSE;
#endif // defined(_TARGET_ARM64_)
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_eeClass = NULL;
#endif
}
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
return offset >= sizeof(TransitionBlock);
#else
int ofsArgRegs = GetOffsetOfArgumentRegisters();
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
_ASSERTE(offset != TransitionBlock::StructInRegsOffset);
#endif
return (offset - GetOffsetOfArgumentRegisters()) / TARGET_POINTER_SIZE;
static BOOL IsFloatArgumentRegisterOffset(int offset)
{
LIMITED_METHOD_CONTRACT;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
return (offset != TransitionBlock::StructInRegsOffset) && (offset < 0);
#else
return offset < 0;
static BOOL HasFloatRegister(int offset, ArgLocDesc* argLocDescForStructInRegs)
{
LIMITED_METHOD_CONTRACT;
- #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ #if defined(UNIX_AMD64_ABI)
if (offset == TransitionBlock::StructInRegsOffset)
{
return argLocDescForStructInRegs->m_cFloatReg > 0;
}
static const int InvalidOffset = -1;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Special offset value to represent struct passed in registers. Such a struct can span both
// general purpose and floating point registers, so it can have two different offsets.
static const int StructInRegsOffset = -2;
{
LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// No arguments are passed by reference on AMD64 on Unix
return FALSE;
#else
LIMITED_METHOD_CONTRACT;
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef");
return FALSE;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
return IsArgPassedByRef(size);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else
return (size > ENREGISTERED_PARAMTYPE_MAXSIZE);
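The two branches above capture the ABI difference this rename leaves intact: Windows AMD64 passes any argument that is not 1, 2, 4, or 8 bytes by reference to a caller-made copy, while System V AMD64 never passes arguments by implicit reference, which is why the Unix path simply returns FALSE. A small hedged illustration (hypothetical struct, not from the change):

    struct S12 { int a; int b; int c; };  // 12 bytes
    // Windows AMD64: size is not 1/2/4/8, so the caller passes a pointer to a
    //                temporary copy (IsArgPassedByRef -> TRUE).
    // System V AMD64: the classifier packs S12 into two eightbytes and passes
    //                them in registers or on the stack, never by reference.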
ArgLocDesc* GetArgLocDescForStructInRegs()
{
-#if (defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined (_TARGET_ARM64_)
+#if defined(UNIX_AMD64_ABI) || defined (_TARGET_ARM64_)
return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL;
#else
return NULL;
{
LIMITED_METHOD_CONTRACT;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (m_hasArgLocDescForStructInRegs)
{
*pLoc = m_argLocDescForStructInRegs;
return;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (argOffset == TransitionBlock::StructInRegsOffset)
{
CorElementType m_argType;
int m_argSize;
TypeHandle m_argTypeHandle;
-#if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
+#if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
ArgLocDesc m_argLocDescForStructInRegs;
bool m_hasArgLocDescForStructInRegs;
-#endif // _TARGET_AMD64_ && UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // (_TARGET_AMD64_ && UNIX_AMD64_ABI) || _TARGET_ARM64_
#ifdef _TARGET_X86_
int m_curOfs; // Current position of the stack iterator
int m_idxGenReg; // Next general register to be assigned a value
int m_idxStack; // Next stack slot to be assigned a value
int m_idxFPReg; // Next floating point register to be assigned a value
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
bool m_fArgInRegisters; // Indicates that the current argument is stored in registers
-#endif
#else
int m_curOfs; // Current position of the stack iterator
#endif
m_argSize = argSize;
m_argTypeHandle = thValueType;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_hasArgLocDescForStructInRegs = false;
#endif
case ELEMENT_TYPE_VALUETYPE:
{
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
MethodTable *pMT = m_argTypeHandle.AsMethodTable();
if (pMT->IsRegPassedStruct())
{
cFPRegs = 0;
cGenRegs = 0;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
argSize = sizeof(TADDR);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
break;
}
return argOfs;
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_fArgInRegisters = false;
#endif
{
_ASSERTE(!thValueType.IsNull());
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
MethodTable *pMT = thValueType.AsMethodTable();
if (pMT->IsRegPassedStruct())
{
break;
}
-#else // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
if (thValueType.IsHFA() && !this->IsVarArg())
if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
break;
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
#endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
int stackElemSize;
#ifdef _TARGET_AMD64_
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (m_fArgInRegisters)
{
// Arguments passed in registers don't consume any stack
}
stackElemSize = StackElemSize(GetArgSize());
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // UNIX_AMD64_ABI
// All stack arguments take just one stack slot on AMD64 because of arguments bigger
// than a stack slot are passed by reference.
stackElemSize = STACK_ELEM_SIZE;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#else // _TARGET_AMD64_
stackElemSize = StackElemSize(GetArgSize());
#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
e_ZERO_SIZED = 0x04,
// The size of the struct is explicitly specified in the meta-data.
e_HAS_EXPLICIT_SIZE = 0x08,
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
e_NATIVE_PASS_IN_REGISTERS = 0x10, // Flag wheter a native struct is passed in registers.
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
// HFA type of the unmanaged layout
e_R4_HFA = 0x10,
return m_cbPackingSize;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
bool IsNativeStructPassedInRegisters()
{
LIMITED_METHOD_CONTRACT;
return (m_bFlags & e_NATIVE_PASS_IN_REGISTERS) != 0;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
CorElementType GetNativeHFATypeRaw();
#ifdef FEATURE_HFA
m_bFlags |= (hfaType == ELEMENT_TYPE_R4) ? e_R4_HFA : e_R8_HFA;
}
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
void SetNativeStructPassedInRegisters()
{
LIMITED_METHOD_CONTRACT;
m_bFlags |= e_NATIVE_PASS_IN_REGISTERS;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
};
#define MODULE_NON_DYNAMIC_STATICS ((DWORD)-1)
DWORD m_cbModuleDynamicID;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Number of eightBytes in the following arrays
int m_numberEightBytes;
// Classification of the eightBytes
SystemVClassificationType m_eightByteClassifications[CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS];
// Size of data the eightBytes
unsigned int m_eightByteSizes[CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS];
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Set default values for optional fields.
inline void Init();
DWORD GetReliabilityContract();
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get number of eightbytes used by a struct passed in registers.
inline int GetNumberEightBytes()
{
GetOptionalFields()->m_eightByteSizes[i] = eightByteSizes[i];
}
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
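The optional fields above persist what ClassifyEightBytes computes for a register-passed struct: how many eightbytes it splits into, the classification of each, and each eightbyte's size. A hedged example of the stored values for a hypothetical 16-byte struct (values inferred from the System V classification rules, not quoted from the change):

    struct PointWithId { long long id; double value; };  // 16 bytes
    // m_numberEightBytes         == 2
    // m_eightByteClassifications == { SystemVClassificationTypeInteger,
    //                                 SystemVClassificationTypeSSE }
    // m_eightByteSizes           == { 8, 8 }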
#if defined(FEATURE_HFA)
bool CheckForHFA(MethodTable ** pByValueClassCache);
m_WinRTRedirectedTypeIndex = WinMDAdapter::RedirectedTypeIndex_Invalid;
#endif // FEATURE_COMINTEROP
m_cbModuleDynamicID = MODULE_NON_DYNAMIC_STATICS;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_numberEightBytes = 0;
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
#endif // !DACCESS_COMPILE
// Argument location description
ArgLocDesc* m_argLocDesc;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Current eightByte used for struct arguments in registers
int m_currentEightByte;
#endif
// Current stack slot index (relative to the ArgLocDesc::m_idxStack)
int m_currentStackSlotIndex;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Get next shuffle offset for struct passed in registers. There has to be at least one offset left.
UINT16 GetNextOfsInStruct()
{
_ASSERTE(false);
return 0;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
public:
ShuffleIterator(ArgLocDesc* argLocDesc)
:
m_argLocDesc(argLocDesc),
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
m_currentEightByte(0),
#endif
m_currentGenRegIndex(0),
bool HasNextOfs()
{
return (m_currentGenRegIndex < m_argLocDesc->m_cGenReg) ||
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
(m_currentFloatRegIndex < m_argLocDesc->m_cFloatReg) ||
#endif
(m_currentStackSlotIndex < m_argLocDesc->m_cStack);
{
int index;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Check if the argLocDesc is for a struct in registers
EEClass* eeClass = m_argLocDesc->m_eeClass;
return (UINT16)index | ShuffleEntry::REGMASK | ShuffleEntry::FPREGMASK;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// Shuffle any registers first (the order matters since otherwise we could end up shuffling a stack slot
// over a register we later need to shuffle down as well).
#endif
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// Return an index of argument slot. First indices are reserved for general purpose registers,
// the following ones for float registers and then the rest for stack slots.
// This index is independent of how many registers are actually used to pass arguments.
return index;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
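The helper above flattens every argument location into a single index space: general purpose argument registers first, then float registers, then stack slots. A hedged sketch of that numbering, assuming the System V AMD64 register counts (6 integer and 8 float argument registers; the function and enum names are illustrative only):

    // Slots 0..5  -> RDI, RSI, RDX, RCX, R8, R9
    // Slots 6..13 -> XMM0..XMM7
    // Slots 14+   -> stack slots, one per pointer-sized element
    enum class ArgKind { IntReg, FloatReg, Stack };
    int FlatArgSlotIndex(ArgKind kind, int indexWithinKind,
                         int cIntRegs = 6, int cFloatRegs = 8)
    {
        switch (kind)
        {
        case ArgKind::IntReg:   return indexWithinKind;                         // RDI..R9
        case ArgKind::FloatReg: return cIntRegs + indexWithinKind;              // XMM0..XMM7
        default:                return cIntRegs + cFloatRegs + indexWithinKind; // stack slot
        }
    }

This mirrors the argSlots computation a few lines below, which sizes the reordering space as NUM_FLOAT_ARGUMENT_REGISTERS + NUM_ARGUMENT_REGISTERS plus the stack-slot count.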
VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<ShuffleEntry> * pShuffleEntryArray)
{
ArgLocDesc sArgSrc;
ArgLocDesc sArgDst;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
int argSlots = NUM_FLOAT_ARGUMENT_REGISTERS + NUM_ARGUMENT_REGISTERS + sArgPlacerSrc.SizeOfArgStack() / sizeof(size_t);
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// If the target method in non-static (this happens for open instance delegates), we need to account for
// the implicit this parameter.
_ASSERTE(!iteratorDst.HasNextOfs());
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
// The Unix AMD64 ABI can cause a struct to be passed on stack for the source and in registers for the destination.
// That can cause some arguments that are passed on stack for the destination to be passed in registers in the source.
// An extreme example of that is e.g.:
}
}
while (reordered);
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
entry.srcofs = ShuffleEntry::SENTINEL;
entry.dstofs = 0;
// FC_TypedByRef should be used for TypedReferences in FCall signatures
-#if defined(UNIX_AMD64_ABI) && !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-// Explicitly pass the TypedReferences by reference
-#define FC_TypedByRef TypedByRef&
-#define FC_DECIMAL DECIMAL&
-#else
#define FC_TypedByRef TypedByRef
#define FC_DECIMAL DECIMAL
-#endif
// The fcall entrypoints has to be at unique addresses. Use this helper macro to make
MODE_PREEMPTIVE;
} CONTRACTL_END;
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
JIT_TO_EE_TRANSITION();
_ASSERTE(structPassInRegDescPtr != nullptr);
}
_ASSERTE(methodTablePtr != nullptr);
- // If we have full support for FEATURE_UNIX_AMD64_STRUCT_PASSING, and not just the interface,
+ // If we have full support for UNIX_AMD64_ABI, and not just the interface,
// then we've cached whether this is a reg passed struct in the MethodTable, computed during
// MethodTable construction. Otherwise, we are just building in the interface, and we haven't
// computed or cached anything, so we need to compute it now.
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
bool canPassInRegisters = useNativeLayout ? methodTablePtr->GetLayoutInfo()->IsNativeStructPassedInRegisters()
: methodTablePtr->IsRegPassedStruct();
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(UNIX_AMD64_ABI)
+ bool canPassInRegisters = false;
SystemVStructRegisterPassingHelper helper((unsigned int)th.GetSize());
- bool canPassInRegisters = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ if (th.GetSize() <= CLR_SYSTEMV_MAX_STRUCT_BYTES_TO_PASS_IN_REGISTERS)
+ {
+ canPassInRegisters = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
+ }
+#endif // !defined(UNIX_AMD64_ABI)
if (canPassInRegisters)
{
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
SystemVStructRegisterPassingHelper helper((unsigned int)th.GetSize());
bool result = methodTablePtr->ClassifyEightBytes(&helper, 0, 0, useNativeLayout);
// The answer must be true at this point.
_ASSERTE(result);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
structPassInRegDescPtr->passedInRegisters = true;
EE_TO_JIT_TRANSITION();
return true;
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#else // !defined(UNIX_AMD64_ABI_ITF)
return false;
-#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // !defined(UNIX_AMD64_ABI_ITF)
}
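The new size check above keeps the interface-only (altjit) path from handing an oversized struct to ClassifyEightBytes; anything larger than CLR_SYSTEMV_MAX_STRUCT_BYTES_TO_PASS_IN_REGISTERS (16 bytes) is passed in memory, so the descriptor can report not-passed-in-registers without classifying it. A hedged illustration with a hypothetical struct (not from the change):

    // 24 bytes > CLR_SYSTEMV_MAX_STRUCT_BYTES_TO_PASS_IN_REGISTERS, so the
    // guard above leaves canPassInRegisters == false and never calls
    // ClassifyEightBytes for it.
    struct ThreeLongs { long long a; long long b; long long c; };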
/*********************************************************************/
*pMT = pReturnTypeMT;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
if (pReturnTypeMT->IsRegPassedStruct())
{
return MetaSig::RETVALUETYPE;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
if (pReturnTypeMT->ContainsPointers())
{
//========================================================================================
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
#if defined(_DEBUG) && defined(LOGGING)
static
}
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
//==========================================================================================
typedef DPTR(MethodTableWriteableData) PTR_MethodTableWriteableData;
typedef DPTR(MethodTableWriteableData const) PTR_Const_MethodTableWriteableData;
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF
+#ifdef UNIX_AMD64_ABI_ITF
inline
SystemVClassificationType CorInfoType2UnixAmd64Classification(CorElementType eeType)
{
typedef DPTR(SystemVStructRegisterPassingHelper) SystemVStructRegisterPassingHelperPtr;
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF
+#endif // UNIX_AMD64_ABI_ITF
//===============================================================================================
//
// during object construction.
void CheckRunClassInitAsIfConstructingThrowing();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
// Builds the internal data structures and classifies struct eightbytes for Amd System V calling convention.
bool ClassifyEightBytes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
// Copy m_dwFlags from another method table
void CopyFlags(MethodTable * pOldMT)
private:
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#if defined(UNIX_AMD64_ABI_ITF)
void AssignClassifiedEightByteTypes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel) const;
// Builds the internal data structures and classifies struct eightbytes for Amd System V calling convention.
bool ClassifyEightBytesWithManagedLayout(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
bool ClassifyEightBytesWithNativeLayout(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool isNativeStruct);
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+#endif // defined(UNIX_AMD64_ABI_ITF)
DWORD GetClassIndexFromToken(mdTypeDef typeToken)
{
bool IsNativeHFA();
CorElementType GetNativeHFAType();
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
inline bool IsRegPassedStruct()
{
LIMITED_METHOD_CONTRACT;
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_IsRegStructPassed);
}
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(UNIX_AMD64_ABI)
#ifdef FEATURE_64BIT_ALIGNMENT
// Returns true iff the native view of this type requires 64-bit aligment.
enum_flag_HasPreciseInitCctors = 0x00000400, // Do we need to run class constructors at allocation time? (Not perf important, could be moved to EEClass
#if defined(FEATURE_HFA)
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#error Can't define both FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING
+#if defined(UNIX_AMD64_ABI)
+#error Can't define both FEATURE_HFA and UNIX_AMD64_ABI
#endif
enum_flag_IsHFA = 0x00000800, // This type is an HFA (Homogenous Floating-point Aggregate)
#endif // FEATURE_HFA
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
#if defined(FEATURE_HFA)
-#error Can't define both FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING
+#error Can't define both FEATURE_HFA and UNIX_AMD64_ABI
#endif
enum_flag_IsRegStructPassed = 0x00000800, // This type is a System V register passed struct.
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
enum_flag_IsByRefLike = 0x00001000,
#ifdef FEATURE_HFA
GetHalfBakedClass()->CheckForHFA(pByValueClassCache);
#endif
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
SystemVAmd64CheckForPassStructInRegister();
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
#ifdef FEATURE_HFA
-#error Can't have FEATURE_HFA and FEATURE_UNIX_AMD64_STRUCT_PASSING defined at the same time.
+#error Can't have FEATURE_HFA and UNIX_AMD64_ABI defined at the same time.
#endif // FEATURE_HFA
if (HasLayout())
{
SystemVAmd64CheckForPassNativeStructInRegister();
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
#ifdef FEATURE_HFA
if (HasLayout())
{
return (1 << (DWORD)(DWORD_PTR&)(pFD->m_pMTOfEnclosingClass));
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// checks whether the struct is enregisterable.
void MethodTableBuilder::SystemVAmd64CheckForPassStructInRegister()
{
eeClass->SetEightByteClassification(helper->eightByteCount, helper->eightByteClassifications, helper->eightByteSizes);
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
//---------------------------------------------------------------------------------------
//
VOID CheckForNativeHFA();
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// checks whether the struct is enregisterable.
void SystemVAmd64CheckForPassStructInRegister();
void SystemVAmd64CheckForPassNativeStructInRegister();
// Store the eightbyte classification into the EEClass
void StoreEightByteClassification(SystemVStructRegisterPassingHelper* helper);
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// this accesses the field size which is temporarily stored in m_pMTOfEnclosingClass
// during class loading. Don't use any other time
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_MODE_COOPERATIVE;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
return;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
// destOffset is only valid for Nullable<T> passed in registers
_ASSERTE(destOffset == 0);
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_MODE_COOPERATIVE;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
}
CONTRACTL_END;
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (argDest->IsStructPassedInRegs())
{
// We should only get here if we are unboxing a T as a Nullable<T>
return TRUE;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return UnBoxNoGC(argDest->GetDestinationAddress(), boxedVal, destMT);
}
return;
}
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if defined(UNIX_AMD64_ABI)
if (pSrc->IsStructPassedInRegs())
{
pSrc->ReportPointersFromStructInRegisters(fn, sc, pMT->GetNumInstanceFieldBytes());
return;
}
-#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
ReportPointersFromValueType(fn, sc, pMT, pSrc->GetDestinationAddress());
}
// ex: Windows/Unix ARM/ARM64, Unix-AMD64.
//
//
-// FEATURE_UNIX_AMD64_STRUCT_PASSING is a specific kind of FEATURE_MULTIREG_RETURN
+// UNIX_AMD64_ABI is a specific kind of FEATURE_MULTIREG_RETURN
// [GcInfo v1 and v2] specified by SystemV ABI for AMD64
//
return RT_ByRef;
}
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef UNIX_AMD64_ABI
// The Multi-reg return case using the classhandle is only implemented for AMD64 SystemV ABI.
// On other platforms, multi-reg return is not supported with GcInfo v1.
// So, the relevant information must be obtained from the GcInfo tables (which requires version2).
ReturnKind structReturnKind = GetStructReturnKind(regKinds[0], regKinds[1]);
return structReturnKind;
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // UNIX_AMD64_ABI
return RT_Scalar;
}
}
else
{
-#if !defined(FEATURE_MULTIREG_RETURN) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#if !defined(FEATURE_MULTIREG_RETURN) || defined(UNIX_AMD64_ABI)
// For ARM64 struct-return, GetReturnKindFromMethodTable() is not supported
_ASSERTE(returnKind == GetReturnKindFromMethodTable(pThread, codeInfo));
-#endif // !FEATURE_MULTIREG_RETURN || FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // !FEATURE_MULTIREG_RETURN || UNIX_AMD64_ABI
}
_ASSERTE(IsValidReturnKind(returnKind));