#else // !TARGET_ARM64
// There is no zero register on ARM32
unreached();
-#endif // !_TARGET_ARM64
+#endif // !TARGET_ARM64
}
else
{
#elif defined(TARGET_ARM64)
// Structs of size >=9 and <=16 are returned in two return registers on ARM64 and HFAs.
assert(varTypeIsStruct(treeNode));
-#endif // _TARGET_*
+#endif // TARGET*
// Assumption: current implementation requires that a multi-reg
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(TARGET_ARM64)
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
-#endif // _TARGET_*
+#endif // TARGET*
}
// Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper
GetEmitter()->emitIns_R_R(INS_mov, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum());
}
-#endif // _TARGET_*
+#endif // TARGET*
genProduceReg(treeNode);
}
// pushed ebp
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#else
-#error "Unknown _TARGET_"
-#endif // _TARGET_*
+#error "Unknown TARGET"
+#endif // TARGET*
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
callerSPtoSPdelta -= REGSIZE_BYTES;
}
#else
-#error "Unknown _TARGET_"
-#endif // _TARGET_*
+#error "Unknown TARGET"
+#endif // TARGET*
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
break;
#endif // SCALED_ADDR_MODES
-#endif // !_TARGET_ARMARCH
+#endif // !TARGET_ARMARCH
case GT_NOP:
break;
#endif // SCALED_ADDR_MODES
-#endif // !_TARGET_ARMARCH
+#endif // !TARGET_ARMARCH
case GT_NOP:
#else
assert(!"Unknown TARGET");
-#endif // _TARGET_*
+#endif // TARGET*
}
#if defined(TARGET_ARM)
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
#endif
fltInitReg = reg;
#elif defined(TARGET_ARM64)
// We will just zero out the entire vector register. This sets it to a double/float zero value
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B);
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
#endif
dblInitReg = reg;
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
-#endif // _TARGET_*
+#endif // TARGET*
// We need a register with value zero. Zero the initReg, if necessary, and set *pInitRegZeroed if so.
// Return the register to use. On ARM64, we never touch the initReg, and always just return REG_ZR.
}
#endif // !UNIX_AMD64_ABI
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
}
else if (genInitStkLclCnt > 0)
{
regSet.verifyRegUsed(REG_ECX);
}
else
-#endif // _TARGET_X86
+#endif // TARGET_X86
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Generate "add esp, <stack-size>" */
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regSet.verifyRegUsed(REG_ECX);
}
-#endif // _TARGET_X86
+#endif // TARGET_X86
else
{
// We need to make ESP point to the callee-saved registers
}
}
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
#if defined(FEATURE_EH_FUNCLETS)
}
}
-#else // _TARGET_*
+#else // TARGET*
/*****************************************************************************
*
}
}
-#endif // _TARGET_*
+#endif // TARGET*
/*-----------------------------------------------------------------------------
*
GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaPSPSym, 0);
-#else // _TARGET_*
+#else // TARGET*
NYI("Set function PSP sym");
-#endif // _TARGET_*
+#endif // TARGET*
}
#endif // FEATURE_EH_FUNCLETS
return baseVarNum;
#elif defined(TARGET_AMD64)
return 0;
-#else // _TARGET_X86
+#else // TARGET_X86
// Not implemented for x86.
NYI_X86("getFirstArgWithStackSlot not yet implemented for x86.");
return BAD_VAR_NUM;
#endif // !UNIX_X86_ABI_
#else // TARGET_X86
assert(bias == 0);
-#endif // !_TARGET_X86
+#endif // !TARGET_X86
}
#ifdef TARGET_X86
howToPassStruct = SPK_ByValue;
useType = TYP_STRUCT;
-#else // _TARGET_XXX_
+#else // TARGET_XXX
noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)");
-#endif // _TARGET_XXX_
+#endif // TARGET_XXX
}
}
else // (structSize > MAX_PASS_MULTIREG_BYTES)
howToPassStruct = SPK_ByReference;
useType = TYP_UNKNOWN;
-#else // _TARGET_XXX_
+#else // TARGET_XXX
noway_assert(!"Unhandled TARGET in getArgTypeForStruct");
-#endif // _TARGET_XXX_
+#endif // TARGET_XXX
}
}
howToReturnStruct = SPK_ByReference;
useType = TYP_UNKNOWN;
-#else // _TARGET_XXX_
+#else // TARGET_XXX
noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)");
-#endif // _TARGET_XXX_
+#endif // TARGET_XXX
}
}
else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0)
bool isPassedInFloatRegisters()
{
-#ifdef _TARGET_X86
+#ifdef TARGET_X86
return false;
#else
return isValidFloatArgReg(GetRegNum());
break;
}
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
if (retval == 0)
{
break;
}
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
/* no displacement */
break;
}
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
/* save displacement */
break;
} // end switch
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
return cb;
} // end if
pdis = DIS::PdisNew(DIS::distX8664);
#elif defined(TARGET_ARM64)
pdis = DIS::PdisNew(DIS::distArm64);
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
#endif
#include "disx86.h"
#elif defined(TARGET_ARM64)
#include "disarm64.h"
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
#endif
// otherwise will we pass this struct by value in multiple registers
#else
NYI("unknown target");
-#endif // defined(_TARGET_XXX_)
+#endif // defined(TARGET_XXX)
#endif // FEATURE_MULTIREG_ARGS
// we pass this struct by value in multiple registers
#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C
#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C
-#endif // _TARGET_XXX
+#endif // TARGET_XXX
// Make this an enum:
//
{
#ifdef TARGET_X86
return ((ins >= INS_imul_AX) && (ins <= INS_imul_DI));
-#else // _TARGET_AMD64
+#else // TARGET_AMD64
return ((ins >= INS_imul_AX) && (ins <= INS_imul_15));
#endif
}
{
#ifdef TARGET_X86
return false;
-#else // _TARGET_AMD64
+#else // TARGET_AMD64
return ((ins >= INS_imul_08) && (ins <= INS_imul_15));
#endif
}
{
return false;
}
-#else //!_TARGET_AMD64 = TARGET_X86
+#else //! TARGET_AMD64 = TARGET_X86
return false;
#endif //! TARGET_AMD64
}
#ifdef TARGET_AMD64
// how many bytes per instruction we format for
const size_t digits = 10;
-#else // _TARGET_X86
+#else // TARGET_X86
const size_t digits = 6;
#endif
printf(" ");
result.insLatency = PERFSCORE_LATENCY_1C;
}
break;
-#endif // _TARGET_X86
+#endif // TARGET_X86
#ifdef TARGET_AMD64
case INS_movsq:
}
}
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
case GT_CNS_LNG:
case GT_CNS_STR:
case GT_CNS_INT:
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
COMMON_CNS:
costSz = 6;
}
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
}
break;
-#if defined(_TARGET_XARCH4_) && defined(FEATURE_HW_INTRINSICS)
+#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (baseType)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
-#else // _TARGET_XXX_
+#else // TARGET_XXX
// This target needs support here!
//
{
#ifdef TARGET_64BIT
Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
-#else // _TARGET_64BIT
+#else // TARGET_64BIT
// [10/17/2013] Consider changing this: to put on my verification lawyer hat,
// this is non-conforming to the ECMA Spec: types don't have to be equivalent,
// but compatible, since we can coalesce native int with int32 (see section III.1.5).
lclTyp = JITtype2varType(ciType);
-#ifdef _TARGET_AMD64
- noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
-#endif // _TARGET_AMD64
-
if (compIsForInlining())
{
switch (fieldInfo.fieldAccessor)
#include "instrs.h"
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
};
// clang-format on
#else
NYI("inst_RV_SH - unknown target");
-#endif // _TARGET_*
+#endif // TARGET*
}
/*****************************************************************************
assert(!varTypeIsSIMD(dstType));
assert(!varTypeIsFloating(dstType));
return INS_mov;
-#else // _TARGET_*
-#error "Unknown _TARGET_"
+#else // TARGET*
+#error "Unknown TARGET"
#endif
}
#elif defined(TARGET_ARM64)
GetEmitter()->emitIns_BARR(INS_dmb, barrierType);
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
}
#elif defined(TARGET_ARMARCH)
GetEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags));
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
regSet.verifyRegUsed(reg);
}
#elif defined(TARGET_ARMARCH)
GetEmitter()->emitIns_R_I(INS_cmp, size, reg, 0);
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
}
#if defined(TARGET_XARCH) || defined(TARGET_ARMARCH)
GetEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
}
assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
}
#else
-#error "Unknown _TARGET_"
+#error "Unknown TARGET"
#endif
}
}
#ifdef TARGET_X86
INST1(fld, "fld", IUM_WR, 0x0000D9, INS_FLAGS_x87Instr)
INST1(fstp, "fstp", IUM_WR, 0x0018D9, INS_FLAGS_x87Instr)
-#endif // _TARGET_X86
+#endif // TARGET_X86
INST1(seto, "seto", IUM_WR, 0x0F0090, INS_FLAGS_ReadsFlags)
INST1(setno, "setno", IUM_WR, 0x0F0091, INS_FLAGS_ReadsFlags)
#elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// On System V type environment the float registers are not indexed together with the int ones.
varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum;
-#endif // _TARGET_*
+#endif // TARGET*
CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
//
varDscInfo->setAllRegArgUsed(argType);
-#endif // _TARGET_XXX_
+#endif // TARGET_XXX
#if FEATURE_FASTTAILCALL
varDsc->lvStkOffs = varDscInfo->stackArgSize;
varDsc->lvStkOffs = argOffs;
argOffs += argSize;
}
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
}
else
{
#else
NYI("LowerTailCallViaHelper");
-#endif // _TARGET_*
+#endif // TARGET*
// Transform this call node into a call to Jit tail call helper.
call->gtCallType = CT_HELPER;
// Lowering will have split any candidate lclVars into lo/hi vars.
return false;
}
-#endif // !defined(_TARGET_64BIT)
+#endif // !defined(TARGET_64BIT)
// If we have JMP, reg args must be put on the stack
currentRegCount = node->AsMultiRegOp()->GetRegCount();
}
else
-#endif // _TARGET_ARM
+#endif // TARGET_ARM
{
assert(!node->IsMultiRegNode());
currentRegCount = 1;
#else
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
bool isBackFilled = false;
unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
}
#else
#error Unsupported or unset target architecture
-#endif // _TARGET_XXX_
+#endif // TARGET_XXX
if (isStructArg)
{
// We have an argument with a struct type, but it may be be a child of a GT_COMMA
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
-#else // _TARGET_XARCH
+#else // TARGET_XARCH
/* If this is an unsigned long mod with op2 which is a cast to long from a
constant int, then don't morph to a call to the helper. This can be done
faster inline using idiv.
return tree;
}
}
-#endif // _TARGET_XARCH
+#endif // TARGET_XARCH
ASSIGN_HELPER_FOR_MOD:
largeFrame = true;
break; // early out, we don't need to keep increasing frameSize
}
-#elif _TARGET_ARM32
+#elif defined(TARGET_ARM)
if (frameSize > 0x0400)
{
// We likely have a large stack frame.
hugeFrame = true;
break; // early out, we don't need to keep increasing frameSize
}
-#elif TARGET_ARM64
+#elif defined(TARGET_ARM64)
if (frameSize > 0x1000)
{
// We likely have a large stack frame.
typedef TinyArray<unsigned short, regNumber, REGNUM_BITS> regList;
#else
// The regList is unused for all other targets.
-#endif // _TARGET_*
+#endif // TARGET*
#endif // REGLIST_H
JITDUMP(" Known type Vector256<ulong>\n");
}
else
-#endif // defined(_TARGET_XARCH)
+#endif // defined(TARGET_XARCH)
if (typeHnd == m_simdHandleCache->Vector128FloatHandle)
{
simdBaseType = TYP_FLOAT;
{
retVal = gtNewSIMDNode(simdType, op1, SIMDIntrinsicAbs, baseType, size);
}
-#else // !defined(_TARGET_XARCH)_ && !defined(TARGET_ARM64)
+#else // !defined(TARGET_XARCH) && !defined(TARGET_ARM64)
assert(!"Abs intrinsic on non-xarch target not implemented");
#endif // !TARGET_XARCH
// See the LICENSE file in the project root for more information.
/*****************************************************************************/
-#ifndef _TARGET_H_
-#define _TARGET_H_
+#ifndef TARGET_H_
+#define TARGET_H_
#if defined(FEATURE_CORECLR) && defined(TARGET_UNIX)
#define FEATURE_VARARG 0
C_ASSERT(sizeof(target_ssize_t) == TARGET_POINTER_SIZE);
/*****************************************************************************/
-#endif // _TARGET_H_
+#endif // TARGET_H_
/*****************************************************************************/
// See unwindX86.cpp
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
}
#elif defined(TARGET_X86)
// No register ranges
-#else // _TARGET_*
+#else // TARGET*
#error Unsupported or unset target architecture
-#endif // _TARGET_*
+#endif // TARGET*
}
#if defined(TARGET_ARM64)