// and thus saved on the frame).
// Compute the maximum estimated spill temp size.
- unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + sizeof(void*);
+ unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + TARGET_POINTER_SIZE;
maxTmpSize += (compiler->tmpDoubleSpillMax * sizeof(double)) + (compiler->tmpIntSpillMax * sizeof(int));
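// Background for the substitutions throughout this change (a sketch, assuming the
// usual target.h definitions): sizeof(void*) is the pointer size of the machine
// *hosting* the JIT, so a 64-bit-hosted JIT cross-compiling for a 32-bit target
// computes 8 where the target needs 4. TARGET_POINTER_SIZE (and REGSIZE_BYTES for
// register-sized stack slots) are compile-time constants describing the *target*,
// conceptually:
//
//   #if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
//   #define TARGET_POINTER_SIZE 4 // likewise REGSIZE_BYTES
//   #else // _TARGET_AMD64_ || _TARGET_ARM64_
//   #define TARGET_POINTER_SIZE 8
//   #endif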
// idea of how to ignore it.
// On Arm, a long can be passed in register
- noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == sizeof(void*));
+ noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == TARGET_POINTER_SIZE);
#endif
#endif //_TARGET_64BIT_
initStkLclCnt += varDsc->lvStructGcCount;
}
- if ((compiler->lvaLclSize(varNum) > (3 * sizeof(void*))) && (largeGcStructs <= 4))
+ if ((compiler->lvaLclSize(varNum) > (3 * TARGET_POINTER_SIZE)) && (largeGcStructs <= 4))
{
largeGcStructs++;
}
if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
{
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*));
+ unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE;
// Zero out the slot for nesting level 0
- unsigned firstSlotOffs = filterEndOffsetSlotOffs - (sizeof(void*));
+ unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE;
if (!initRegZeroed)
{
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
- if ((compiler->compLclFrameSize == sizeof(void*)) && !compiler->compJmpOpUsed)
+ if ((compiler->compLclFrameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regTracker.rsTrackRegTrash(REG_ECX);
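// (Encoding note, for illustration: "pop ecx" is a 1-byte instruction versus
// 3 bytes for "add esp, 4", so it is the cheaper way to release exactly one
// pointer-sized slot of frame; the compJmpOpUsed check above simply excludes
// methods that exit via a jmp, where this trick is not applied.)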
if (fCalleePop)
{
- noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * sizeof(void*));
- stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+ noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
+ stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
}
noway_assert(cookieOffset < varOffset);
unsigned offset = varOffset - cookieOffset;
- unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+ unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
noway_assert(offset < stkArgSize);
offset = stkArgSize - offset;
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
- sizeof(void*)); // below doesn't underflow.
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+ TARGET_POINTER_SIZE); // below doesn't underflow.
+ filterEndOffsetSlotOffs =
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
- curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*));
+ curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
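// Layout sketch for the shadow SP slots touched above (offsets follow from the
// code; the slot count is illustrative). With S = lvaLclSize(lvaShadowSPslotsVar)
// and P = TARGET_POINTER_SIZE:
//   [S - 1*P]      reserved for ICodeManager::FixContext(ppEndRegion)  == filterEndOffsetSlotOffs
//   [S - 2*P]      slot for finally/filter nesting level 0
//   [S - (N+2)*P]  slot for nesting level N                            == curNestingSlotOffs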
reg = REG_STK;
break;
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs;
filterEndOffsetSlotOffs =
- (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
unsigned curNestingSlotOffs;
- curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
+ curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
// Zero out the slot for the next nesting level
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
- curNestingSlotOffs - sizeof(void*));
+ curNestingSlotOffs - TARGET_POINTER_SIZE);
instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
curNestingSlotOffs);
addrReg = 0;
// Get the number of BYTES to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass),
+ TARGET_POINTER_SIZE);
size_t bytesToBeCopied = opsz;
// postponedFields is true if we have any postponed fields
if (fieldVarDsc->lvStackAligned())
{
if (fieldVarDsc->lvExactSize != 2 * sizeof(unsigned) &&
- fieldVarDsc->lvFldOffset + sizeof(void*) != bytesToBeCopied)
+ fieldVarDsc->lvFldOffset + TARGET_POINTER_SIZE != bytesToBeCopied)
{
// Might need 4-byte padding for fields other than LONG and DOUBLE.
// Just push some junk (i.e. EAX) on the stack.
inst_RV(INS_push, REG_EAX, TYP_INT);
genSinglePush();
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
// If we have an expectedAlignedOffset make sure that this push instruction
}
else
{
- getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
// Push the "upper half" of DOUBLE var if it is not enregistered.
{
if (!fieldVarDsc->lvRegister)
{
- getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
//
genSinglePush();
}
- bytesToBeCopied -= sizeof(void*);
+ bytesToBeCopied -= TARGET_POINTER_SIZE;
}
else // not stack aligned
{
// This should never change until it is set back to UINT_MAX by an aligned
// offset
noway_assert(expectedAlignedOffset ==
- roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*));
+ roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) -
+ TARGET_POINTER_SIZE);
}
expectedAlignedOffset =
- roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*);
+ roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) - TARGET_POINTER_SIZE;
noway_assert(expectedAlignedOffset < bytesToBeCopied);
noway_assert(arg->gtObj.gtOp1->InReg());
regNumber reg = arg->gtObj.gtOp1->gtRegNum;
// Get the number of DWORDS to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
- unsigned slots = (unsigned)(opsz / sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(DWORD));
+ unsigned slots = (unsigned)(opsz / sizeof(DWORD));
BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
if (opsz & 0x4)
{
- stkDisp -= sizeof(void*);
+ stkDisp -= TARGET_POINTER_SIZE;
getEmitter()->emitIns_AR_R(INS_push, EA_4BYTE, REG_NA, reg, stkDisp);
genSinglePush();
}
{
getEmitter()->emitIns_R_AR(INS_movq, EA_8BYTE, xmmReg, reg, curDisp);
getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_SPBASE, curDisp);
- curDisp += 2 * sizeof(void*);
+ curDisp += 2 * TARGET_POINTER_SIZE;
}
noway_assert(curDisp == stkDisp);
}
noway_assert(gcLayout[i] == TYPE_GC_BYREF);
fieldSize = EA_BYREF;
}
- getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * sizeof(void*));
+ getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * TARGET_POINTER_SIZE);
genSinglePush();
}
}
/* Keep track of ESP for EBP-less frames */
genSinglePush();
- argSize += sizeof(void*);
+ argSize += REGSIZE_BYTES;
#elif defined(_TARGET_ARM_)
// Push the count of the incoming stack arguments
unsigned nOldStkArgs =
- (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*));
+ (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES);
getEmitter()->emitIns_I(INS_push, EA_4BYTE, nOldStkArgs);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push the count of the outgoing stack arguments
- getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / sizeof(void*));
+ getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / REGSIZE_BYTES);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push info about the callee-saved registers to be restored
// For now, we always spill all registers if compiler->compTailCallUsed
(fTailCallTargetIsVSD ? 0x2 : 0x0); // Stub dispatch flag
getEmitter()->emitIns_I(INS_push, EA_4BYTE, calleeSavedRegInfo);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
// Push the address of the target function
getEmitter()->emitIns_R(INS_push, EA_4BYTE, REG_TAILCALL_ADDR);
genSinglePush(); // Keep track of ESP for EBP-less frames
- args += sizeof(void*);
+ args += REGSIZE_BYTES;
#else // _TARGET_X86_
}
else if (cit == CORINFO_TYPE_REFANY)
{
- sigSize = 2 * sizeof(void*);
+ sigSize = 2 * TARGET_POINTER_SIZE;
}
return sigSize;
}
bool lvStackAligned() const
{
assert(lvIsStructField);
- return ((lvFldOffset % sizeof(void*)) == 0);
+ return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
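// Minimal example (field offsets assumed for illustration): on a 32-bit target
// (TARGET_POINTER_SIZE == 4) a promoted field at lvFldOffset 8 is stack aligned
// (8 % 4 == 0) while one at offset 6 is not; testing against the host's
// sizeof(void*) from a 64-bit-hosted cross-jit would instead use 8 and give the
// wrong answer for a field at offset 4.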
bool lvNormalizeOnLoad() const
{
#ifdef _TARGET_ARM_
// Don't set a preferred register for a TYP_STRUCT that takes more than one register slot
- if ((lvType == TYP_STRUCT) && (lvSize() > sizeof(void*)))
+ if ((lvType == TYP_STRUCT) && (lvSize() > REGSIZE_BYTES))
return;
#endif
return structSize; // TODO: roundUp() needed here?
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- return sizeof(size_t);
+ return TARGET_POINTER_SIZE;
#else // !_TARGET_AMD64_
unsigned structSize = info.compCompHnd->getClassSize(argClass);
// make certain the EE passes us back the right thing for refanys
- assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * sizeof(void*));
+ assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * TARGET_POINTER_SIZE);
// For each target that supports passing struct args in multiple registers
// apply the target specific rules for them here:
if (emitComp->verbose)
{
- unsigned count = (offsHi - offsLo) / sizeof(void*);
+ unsigned count = (offsHi - offsLo) / TARGET_POINTER_SIZE;
printf("%u tracked GC refs are at stack offsets ", count);
if (offsLo >= 0)
#endif // DEBUG
- assert(((offsHi - offsLo) % sizeof(void*)) == 0);
- assert((offsLo % sizeof(void*)) == 0);
- assert((offsHi % sizeof(void*)) == 0);
+ assert(((offsHi - offsLo) % TARGET_POINTER_SIZE) == 0);
+ assert((offsLo % TARGET_POINTER_SIZE) == 0);
+ assert((offsHi % TARGET_POINTER_SIZE) == 0);
emitGCrFrameOffsMin = offsLo;
emitGCrFrameOffsMax = offsHi;
- emitGCrFrameOffsCnt = (offsHi - offsLo) / sizeof(void*);
+ emitGCrFrameOffsCnt = (offsHi - offsLo) / TARGET_POINTER_SIZE;
}
/*****************************************************************************
int of;
bool sp = false;
- for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += sizeof(void*))
+ for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += TARGET_POINTER_SIZE)
{
if (emitGCrFrameLiveTab[vn])
{
UNATIVE_OFFSET roDataAlignmentDelta = 0;
if (emitConsDsc.dsdOffs)
{
- UNATIVE_OFFSET roDataAlignment = sizeof(void*); // 8 Byte align by default.
+ UNATIVE_OFFSET roDataAlignment = TARGET_POINTER_SIZE; // 8 Byte align by default.
roDataAlignmentDelta = (UNATIVE_OFFSET)ALIGN_UP(emitTotalHotCodeSize, roDataAlignment) - emitTotalHotCodeSize;
assert((roDataAlignmentDelta == 0) || (roDataAlignmentDelta == 4));
}
varPtrDsc** dp;
for (vn = 0, of = emitGCrFrameOffsMin, dp = emitGCrFrameLiveTab; vn < emitGCrFrameOffsCnt;
- vn++, of += sizeof(void*), dp++)
+ vn++, of += TARGET_POINTER_SIZE, dp++)
{
if (*dp)
{
{
JITDUMP(" section %u, size %u, block absolute addr\n", secNum++, dscSize);
- assert(dscSize && dscSize % sizeof(BasicBlock*) == 0);
+ assert(dscSize && dscSize % TARGET_POINTER_SIZE == 0);
size_t numElems = dscSize / TARGET_POINTER_SIZE;
BYTE** bDst = (BYTE**)dst;
for (unsigned i = 0; i < numElems; i++)
varPtrDsc* desc;
- assert((abs(offs) % sizeof(ssize_t)) == 0);
+ assert((abs(offs) % TARGET_POINTER_SIZE) == 0);
assert(needsGC(gcType));
/* Compute the index into the GC frame table if the caller didn't do it */
if (disp == -1)
{
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
}
assert((size_t)disp < emitGCrFrameOffsCnt);
if (disp == -1)
{
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
}
assert((unsigned)disp < emitGCrFrameOffsCnt);
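// Worked example (offsets assumed): with emitGCrFrameOffsMin == -0x20, a GC slot
// at frame offset offs == -0x10 on a 64-bit target maps to table index
// disp = (-0x10 - (-0x20)) / 8 == 2.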
if (needsGC(gcType))
{
- call->cdArgTable[gcArgs] = i * sizeof(void*);
+ call->cdArgTable[gcArgs] = i * TARGET_POINTER_SIZE;
if (gcType == GCT_BYREF)
{
}
#endif // DEBUG
- return sizeof(size_t);
+ return TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently dead, mark it as live */
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently live, mark it as dead */
}
#endif
- assert(argSize % (int)sizeof(void*) == 0);
- argCnt = argSize / (int)sizeof(void*);
+ assert(argSize % (int)REGSIZE_BYTES == 0);
+ argCnt = argSize / (int)REGSIZE_BYTES;
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
if (emitInsWritesToLclVarStackLoc(id))
{
int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
- unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+ unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
regNumber regBase;
int adr = emitComp->lvaFrameAddress(varNum, true, ®Base, ofs);
if (id->idGCref() != GCT_NONE)
#endif
assert(argSize % REGSIZE_BYTES == 0);
- argCnt = (int)(argSize / (int)sizeof(void*));
+ argCnt = (int)(argSize / (int)REGSIZE_BYTES);
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
if (emitInsWritesToLclVarStackLoc(id) || emitInsWritesToLclVarStackLocPair(id))
{
int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
- unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+ unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
bool FPbased;
int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
if (id->idGCref() != GCT_NONE)
}
if (emitInsWritesToLclVarStackLocPair(id))
{
- unsigned ofs2 = ofs + sizeof(size_t);
+ unsigned ofs2 = ofs + TARGET_POINTER_SIZE;
if (id->idGCrefReg2() != GCT_NONE)
{
emitGCvarLiveUpd(adr + ofs2, varNum, id->idGCrefReg2(), dst);
if (EA_IS_OFFSET(attr))
{
assert(ins == INS_push);
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
id = emitNewInstrDsp(EA_1BYTE, offs);
id->idIns(ins);
assert(ins == INS_mov && reg == REG_EAX);
// Special case: "mov eax, [addr]" is smaller
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
}
else
{
// instruction.
if (ins == INS_mov && reg == REG_EAX)
{
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
if (size == EA_2BYTE)
sz += 1;
}
// the instruction.
if (ins == INS_mov && reg == REG_EAX)
{
- sz = 1 + sizeof(void*);
+ sz = 1 + TARGET_POINTER_SIZE;
if (size == EA_2BYTE)
sz += 1;
}
//
//
//
- if ((sizeof(void*) + // return address for call
+ if ((TARGET_POINTER_SIZE + // return address for call
emitComp->genStackLevel +
// Current stack level. This gets reset on every
// localloc and on the prolog (invariant is that
// we've consumed more than JIT_RESERVED_STACK bytes
// of stack, which is what the prolog probe covers (in
// addition to the EE requested size)
- (emitComp->compHndBBtabCount * sizeof(void*))
+ (emitComp->compHndBBtabCount * TARGET_POINTER_SIZE)
// Hidden slots for calling finallys
) >= JIT_RESERVED_STACK)
{
}
#endif
- assert(argSize % sizeof(void*) == 0);
- argCnt = (int)(argSize / (ssize_t)sizeof(void*)); // we need a signed-divide
+ assert(argSize % REGSIZE_BYTES == 0);
+ argCnt = (int)(argSize / (int)REGSIZE_BYTES); // we need a signed-divide
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
if (jdsc && !noDetail)
{
- unsigned cnt = (jdsc->dsSize - 1) / sizeof(void*);
+ unsigned cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE;
BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
#ifdef _TARGET_AMD64_
if (id->idIsDspReloc())
{
- emitRecordRelocation((void*)(dst - sizeof(void*)), target, IMAGE_REL_BASED_MOFFSET);
+ emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET);
}
#endif //_TARGET_X86_
if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP)
{
assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
- emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
}
break;
{
assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0,
- (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
}
break;
assert((compiler->compArgSize & 0x3) == 0);
size_t argCount =
- (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*);
+ (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
assert(argCount <= MAX_USHORT_SIZE_T);
header->argCount = static_cast<unsigned short>(argCount);
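// Worked example (values assumed, x86): compArgSize == 24 with 2 incoming
// register arguments gives argCount = (24 - 2 * 4) / 4 == 4 stack argument
// slots recorded in the GC info header.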
{
unsigned offset;
- offset = (pasDepth - i) * sizeof(void*);
+ offset = (pasDepth - i) * TARGET_POINTER_SIZE;
if (curArg == GCT_BYREF)
offset |= byref_OFFSET_FLAG;
lvl += i;
unsigned offset;
- offset = lvl * sizeof(void*);
+ offset = lvl * TARGET_POINTER_SIZE;
if (mask & pasByrefBottomMask)
offset |= byref_OFFSET_FLAG;
// A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
{
assert(pass == 1);
- unsigned offset = varDsc->lvStkOffs + i * sizeof(void*);
+ unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
unsigned begOffs;
unsigned endOffs;
- assert(~OFFSET_MASK % sizeof(void*) == 0);
+ assert(~OFFSET_MASK % TARGET_POINTER_SIZE == 0);
/* Get hold of the variable's stack offset */
// Note that the enregisterable struct types cannot have GC pointers in them.
if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
continue;
}
- int offset = varDsc->lvStkOffs + i * sizeof(void*);
+ int offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
}
else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
+ unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
- op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+ op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
if (varTypeIsStruct(exprToBox))
op1->gtType = TYP_REF; // points at boxed object
FieldSeqNode* firstElemFldSeq =
GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
- op1 =
- gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ new (this, GT_CNS_INT)
+ GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
if (varTypeIsStruct(lclTyp))
{
FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
}
if (!(access & CORINFO_ACCESS_ADDRESS))
// remember the element size
if (lclTyp == TYP_REF)
{
- op1->gtIndex.gtIndElemSize = sizeof(void*);
+ op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
}
else
{
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
- // push(clone + sizeof(void*))
+ // push(clone + TARGET_POINTER_SIZE)
//
GenTreePtr cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
// to the beginning of the value-type. Today this means adjusting
// past the base of the object's vtable field, which is pointer sized.
- op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+ op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
}
else
callsiteSize += 10; // "lea EAX, bword ptr [EBP-14H]"
- // NB sizeof (void*) fails to convey intent when cross-jitting.
-
- unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
- unsigned slots = opsz / sizeof(void*);
+ unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), TARGET_POINTER_SIZE));
+ unsigned slots = opsz / TARGET_POINTER_SIZE;
callsiteSize += slots * 20; // "push gword ptr [EAX+offs] "
}
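// Worked example (sizes assumed): a 12-byte struct argument on a 32-bit target
// yields opsz = roundUp(12, 4) == 12 and slots == 3, so the callsite estimate
// grows by 3 * 20 == 60 on top of the 10 charged for the "lea" above.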
const unsigned argCount = args.numArgs;
m_ArgCount = argCount;
- const unsigned pointerSize = sizeof(void*);
+ const unsigned pointerSize = TARGET_POINTER_SIZE;
unsigned i = 0;
// Implicit arguments
#endif // FEATURE_FASTTAILCALL
// The total argument size must be aligned.
- noway_assert((compArgSize % sizeof(void*)) == 0);
+ noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0);
#ifdef _TARGET_X86_
/* We cannot pass more than 2^16 dwords as arguments as the "ret"
varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd);
size_t lvSize = varDsc->lvSize();
- assert((lvSize % sizeof(void*)) ==
- 0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
- varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
+ assert((lvSize % TARGET_POINTER_SIZE) ==
+ 0); // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid.
+ varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / TARGET_POINTER_SIZE) * sizeof(BYTE), CMK_LvaTable);
unsigned numGCVars;
var_types simdBaseType = TYP_UNKNOWN;
varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
// We need to re-adjust the offsets of the parameters so they are EBP
// relative rather than stack/frame pointer relative
- varDsc->lvStkOffs += (2 * sizeof(void*)); // return address and pushed EBP
+ varDsc->lvStkOffs += (2 * TARGET_POINTER_SIZE); // return address and pushed EBP
noway_assert(varDsc->lvStkOffs >= FIRST_ARG_STACK_OFFS);
}
/* Update the argOffs to reflect arguments that are passed in registers */
noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG);
- noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*));
+ noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
#ifdef _TARGET_X86_
- argOffs -= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*);
+ argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
#endif
#ifndef LEGACY_BACKEND
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
#if !defined(_TARGET_ARMARCH_)
#if DEBUG
- // TODO: Remove this noway_assert and replace occurrences of sizeof(void *) with argSize
+ // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize
// Also investigate why we are incrementing argOffs for X86 as this seems incorrect
//
- noway_assert(argSize == sizeof(void*));
+ noway_assert(argSize == TARGET_POINTER_SIZE);
#endif // DEBUG
#endif
#if defined(_TARGET_X86_)
- argOffs += sizeof(void*);
+ argOffs += TARGET_POINTER_SIZE;
#elif defined(_TARGET_AMD64_)
// Register arguments on AMD64 also take stack space (in the backing store).
varDsc->lvStkOffs = argOffs;
- argOffs += sizeof(void*);
+ argOffs += TARGET_POINTER_SIZE;
#elif defined(_TARGET_ARM64_)
// Register arguments on ARM64 only take stack space when they have a frame home.
#elif defined(_TARGET_ARM_)
#ifdef _TARGET_XARCH_
// On x86/amd64, the return address has already been pushed by the call instruction in the caller.
- stkOffs -= sizeof(void*); // return address;
+ stkOffs -= TARGET_POINTER_SIZE; // return address;
// TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other
// calleeregs. When you fix this, you'll also need to fix
if (lvaOutgoingArgSpaceSize > 0)
{
#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V.
- noway_assert(lvaOutgoingArgSpaceSize >= (4 * sizeof(void*)));
+ noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE));
#endif
- noway_assert((lvaOutgoingArgSpaceSize % sizeof(void*)) == 0);
+ noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0);
// Give it a value so we can avoid asserts in CHK builds.
// Since this will always use an SP relative offset of zero
pushedCount += 1; // pushed PC (return address)
#endif
- noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)sizeof(void*))));
+ noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE)));
}
int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs)
//
bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0;
bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) %
- (sizeof(double) / sizeof(void*))) == 0;
+ (sizeof(double) / TARGET_POINTER_SIZE)) == 0;
if (regPushedCountAligned != lclFrameSizeAligned)
{
- lvaIncrementFrameSize(sizeof(void*));
+ lvaIncrementFrameSize(TARGET_POINTER_SIZE);
}
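// Worked example (counts assumed, ARM): sizeof(double) / TARGET_POINTER_SIZE == 2,
// so regPushedCountAligned asks whether an even number of 4-byte registers is
// pushed. Pushing 5 callee-saved registers (not aligned) with compLclFrameSize ==
// 16 (aligned) would leave SP 4 bytes off an 8-byte boundary, so the frame is
// grown by one pointer slot to restore double alignment.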
#elif defined(_TARGET_X86_)
if (compLclFrameSize == 0)
{
// This can only happen with JitStress=1 or JitDoubleAlign=2
- lvaIncrementFrameSize(sizeof(void*));
+ lvaIncrementFrameSize(TARGET_POINTER_SIZE);
}
}
#endif
GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_CAST);
- noway_assert(genTypeSize(TYP_I_IMPL) == sizeof(void*));
+ noway_assert(genTypeSize(TYP_I_IMPL) == TARGET_POINTER_SIZE);
/* The first sub-operand is the thing being cast */
tree->gtFlags &= ~GTF_UNSIGNED;
}
#else
- if (dstSize < sizeof(void*))
+ if (dstSize < TARGET_POINTER_SIZE)
{
oper = gtNewCastNodeL(TYP_I_IMPL, oper, TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
}
else
{
- // If the valuetype size is not a multiple of sizeof(void*),
+ // If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
CLANG_FORMAT_COMMENT_ANCHOR;
// Create a node representing the local pointing to the base of the args
GenTreePtr ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
- gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*) +
+ gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES +
lclOffs));
// Access the argument through the local
#define GCS EA_GCREF
#define BRS EA_BYREF
#define PS EA_PTRSIZE
-#define PST (sizeof(void*) / sizeof(int))
+#define PST (TARGET_POINTER_SIZE / sizeof(int))
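// PST is the target pointer size measured in ints: 8 / sizeof(int) == 2 on
// 64-bit targets and 4 / 4 == 1 on 32-bit ones, independent of the host's
// sizeof(void*).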
#ifdef _TARGET_64BIT_
#define VTF_I32 0