Clean up clrjit: use TARGET_POINTER_SIZE instead of sizeof(void*) where the target pointer size is meant (#15524)
author Egor Chesakov <Egor.Chesakov@microsoft.com>
Sat, 16 Dec 2017 04:30:58 +0000 (20:30 -0800)
committer Jan Kotas <jkotas@microsoft.com>
Sat, 16 Dec 2017 04:30:58 +0000 (23:30 -0500)
17 files changed:
src/jit/codegencommon.cpp
src/jit/codegenlegacy.cpp
src/jit/compiler.cpp
src/jit/compiler.h
src/jit/compiler.hpp
src/jit/ee_il_dll.cpp
src/jit/emit.cpp
src/jit/emitarm.cpp
src/jit/emitarm64.cpp
src/jit/emitxarch.cpp
src/jit/gcencode.cpp
src/jit/gcinfo.cpp
src/jit/importer.cpp
src/jit/inlinepolicy.cpp
src/jit/lclvars.cpp
src/jit/morph.cpp
src/jit/typelist.h
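
The rewrite is mechanical, but the distinction matters for cross-targeting: sizeof(void*) is evaluated when clrjit itself is compiled and therefore yields the host's pointer size, whereas TARGET_POINTER_SIZE (and REGSIZE_BYTES for register-sized slots) describes the target being jitted. Below is a minimal, standalone sketch of the pitfall, assuming an x64-hosted altjit producing 32-bit ARM code; the constant TARGET_POINTER_SIZE_ARM32 is illustrative and not a real JIT definition:

    #include <cstdio>

    // The host pointer size is baked in when the JIT binary is compiled.
    static const unsigned HOST_POINTER_SIZE = sizeof(void*);   // 8 on an x64 host
    // What TARGET_POINTER_SIZE would evaluate to for a 32-bit ARM target.
    static const unsigned TARGET_POINTER_SIZE_ARM32 = 4;

    int main()
    {
        // Laying out three pointer-sized GC slots in the *target* frame:
        unsigned targetBytes = 3 * TARGET_POINTER_SIZE_ARM32; // 12 - correct for ARM32
        unsigned hostBytes   = 3 * HOST_POINTER_SIZE;         // 24 on an x64 host - wrong
        printf("target slots: %u bytes, sizeof(void*) slots: %u bytes\n", targetBytes, hostBytes);
        return 0;
    }

The same reasoning applies to the REGSIZE_BYTES substitutions in the hunks below: argument and register-save accounting must use the target's register width, not the width of a host pointer.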

diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 7be0f4f..bf0412d 100644
@@ -3113,7 +3113,7 @@ void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode)
     // and thus saved on the frame).
 
     // Compute the maximum estimated spill temp size.
-    unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + sizeof(void*);
+    unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + TARGET_POINTER_SIZE;
 
     maxTmpSize += (compiler->tmpDoubleSpillMax * sizeof(double)) + (compiler->tmpIntSpillMax * sizeof(int));
 
@@ -5122,7 +5122,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
             // idea of how to ignore it.
 
             // On Arm, a long can be passed in register
-            noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == sizeof(void*));
+            noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == TARGET_POINTER_SIZE);
 #endif
 #endif //_TARGET_64BIT_
 
@@ -5618,7 +5618,7 @@ void CodeGen::genCheckUseBlockInit()
             initStkLclCnt += varDsc->lvStructGcCount;
         }
 
-        if ((compiler->lvaLclSize(varNum) > (3 * sizeof(void*))) && (largeGcStructs <= 4))
+        if ((compiler->lvaLclSize(varNum) > (3 * TARGET_POINTER_SIZE)) && (largeGcStructs <= 4))
         {
             largeGcStructs++;
         }
@@ -9151,10 +9151,10 @@ void CodeGen::genFnProlog()
     if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
     {
         // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
-        unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*));
+        unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE;
 
         // Zero out the slot for nesting level 0
-        unsigned firstSlotOffs = filterEndOffsetSlotOffs - (sizeof(void*));
+        unsigned firstSlotOffs = filterEndOffsetSlotOffs - TARGET_POINTER_SIZE;
 
         if (!initRegZeroed)
         {
@@ -9779,7 +9779,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
             /* Add 'compiler->compLclFrameSize' to ESP */
             /* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
 
-            if ((compiler->compLclFrameSize == sizeof(void*)) && !compiler->compJmpOpUsed)
+            if ((compiler->compLclFrameSize == TARGET_POINTER_SIZE) && !compiler->compJmpOpUsed)
             {
                 inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
                 regTracker.rsTrackRegTrash(REG_ECX);
@@ -10008,8 +10008,8 @@ void CodeGen::genFnEpilog(BasicBlock* block)
 
         if (fCalleePop)
         {
-            noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * sizeof(void*));
-            stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+            noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
+            stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
 
             noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
         }
@@ -11948,7 +11948,7 @@ void CodeGen::genSetScopeInfo(unsigned            which,
 
         noway_assert(cookieOffset < varOffset);
         unsigned offset     = varOffset - cookieOffset;
-        unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
+        unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
         noway_assert(offset < stkArgSize);
         offset = stkArgSize - offset;
 
diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
index d741a44..1b43003 100644
@@ -5090,11 +5090,12 @@ void CodeGen::genCodeForTreeLeaf(GenTreePtr tree, regMaskTP destReg, regMaskTP b
             // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
             unsigned filterEndOffsetSlotOffs;
             PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
-                          sizeof(void*)); // below doesn't underflow.
-            filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+                          TARGET_POINTER_SIZE); // below doesn't underflow.
+            filterEndOffsetSlotOffs =
+                (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
 
             unsigned curNestingSlotOffs;
-            curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*));
+            curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
             instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
             reg = REG_STK;
             break;
@@ -13047,14 +13048,14 @@ void CodeGen::genCodeForBBlist()
                 // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
                 unsigned filterEndOffsetSlotOffs;
                 filterEndOffsetSlotOffs =
-                    (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+                    (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
 
                 unsigned curNestingSlotOffs;
-                curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
+                curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
 
                 // Zero out the slot for the next nesting level
                 instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
-                                           curNestingSlotOffs - sizeof(void*));
+                                           curNestingSlotOffs - TARGET_POINTER_SIZE);
 
                 instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
                                            curNestingSlotOffs);
@@ -15977,7 +15978,8 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                             addrReg = 0;
 
                             // Get the number of BYTES to copy to the stack
-                            opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
+                            opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass),
+                                           TARGET_POINTER_SIZE);
                             size_t bytesToBeCopied = opsz;
 
                             // postponedFields is true if we have any postponed fields
@@ -16020,14 +16022,14 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                 if (fieldVarDsc->lvStackAligned())
                                 {
                                     if (fieldVarDsc->lvExactSize != 2 * sizeof(unsigned) &&
-                                        fieldVarDsc->lvFldOffset + sizeof(void*) != bytesToBeCopied)
+                                        fieldVarDsc->lvFldOffset + TARGET_POINTER_SIZE != bytesToBeCopied)
                                     {
                                         // Might need 4-bytes paddings for fields other than LONG and DOUBLE.
                                         // Just push some junk (i.e EAX) on the stack.
                                         inst_RV(INS_push, REG_EAX, TYP_INT);
                                         genSinglePush();
 
-                                        bytesToBeCopied -= sizeof(void*);
+                                        bytesToBeCopied -= TARGET_POINTER_SIZE;
                                     }
 
                                     // If we have an expectedAlignedOffset make sure that this push instruction
@@ -16060,11 +16062,11 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                         }
                                         else
                                         {
-                                            getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+                                            getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
                                             genSinglePush();
                                         }
 
-                                        bytesToBeCopied -= sizeof(void*);
+                                        bytesToBeCopied -= TARGET_POINTER_SIZE;
                                     }
 
                                     // Push the "upper half" of DOUBLE var if it is not enregistered.
@@ -16073,11 +16075,11 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                     {
                                         if (!fieldVarDsc->lvRegister)
                                         {
-                                            getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+                                            getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, TARGET_POINTER_SIZE);
                                             genSinglePush();
                                         }
 
-                                        bytesToBeCopied -= sizeof(void*);
+                                        bytesToBeCopied -= TARGET_POINTER_SIZE;
                                     }
 
                                     //
@@ -16156,7 +16158,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                         genSinglePush();
                                     }
 
-                                    bytesToBeCopied -= sizeof(void*);
+                                    bytesToBeCopied -= TARGET_POINTER_SIZE;
                                 }
                                 else // not stack aligned
                                 {
@@ -16171,11 +16173,12 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                         // This should never change until it is set back to UINT_MAX by an aligned
                                         // offset
                                         noway_assert(expectedAlignedOffset ==
-                                                     roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*));
+                                                     roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) -
+                                                         TARGET_POINTER_SIZE);
                                     }
 
                                     expectedAlignedOffset =
-                                        roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*);
+                                        roundUp(fieldVarDsc->lvFldOffset, TARGET_POINTER_SIZE) - TARGET_POINTER_SIZE;
 
                                     noway_assert(expectedAlignedOffset < bytesToBeCopied);
 
@@ -16302,8 +16305,8 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                     noway_assert(arg->gtObj.gtOp1->InReg());
                     regNumber reg = arg->gtObj.gtOp1->gtRegNum;
                     // Get the number of DWORDS to copy to the stack
-                    opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
-                    unsigned slots = (unsigned)(opsz / sizeof(void*));
+                    opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(DWORD));
+                    unsigned slots = (unsigned)(opsz / sizeof(DWORD));
 
                     BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
 
@@ -16355,7 +16358,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
 
                         if (opsz & 0x4)
                         {
-                            stkDisp -= sizeof(void*);
+                            stkDisp -= TARGET_POINTER_SIZE;
                             getEmitter()->emitIns_AR_R(INS_push, EA_4BYTE, REG_NA, reg, stkDisp);
                             genSinglePush();
                         }
@@ -16367,7 +16370,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                         {
                             getEmitter()->emitIns_R_AR(INS_movq, EA_8BYTE, xmmReg, reg, curDisp);
                             getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_SPBASE, curDisp);
-                            curDisp += 2 * sizeof(void*);
+                            curDisp += 2 * TARGET_POINTER_SIZE;
                         }
                         noway_assert(curDisp == stkDisp);
                     }
@@ -16385,7 +16388,7 @@ size_t CodeGen::genPushArgList(GenTreeCall* call)
                                 noway_assert(gcLayout[i] == TYPE_GC_BYREF);
                                 fieldSize = EA_BYREF;
                             }
-                            getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * sizeof(void*));
+                            getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * TARGET_POINTER_SIZE);
                             genSinglePush();
                         }
                     }
@@ -19290,7 +19293,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
                         /* Keep track of ESP for EBP-less frames */
                         genSinglePush();
 
-                        argSize += sizeof(void*);
+                        argSize += REGSIZE_BYTES;
 
 #elif defined(_TARGET_ARM_)
 
@@ -19683,16 +19686,16 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
         // Push the count of the incoming stack arguments
 
         unsigned nOldStkArgs =
-            (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*));
+            (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES);
         getEmitter()->emitIns_I(INS_push, EA_4BYTE, nOldStkArgs);
         genSinglePush(); // Keep track of ESP for EBP-less frames
-        args += sizeof(void*);
+        args += REGSIZE_BYTES;
 
         // Push the count of the outgoing stack arguments
 
-        getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / sizeof(void*));
+        getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / REGSIZE_BYTES);
         genSinglePush(); // Keep track of ESP for EBP-less frames
-        args += sizeof(void*);
+        args += REGSIZE_BYTES;
 
         // Push info about the callee-saved registers to be restored
         // For now, we always spill all registers if compiler->compTailCallUsed
@@ -19701,13 +19704,13 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
                                    (fTailCallTargetIsVSD ? 0x2 : 0x0); // Stub dispatch flag
         getEmitter()->emitIns_I(INS_push, EA_4BYTE, calleeSavedRegInfo);
         genSinglePush(); // Keep track of ESP for EBP-less frames
-        args += sizeof(void*);
+        args += REGSIZE_BYTES;
 
         // Push the address of the target function
 
         getEmitter()->emitIns_R(INS_push, EA_4BYTE, REG_TAILCALL_ADDR);
         genSinglePush(); // Keep track of ESP for EBP-less frames
-        args += sizeof(void*);
+        args += REGSIZE_BYTES;
 
 #else // _TARGET_X86_
 
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index d359c6f..c496de5 100644
@@ -2179,7 +2179,7 @@ unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
     }
     else if (cit == CORINFO_TYPE_REFANY)
     {
-        sigSize = 2 * sizeof(void*);
+        sigSize = 2 * TARGET_POINTER_SIZE;
     }
     return sigSize;
 }
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index cf2f68f..5e448d8 100644
@@ -743,7 +743,7 @@ public:
     bool lvStackAligned() const
     {
         assert(lvIsStructField);
-        return ((lvFldOffset % sizeof(void*)) == 0);
+        return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
     }
     bool lvNormalizeOnLoad() const
     {
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 2340780..096c81d 100644
@@ -2131,7 +2131,7 @@ inline void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler* comp)
 
 #ifdef _TARGET_ARM_
     // Don't set a preferred register for a TYP_STRUCT that takes more than one register slot
-    if ((lvType == TYP_STRUCT) && (lvSize() > sizeof(void*)))
+    if ((lvType == TYP_STRUCT) && (lvSize() > REGSIZE_BYTES))
         return;
 #endif
 
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index 8707376..e87c46c 100644
@@ -441,7 +441,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
         return structSize; // TODO: roundUp() needed here?
     }
 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-    return sizeof(size_t);
+    return TARGET_POINTER_SIZE;
 
 #else // !_TARGET_AMD64_
 
@@ -454,7 +454,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
         unsigned structSize = info.compCompHnd->getClassSize(argClass);
 
         // make certain the EE passes us back the right thing for refanys
-        assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * sizeof(void*));
+        assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * TARGET_POINTER_SIZE);
 
         // For each target that supports passing struct args in multiple registers
         // apply the target specific rules for them here:
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index c72deee..9f42aea 100644
@@ -2129,7 +2129,7 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
 
     if (emitComp->verbose)
     {
-        unsigned count = (offsHi - offsLo) / sizeof(void*);
+        unsigned count = (offsHi - offsLo) / TARGET_POINTER_SIZE;
         printf("%u tracked GC refs are at stack offsets ", count);
 
         if (offsLo >= 0)
@@ -2164,13 +2164,13 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
 
 #endif // DEBUG
 
-    assert(((offsHi - offsLo) % sizeof(void*)) == 0);
-    assert((offsLo % sizeof(void*)) == 0);
-    assert((offsHi % sizeof(void*)) == 0);
+    assert(((offsHi - offsLo) % TARGET_POINTER_SIZE) == 0);
+    assert((offsLo % TARGET_POINTER_SIZE) == 0);
+    assert((offsHi % TARGET_POINTER_SIZE) == 0);
 
     emitGCrFrameOffsMin = offsLo;
     emitGCrFrameOffsMax = offsHi;
-    emitGCrFrameOffsCnt = (offsHi - offsLo) / sizeof(void*);
+    emitGCrFrameOffsCnt = (offsHi - offsLo) / TARGET_POINTER_SIZE;
 }
 
 /*****************************************************************************
@@ -2847,7 +2847,7 @@ void emitter::emitDispVarSet()
     int      of;
     bool     sp = false;
 
-    for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += sizeof(void*))
+    for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += TARGET_POINTER_SIZE)
     {
         if (emitGCrFrameLiveTab[vn])
         {
@@ -4528,7 +4528,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
     UNATIVE_OFFSET roDataAlignmentDelta = 0;
     if (emitConsDsc.dsdOffs)
     {
-        UNATIVE_OFFSET roDataAlignment = sizeof(void*); // 8 Byte align by default.
+        UNATIVE_OFFSET roDataAlignment = TARGET_POINTER_SIZE; // 8 Byte align by default.
         roDataAlignmentDelta = (UNATIVE_OFFSET)ALIGN_UP(emitTotalHotCodeSize, roDataAlignment) - emitTotalHotCodeSize;
         assert((roDataAlignmentDelta == 0) || (roDataAlignmentDelta == 4));
     }
@@ -4925,7 +4925,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
         varPtrDsc** dp;
 
         for (vn = 0, of = emitGCrFrameOffsMin, dp = emitGCrFrameLiveTab; vn < emitGCrFrameOffsCnt;
-             vn++, of += sizeof(void*), dp++)
+             vn++, of += TARGET_POINTER_SIZE, dp++)
         {
             if (*dp)
             {
@@ -5459,7 +5459,7 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst)
         {
             JITDUMP("  section %u, size %u, block absolute addr\n", secNum++, dscSize);
 
-            assert(dscSize && dscSize % sizeof(BasicBlock*) == 0);
+            assert(dscSize && dscSize % TARGET_POINTER_SIZE == 0);
             size_t numElems = dscSize / TARGET_POINTER_SIZE;
             BYTE** bDst     = (BYTE**)dst;
             for (unsigned i = 0; i < numElems; i++)
@@ -5548,14 +5548,14 @@ void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp
 
     varPtrDsc* desc;
 
-    assert((abs(offs) % sizeof(ssize_t)) == 0);
+    assert((abs(offs) % TARGET_POINTER_SIZE) == 0);
     assert(needsGC(gcType));
 
     /* Compute the index into the GC frame table if the caller didn't do it */
 
     if (disp == -1)
     {
-        disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+        disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
     }
 
     assert((size_t)disp < emitGCrFrameOffsCnt);
@@ -5645,7 +5645,7 @@ void emitter::emitGCvarDeadSet(int offs, BYTE* addr, ssize_t disp)
 
     if (disp == -1)
     {
-        disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+        disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
     }
 
     assert((unsigned)disp < emitGCrFrameOffsCnt);
@@ -5898,7 +5898,7 @@ void emitter::emitRecordGCcall(BYTE* codePos, unsigned char callInstrSize)
 
             if (needsGC(gcType))
             {
-                call->cdArgTable[gcArgs] = i * sizeof(void*);
+                call->cdArgTable[gcArgs] = i * TARGET_POINTER_SIZE;
 
                 if (gcType == GCT_BYREF)
                 {
@@ -6144,7 +6144,7 @@ unsigned char emitter::emitOutputSizeT(BYTE* dst, ssize_t val)
     }
 #endif // DEBUG
 
-    return sizeof(size_t);
+    return TARGET_POINTER_SIZE;
 }
 
 //------------------------------------------------------------------------
@@ -6528,7 +6528,7 @@ void emitter::emitGCvarLiveUpd(int offs, int varNum, GCtype gcType, BYTE* addr)
 
             /* Compute the index into the GC frame table */
 
-            disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+            disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
             assert(disp < emitGCrFrameOffsCnt);
 
             /* If the variable is currently dead, mark it as live */
@@ -6559,7 +6559,7 @@ void emitter::emitGCvarDeadUpd(int offs, BYTE* addr)
 
         /* Compute the index into the GC frame table */
 
-        disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+        disp = (offs - emitGCrFrameOffsMin) / TARGET_POINTER_SIZE;
         assert(disp < emitGCrFrameOffsCnt);
 
         /* If the variable is currently live, mark it as dead */
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index a35c729..f844888 100644
@@ -4502,8 +4502,8 @@ void emitter::emitIns_Call(EmitCallType          callType,
     }
 #endif
 
-    assert(argSize % (int)sizeof(void*) == 0);
-    argCnt = argSize / (int)sizeof(void*);
+    assert(argSize % (int)REGSIZE_BYTES == 0);
+    argCnt = argSize / (int)REGSIZE_BYTES;
 
     /* Managed RetVal: emit sequence point for the call */
     if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -6387,7 +6387,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
     if (emitInsWritesToLclVarStackLoc(id))
     {
         int       varNum = id->idAddr()->iiaLclVar.lvaVarNum();
-        unsigned  ofs    = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+        unsigned  ofs    = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
         regNumber regBase;
         int       adr = emitComp->lvaFrameAddress(varNum, true, &regBase, ofs);
         if (id->idGCref() != GCT_NONE)
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index 9b45d3d..abde69c 100644
@@ -7271,7 +7271,7 @@ void emitter::emitIns_Call(EmitCallType          callType,
 #endif
 
     assert(argSize % REGSIZE_BYTES == 0);
-    argCnt = (int)(argSize / (int)sizeof(void*));
+    argCnt = (int)(argSize / (int)REGSIZE_BYTES);
 
     /* Managed RetVal: emit sequence point for the call */
     if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -9928,7 +9928,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
     if (emitInsWritesToLclVarStackLoc(id) || emitInsWritesToLclVarStackLocPair(id))
     {
         int      varNum = id->idAddr()->iiaLclVar.lvaVarNum();
-        unsigned ofs    = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+        unsigned ofs    = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
         bool     FPbased;
         int      adr = emitComp->lvaFrameAddress(varNum, &FPbased);
         if (id->idGCref() != GCT_NONE)
@@ -9954,7 +9954,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
         }
         if (emitInsWritesToLclVarStackLocPair(id))
         {
-            unsigned ofs2 = ofs + sizeof(size_t);
+            unsigned ofs2 = ofs + TARGET_POINTER_SIZE;
             if (id->idGCrefReg2() != GCT_NONE)
             {
                 emitGCvarLiveUpd(adr + ofs2, varNum, id->idGCrefReg2(), dst);
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index fe46c19..d2d28a4 100644
@@ -3679,7 +3679,7 @@ void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fld
     if (EA_IS_OFFSET(attr))
     {
         assert(ins == INS_push);
-        sz = 1 + sizeof(void*);
+        sz = 1 + TARGET_POINTER_SIZE;
 
         id = emitNewInstrDsp(EA_1BYTE, offs);
         id->idIns(ins);
@@ -3888,7 +3888,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO
         assert(ins == INS_mov && reg == REG_EAX);
 
         // Special case: "mov eax, [addr]" is smaller
-        sz = 1 + sizeof(void*);
+        sz = 1 + TARGET_POINTER_SIZE;
     }
     else
     {
@@ -3905,7 +3905,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO
         // instruction.
         if (ins == INS_mov && reg == REG_EAX)
         {
-            sz = 1 + sizeof(void*);
+            sz = 1 + TARGET_POINTER_SIZE;
             if (size == EA_2BYTE)
                 sz += 1;
         }
@@ -3979,7 +3979,7 @@ void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f
     // the instruction.
     if (ins == INS_mov && reg == REG_EAX)
     {
-        sz = 1 + sizeof(void*);
+        sz = 1 + TARGET_POINTER_SIZE;
         if (size == EA_2BYTE)
             sz += 1;
     }
@@ -5291,7 +5291,7 @@ void emitter::emitIns_Call(EmitCallType          callType,
         //
         //
         //
-        if ((sizeof(void*) + // return address for call
+        if ((TARGET_POINTER_SIZE + // return address for call
              emitComp->genStackLevel +
              // Current stack level. This gets resetted on every
              // localloc and on the prolog (invariant is that
@@ -5301,7 +5301,7 @@ void emitter::emitIns_Call(EmitCallType          callType,
              // we've consumed more than JIT_RESERVED_STACK bytes
              // of stack, which is what the prolog probe covers (in
              // addition to the EE requested size)
-             (emitComp->compHndBBtabCount * sizeof(void*))
+             (emitComp->compHndBBtabCount * TARGET_POINTER_SIZE)
              // Hidden slots for calling finallys
              ) >= JIT_RESERVED_STACK)
         {
@@ -5400,8 +5400,8 @@ void emitter::emitIns_Call(EmitCallType          callType,
     }
 #endif
 
-    assert(argSize % sizeof(void*) == 0);
-    argCnt = (int)(argSize / (ssize_t)sizeof(void*)); // we need a signed-divide
+    assert(argSize % REGSIZE_BYTES == 0);
+    argCnt = (int)(argSize / (int)REGSIZE_BYTES); // we need a signed-divide
 
     /* Managed RetVal: emit sequence point for the call */
     if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -6395,7 +6395,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
 
     if (jdsc && !noDetail)
     {
-        unsigned     cnt = (jdsc->dsSize - 1) / sizeof(void*);
+        unsigned     cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE;
         BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
 
 #ifdef _TARGET_AMD64_
@@ -8759,7 +8759,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc)
 
         if (id->idIsDspReloc())
         {
-            emitRecordRelocation((void*)(dst - sizeof(void*)), target, IMAGE_REL_BASED_MOFFSET);
+            emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET);
         }
 
 #endif //_TARGET_X86_
@@ -11225,7 +11225,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
                 if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP)
                 {
                     assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
-                    emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+                    emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
                 }
                 break;
 
@@ -11235,7 +11235,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
                 {
                     assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
                     emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0,
-                                 (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+                                 (unsigned)(emitGetInsSC(id) / TARGET_POINTER_SIZE));
                 }
                 break;
 
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index bdea7f1..ea95e5c 100644
@@ -1634,7 +1634,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
     assert((compiler->compArgSize & 0x3) == 0);
 
     size_t argCount =
-        (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*);
+        (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
     assert(argCount <= MAX_USHORT_SIZE_T);
     header->argCount = static_cast<unsigned short>(argCount);
 
@@ -2085,7 +2085,7 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
         {
             unsigned offset;
 
-            offset = (pasDepth - i) * sizeof(void*);
+            offset = (pasDepth - i) * TARGET_POINTER_SIZE;
             if (curArg == GCT_BYREF)
                 offset |= byref_OFFSET_FLAG;
 
@@ -2110,7 +2110,7 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
             lvl += i;
 
             unsigned offset;
-            offset = lvl * sizeof(void*);
+            offset = lvl * TARGET_POINTER_SIZE;
             if (mask & pasByrefBottomMask)
                 offset |= byref_OFFSET_FLAG;
 
@@ -2329,7 +2329,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
             // A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
             if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
             {
-                unsigned slots  = compiler->lvaLclSize(varNum) / sizeof(void*);
+                unsigned slots  = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
                 BYTE*    gcPtrs = compiler->lvaGetGcLayout(varNum);
 
                 // walk each member of the array
@@ -2344,7 +2344,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
                     {
                         assert(pass == 1);
 
-                        unsigned offset = varDsc->lvStkOffs + i * sizeof(void*);
+                        unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
 #if DOUBLE_ALIGN
                         // For genDoubleAlign(), locals are addressed relative to ESP and
                         // arguments are addressed relative to EBP.
@@ -2489,7 +2489,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
             unsigned begOffs;
             unsigned endOffs;
 
-            assert(~OFFSET_MASK % sizeof(void*) == 0);
+            assert(~OFFSET_MASK % TARGET_POINTER_SIZE == 0);
 
             /* Get hold of the variable's stack offset */
 
@@ -4276,7 +4276,7 @@ void GCInfo::gcMakeRegPtrTable(
         // Note that the enregisterable struct types cannot have GC pointers in them.
         if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
         {
-            unsigned slots  = compiler->lvaLclSize(varNum) / sizeof(void*);
+            unsigned slots  = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
             BYTE*    gcPtrs = compiler->lvaGetGcLayout(varNum);
 
             // walk each member of the array
@@ -4287,7 +4287,7 @@ void GCInfo::gcMakeRegPtrTable(
                     continue;
                 }
 
-                int offset = varDsc->lvStkOffs + i * sizeof(void*);
+                int offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
 #if DOUBLE_ALIGN
                 // For genDoubleAlign(), locals are addressed relative to ESP and
                 // arguments are addressed relative to EBP.
diff --git a/src/jit/gcinfo.cpp b/src/jit/gcinfo.cpp
index 293abd5..f330a86 100644
@@ -515,7 +515,7 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* untrackedCount, UNALIGNED
         }
         else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
         {
-            unsigned slots  = compiler->lvaLclSize(varNum) / sizeof(void*);
+            unsigned slots  = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
             BYTE*    gcPtrs = compiler->lvaGetGcLayout(varNum);
 
             // walk each member of the array
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 0753152..23262f3 100644
@@ -5672,7 +5672,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
 
         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
-        op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+        op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
 
         if (varTypeIsStruct(exprToBox))
@@ -6528,9 +6528,9 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolve
                     op1->gtType = TYP_REF; // points at boxed object
                     FieldSeqNode* firstElemFldSeq =
                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
-                    op1 =
-                        gtNewOperNode(GT_ADD, TYP_BYREF, op1,
-                                      new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
+                    op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+                                        new (this, GT_CNS_INT)
+                                            GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
 
                     if (varTypeIsStruct(lclTyp))
                     {
@@ -6586,7 +6586,7 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolve
         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
 
         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
-                            new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
+                            new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
     }
 
     if (!(access & CORINFO_ACCESS_ADDRESS))
@@ -11291,7 +11291,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     // remember the element size
                     if (lclTyp == TYP_REF)
                     {
-                        op1->gtIndex.gtIndElemSize = sizeof(void*);
+                        op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
                     }
                     else
                     {
@@ -14686,7 +14686,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     // UNBOX(exp) morphs into
                     // clone = pop(exp);
                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
-                    // push(clone + sizeof(void*))
+                    // push(clone + TARGET_POINTER_SIZE)
                     //
                     GenTreePtr cloneOperand;
                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
@@ -14719,7 +14719,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     // to the beginning of the value-type. Today this means adjusting
                     // past the base of the objects vtable field which is pointer sized.
 
-                    op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+                    op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
                 }
                 else
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index eaf7396..2d4ebfd 100644
@@ -758,10 +758,8 @@ int DefaultPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* meth
 
             callsiteSize += 10; // "lea     EAX, bword ptr [EBP-14H]"
 
-            // NB sizeof (void*) fails to convey intent when cross-jitting.
-
-            unsigned opsz  = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
-            unsigned slots = opsz / sizeof(void*);
+            unsigned opsz  = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), TARGET_POINTER_SIZE));
+            unsigned slots = opsz / TARGET_POINTER_SIZE;
 
             callsiteSize += slots * 20; // "push    gword ptr [EAX+offs]  "
         }
@@ -1578,7 +1576,7 @@ void DiscretionaryPolicy::MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo
     const unsigned    argCount = args.numArgs;
     m_ArgCount                 = argCount;
 
-    const unsigned pointerSize = sizeof(void*);
+    const unsigned pointerSize = TARGET_POINTER_SIZE;
     unsigned       i           = 0;
 
     // Implicit arguments
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index d75e81b..bc2470c 100644
@@ -364,7 +364,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
 #endif // FEATURE_FASTTAILCALL
 
     // The total argument size must be aligned.
-    noway_assert((compArgSize % sizeof(void*)) == 0);
+    noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0);
 
 #ifdef _TARGET_X86_
     /* We can not pass more than 2^16 dwords as arguments as the "ret"
@@ -2263,9 +2263,9 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
         varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd);
 
         size_t lvSize = varDsc->lvSize();
-        assert((lvSize % sizeof(void*)) ==
-               0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
-        varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
+        assert((lvSize % TARGET_POINTER_SIZE) ==
+               0); // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid.
+        varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / TARGET_POINTER_SIZE) * sizeof(BYTE), CMK_LvaTable);
         unsigned  numGCVars;
         var_types simdBaseType = TYP_UNKNOWN;
         varDsc->lvType         = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
@@ -4756,7 +4756,7 @@ void Compiler::lvaFixVirtualFrameOffsets()
                     // We need to re-adjust the offsets of the parameters so they are EBP
                     // relative rather than stack/frame pointer relative
 
-                    varDsc->lvStkOffs += (2 * sizeof(void*)); // return address and pushed EBP
+                    varDsc->lvStkOffs += (2 * TARGET_POINTER_SIZE); // return address and pushed EBP
 
                     noway_assert(varDsc->lvStkOffs >= FIRST_ARG_STACK_OFFS);
                 }
@@ -4871,10 +4871,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
     /* Update the argOffs to reflect arguments that are passed in registers */
 
     noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG);
-    noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*));
+    noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
 
 #ifdef _TARGET_X86_
-    argOffs -= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*);
+    argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
 #endif
 
 #ifndef LEGACY_BACKEND
@@ -4913,13 +4913,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
     if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
     {
         noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
-        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                    argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
     }
 
     if (info.compIsVarArgs)
     {
-        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                    argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
     }
 
@@ -5018,13 +5018,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
     if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
     {
         noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
-        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                    argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
     }
 
     if (info.compIsVarArgs)
     {
-        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                    argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
     }
 
@@ -5201,19 +5201,19 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
 
 #if !defined(_TARGET_ARMARCH_)
 #if DEBUG
-        // TODO: Remove this noway_assert and replace occurrences of sizeof(void *) with argSize
+        // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize
         // Also investigate why we are incrementing argOffs for X86 as this seems incorrect
         //
-        noway_assert(argSize == sizeof(void*));
+        noway_assert(argSize == TARGET_POINTER_SIZE);
 #endif // DEBUG
 #endif
 
 #if defined(_TARGET_X86_)
-        argOffs += sizeof(void*);
+        argOffs += TARGET_POINTER_SIZE;
 #elif defined(_TARGET_AMD64_)
         // Register arguments on AMD64 also takes stack space. (in the backing store)
         varDsc->lvStkOffs = argOffs;
-        argOffs += sizeof(void*);
+        argOffs += TARGET_POINTER_SIZE;
 #elif defined(_TARGET_ARM64_)
 // Register arguments on ARM64 only take stack space when they have a frame home.
 #elif defined(_TARGET_ARM_)
@@ -5471,7 +5471,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
 
 #ifdef _TARGET_XARCH_
     // On x86/amd64, the return address has already been pushed by the call instruction in the caller.
-    stkOffs -= sizeof(void*); // return address;
+    stkOffs -= TARGET_POINTER_SIZE; // return address;
 
     // TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other
     // calleeregs.  When you fix this, you'll also need to fix
@@ -6094,9 +6094,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
     if (lvaOutgoingArgSpaceSize > 0)
     {
 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V.
-        noway_assert(lvaOutgoingArgSpaceSize >= (4 * sizeof(void*)));
+        noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE));
 #endif
-        noway_assert((lvaOutgoingArgSpaceSize % sizeof(void*)) == 0);
+        noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0);
 
         // Give it a value so we can avoid asserts in CHK builds.
         // Since this will always use an SP relative offset of zero
@@ -6125,7 +6125,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
     pushedCount += 1; // pushed PC (return address)
 #endif
 
-    noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)sizeof(void*))));
+    noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE)));
 }
 
 int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs)
@@ -6328,11 +6328,11 @@ void Compiler::lvaAlignFrame()
     //
     bool lclFrameSizeAligned   = (compLclFrameSize % sizeof(double)) == 0;
     bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) %
-                                  (sizeof(double) / sizeof(void*))) == 0;
+                                  (sizeof(double) / TARGET_POINTER_SIZE)) == 0;
 
     if (regPushedCountAligned != lclFrameSizeAligned)
     {
-        lvaIncrementFrameSize(sizeof(void*));
+        lvaIncrementFrameSize(TARGET_POINTER_SIZE);
     }
 
 #elif defined(_TARGET_X86_)
@@ -6345,7 +6345,7 @@ void Compiler::lvaAlignFrame()
         if (compLclFrameSize == 0)
         {
             // This can only happen with JitStress=1 or JitDoubleAlign=2
-            lvaIncrementFrameSize(sizeof(void*));
+            lvaIncrementFrameSize(TARGET_POINTER_SIZE);
         }
     }
 #endif
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index c1a7973..fbe3758 100644
@@ -146,7 +146,7 @@ bool Compiler::fgMorphRelopToQmark(GenTreePtr tree)
 GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
 {
     noway_assert(tree->gtOper == GT_CAST);
-    noway_assert(genTypeSize(TYP_I_IMPL) == sizeof(void*));
+    noway_assert(genTypeSize(TYP_I_IMPL) == TARGET_POINTER_SIZE);
 
     /* The first sub-operand is the thing being cast */
 
@@ -204,7 +204,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
             tree->gtFlags &= ~GTF_UNSIGNED;
         }
 #else
-        if (dstSize < sizeof(void*))
+        if (dstSize < TARGET_POINTER_SIZE)
         {
             oper = gtNewCastNodeL(TYP_I_IMPL, oper, TYP_I_IMPL);
             oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
@@ -3957,7 +3957,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
                         }
                         else
                         {
-                            // If the valuetype size is not a multiple of sizeof(void*),
+                            // If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
                             // we must copyblk to a temp before doing the obj to avoid
                             // the obj reading memory past the end of the valuetype
                             CLANG_FORMAT_COMMENT_ANCHOR;
@@ -6371,7 +6371,7 @@ GenTreePtr Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varTyp
         // Create a node representing the local pointing to the base of the args
         GenTreePtr ptrArg =
             gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
-                          gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*) +
+                          gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES +
                                         lclOffs));
 
         // Access the argument through the local
diff --git a/src/jit/typelist.h b/src/jit/typelist.h
index 2e3667b..f7b803f 100644
@@ -5,7 +5,7 @@
 #define GCS EA_GCREF
 #define BRS EA_BYREF
 #define PS EA_PTRSIZE
-#define PST (sizeof(void*) / sizeof(int))
+#define PST (TARGET_POINTER_SIZE / sizeof(int))
 
 #ifdef _TARGET_64BIT_
 #define VTF_I32 0