From de7a416d55f309c36fd883e94d3263d60dc86e8c Mon Sep 17 00:00:00 2001
From: Egor Chesakov
Date: Mon, 20 Aug 2018 17:08:18 -0700
Subject: [PATCH] Cleanup unnecessary casts in roundUp

---
 src/jit/codegenarm64.cpp         |  6 +++---
 src/jit/codegencommon.cpp        | 24 +++++++++++-------------
 src/jit/codegenxarch.cpp         |  2 +-
 src/jit/compiler.h               |  4 ++--
 src/jit/compilerbitsettraits.hpp |  4 ++--
 src/jit/ee_il_dll.cpp            |  4 ++--
 src/jit/flowgraph.cpp            |  2 +-
 src/jit/gentree.cpp              |  2 +-
 src/jit/inlinepolicy.cpp         |  2 +-
 src/jit/lclvars.cpp              | 20 +++++++++++---------
 src/jit/lsra.cpp                 |  2 +-
 src/jit/morph.cpp                | 24 ++++++++++++------------
 src/jit/rationalize.cpp          |  4 ++--
 13 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 70d3a8f..30ab08f 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -1148,10 +1148,10 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
         // so that they are contiguous with the incoming stack arguments.
         saveRegsPlusPSPSize += MAX_REG_ARG * REGSIZE_BYTES;
     }
-    unsigned saveRegsPlusPSPSizeAligned = (unsigned)roundUp(saveRegsPlusPSPSize, STACK_ALIGN);
+    unsigned saveRegsPlusPSPSizeAligned = roundUp(saveRegsPlusPSPSize, STACK_ALIGN);

     assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
-    unsigned outgoingArgSpaceAligned = (unsigned)roundUp(compiler->lvaOutgoingArgSpaceSize, STACK_ALIGN);
+    unsigned outgoingArgSpaceAligned = roundUp(compiler->lvaOutgoingArgSpaceSize, STACK_ALIGN);

     unsigned maxFuncletFrameSizeAligned = saveRegsPlusPSPSizeAligned + outgoingArgSpaceAligned;
     assert((maxFuncletFrameSizeAligned % STACK_ALIGN) == 0);
@@ -1163,7 +1163,7 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
     if (maxFuncletFrameSizeAligned <= 512)
     {
         unsigned funcletFrameSize        = saveRegsPlusPSPSize + compiler->lvaOutgoingArgSpaceSize;
-        unsigned funcletFrameSizeAligned = (unsigned)roundUp(funcletFrameSize, STACK_ALIGN);
+        unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
         assert(funcletFrameSizeAligned <= maxFuncletFrameSizeAligned);

         unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 9c79e48..564fd1e 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -1138,8 +1138,7 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme
     {
         CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->gtClass;
         opSize                      = compiler->info.compCompHnd->getClassSize(clsHnd);
-        alignment =
-            (unsigned)roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
+        alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
     }
     else if (op->gtOper == GT_LCL_VAR)
     {
@@ -1167,9 +1166,9 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme
             if (op2->IsIconHandle(GTF_ICON_CLASS_HDL))
             {
                 CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)op2->gtIntCon.gtIconVal;
-                opSize = (unsigned)roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
-                alignment = (unsigned)roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd),
-                                              TARGET_POINTER_SIZE);
+                opSize    = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
+                alignment =
+                    roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
             }
             else
             {
@@ -1204,9 +1203,8 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme
     {
         CORINFO_CLASS_HANDLE clsHnd = op->gtArgPlace.gtArgPlaceClsHnd;
         assert(clsHnd != 0);
-        opSize = (unsigned)roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
-        alignment =
-            (unsigned)roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
+        opSize    = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
+        alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
     }
     else
     {
@@ -4706,7 +4704,7 @@ void CodeGen::genCheckUseBlockInit()
                 {
                     // Var is on the stack at entry.
                     initStkLclCnt +=
-                        (unsigned)roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
+                        roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
                 }
             }
             else
@@ -4737,7 +4735,7 @@ void CodeGen::genCheckUseBlockInit()
             {
                 varDsc->lvMustInit = true;

-                initStkLclCnt += (unsigned)roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
+                initStkLclCnt += roundUp(compiler->lvaLclSize(varNum), TARGET_POINTER_SIZE) / sizeof(int);
             }

             continue;
@@ -5186,7 +5184,7 @@ void CodeGen::genPushCalleeSavedRegisters()
             // If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
             assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
             int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
-            int spAdjustment2          = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
+            int spAdjustment2          = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
             int alignmentAdjustment2   = spAdjustment2 - spAdjustment2Unaligned;
             assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));

@@ -5980,7 +5978,7 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog)
         // If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
         assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
         int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
-        int spAdjustment2          = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
+        int spAdjustment2          = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
         int alignmentAdjustment2   = spAdjustment2 - spAdjustment2Unaligned;
         assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == REGSIZE_BYTES));

@@ -6521,7 +6519,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg,
                 regNumber zeroReg = genGetZeroReg(initReg, pInitRegZeroed);

                 // zero out the whole thing rounded up to a single stack slot size
-                unsigned lclSize = (unsigned)roundUp(compiler->lvaLclSize(varNum), sizeof(int));
+                unsigned lclSize = roundUp(compiler->lvaLclSize(varNum), (unsigned)sizeof(int));
                 unsigned i;
                 for (i = 0; i + REGSIZE_BYTES <= lclSize; i += REGSIZE_BYTES)
                 {
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index a921289..dae6f2e 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -5069,7 +5069,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
             if (source->TypeGet() == TYP_STRUCT)
             {
                 GenTreeObj* obj      = source->AsObj();
-                unsigned    argBytes = (unsigned)roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);
+                unsigned    argBytes = roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);
                 assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
             }
 #endif // FEATURE_PUT_STRUCT_ARG_STK
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 6015cf3..0a1ff81 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -666,7 +666,7 @@ public:
         }
 #endif // defined(FEATURE_SIMD) && !defined(_TARGET_64BIT_)

-        return (unsigned)(roundUp(lvExactSize, TARGET_POINTER_SIZE));
+        return roundUp(lvExactSize, TARGET_POINTER_SIZE);
     }

     const size_t lvArgStackSize() const;
@@ -3701,7 +3701,7 @@ public:
         fgCurBBEpoch++;
         fgCurBBEpochSize = fgBBNumMax + 1;
         fgBBSetCountInSizeTUnits =
-            unsigned(roundUp(fgCurBBEpochSize, sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
+            roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);

 #ifdef DEBUG
         // All BlockSet objects are now invalid!
diff --git a/src/jit/compilerbitsettraits.hpp b/src/jit/compilerbitsettraits.hpp
index e6c6b13..79f7b5e 100644
--- a/src/jit/compilerbitsettraits.hpp
+++ b/src/jit/compilerbitsettraits.hpp
@@ -78,7 +78,7 @@ unsigned AllVarBitSetTraits::GetSize(Compiler* comp)
 // static
 unsigned AllVarBitSetTraits::GetArrSize(Compiler* comp, unsigned elemSize)
 {
-    return unsigned(roundUp(GetSize(comp), elemSize));
+    return roundUp(GetSize(comp), elemSize);
 }

 // static
@@ -163,7 +163,7 @@ unsigned BitVecTraits::GetArrSize(BitVecTraits* b, unsigned elemSize)
 {
     assert(elemSize == sizeof(size_t));
     unsigned elemBits = 8 * elemSize;
-    return (unsigned)roundUp(b->size, elemBits) / elemBits;
+    return roundUp(b->size, elemBits) / elemBits;
 }

 // static
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index 88612bc..cf65bdb 100644
--- a/src/jit/ee_il_dll.cpp
+++ b/src/jit/ee_il_dll.cpp
@@ -500,13 +500,13 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
 #endif // FEATURE_MULTIREG_ARGS

         // we pass this struct by value in multiple registers
-        return (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
+        return roundUp(structSize, TARGET_POINTER_SIZE);
     }
     else
     {
         unsigned argSize = sizeof(int) * genTypeStSz(argType);
         assert(0 < argSize && argSize <= sizeof(__int64));
-        return (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
+        return roundUp(argSize, TARGET_POINTER_SIZE);
     }
 #endif
 }
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index a590f99..0a9f2ce 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -9648,7 +9648,7 @@ void Compiler::fgSimpleLowering()
         // stack alignment boundary.
         if (compLocallocUsed)
         {
-            outgoingArgSpaceSize = (unsigned)roundUp(outgoingArgSpaceSize, STACK_ALIGN);
+            outgoingArgSpaceSize = roundUp(outgoingArgSpaceSize, STACK_ALIGN);
             JITDUMP("Bumping outgoingArgSpaceSize to %u for localloc", outgoingArgSpaceSize);
         }

diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index 3e263a0..9421a9c 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -6660,7 +6660,7 @@ void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
     {
         // Get the GC fields info
         var_types simdBaseType; // Dummy argument
-        slots    = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);
+        slots    = roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
         gcPtrs   = new (this, CMK_ASTNode) BYTE[slots];
         nodeType = impNormStructType(structHnd, gcPtrs, &gcPtrCount, &simdBaseType);
     }
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index 9cb534c..45dc96f 100644
--- a/src/jit/inlinepolicy.cpp
+++ b/src/jit/inlinepolicy.cpp
@@ -780,7 +780,7 @@ int DefaultPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* meth

             callsiteSize += 10; // "lea     EAX, bword ptr [EBP-14H]"

-            unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), TARGET_POINTER_SIZE));
+            unsigned opsz  = roundUp(comp->getClassSize(verType.GetClassHandle()), TARGET_POINTER_SIZE);
             unsigned slots = opsz / TARGET_POINTER_SIZE;

             callsiteSize += slots * 20; // "push    gword ptr [EAX+offs]  "
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 5f51739..c540b75 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -977,7 +977,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
 #endif // _TARGET_XXX_

 #if FEATURE_FASTTAILCALL
-            varDscInfo->stackArgSize += (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
+            varDscInfo->stackArgSize += roundUp(argSize, TARGET_POINTER_SIZE);
 #endif // FEATURE_FASTTAILCALL
         }

@@ -985,7 +985,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
     // The arg size is returning the number of bytes of the argument. For a struct it could return a size not a
     // multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE,
     // so round it up.
-    compArgSize += (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
+    compArgSize += roundUp(argSize, TARGET_POINTER_SIZE);
 #else  // !UNIX_AMD64_ABI
     compArgSize += argSize;
 #endif // !UNIX_AMD64_ABI
@@ -3296,7 +3296,8 @@ void Compiler::lvaSortByRefCount()

     // We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits.
     lvaCurEpoch++;
-    lvaTrackedCountInSizeTUnits = unsigned(roundUp(lvaTrackedCount, sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
+    lvaTrackedCountInSizeTUnits =
+        roundUp((unsigned)lvaTrackedCount, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);

 #ifdef DEBUG
     VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this));
@@ -4899,7 +4900,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
 #ifdef UNIX_AMD64_ABI
             // On the stack frame the homed arg always takes a full number of slots
             // for proper stack alignment. Make sure the real struct size is properly rounded up.
-            argumentSize = (unsigned)roundUp(argumentSize, TARGET_POINTER_SIZE);
+            argumentSize = roundUp(argumentSize, TARGET_POINTER_SIZE);
 #endif // UNIX_AMD64_ABI

             argOffs =
@@ -5190,7 +5191,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
             if (argOffs < prevRegsSize)
             {
                 // We must align up the argOffset to a multiple of 8 to account for skipped registers.
-                argOffs = roundUp(argOffs, 2 * TARGET_POINTER_SIZE);
+                argOffs = roundUp((unsigned)argOffs, 2 * TARGET_POINTER_SIZE);
             }
             // We should've skipped only a single register.
             assert(argOffs == prevRegsSize);
@@ -5310,7 +5311,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                 case TYP_DOUBLE:
                 case TYP_LONG:
                     // We must align up the argOffset to a multiple of 8
-                    argOffs = roundUp(argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs;
+                    argOffs =
+                        roundUp((unsigned)argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs;
                     break;

                 default:
@@ -7210,14 +7212,14 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData*
             // We need to support alignment requirements to access memory on ARM ARCH
             unsigned alignment = 1;
             pComp->codeGen->InferOpSizeAlign(lcl, &alignment);
-            alignment = (unsigned)roundUp(alignment, TARGET_POINTER_SIZE);
-            padding   = (unsigned)roundUp(padding, alignment);
+            alignment = roundUp(alignment, TARGET_POINTER_SIZE);
+            padding   = roundUp(padding, alignment);
 #endif // _TARGET_ARMARCH_

             // Change the variable to a TYP_BLK
             if (varType != TYP_BLK)
             {
-                varDsc->lvExactSize = (unsigned)(roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE));
+                varDsc->lvExactSize = roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE);
                 varDsc->lvType      = TYP_BLK;
                 pComp->lvaSetVarAddrExposed(lclNum);
             }
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index 8678584..11a119b 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -1809,7 +1809,7 @@ void LinearScan::initVarRegMaps()
     // The compiler memory allocator requires that the allocation be an
     // even multiple of int-sized objects
     unsigned int varCount = compiler->lvaTrackedCount;
-    regMapCount           = (unsigned int)roundUp(varCount, sizeof(int));
+    regMapCount           = roundUp(varCount, (unsigned)sizeof(int));

     // Not sure why blocks aren't numbered from zero, but they don't appear to be.
     // So, if we want to index by bbNum we have to know the maximum value.
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 83707b0..f1d5195 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -1211,7 +1211,7 @@ fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
 {
     fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;

-    nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
+    nextSlotNum = roundUp(nextSlotNum, alignment);

     curArgTabEntry->setRegNum(0, REG_STK);
     curArgTabEntry->argNum  = argNum;
@@ -1354,7 +1354,7 @@ void fgArgInfo::RemorphStkArg(unsigned argNum, GenTree* node, GenTree* parent, u
         }
     }

-    nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
+    nextSlotNum = roundUp(nextSlotNum, alignment);

     assert(curArgTabEntry->argNum == argNum);
     assert(curArgTabEntry->slotNum == nextSlotNum);
@@ -3478,8 +3478,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
                 }
                 else
                 {
-                    size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
-                                              TARGET_POINTER_SIZE)) /
+                    size = roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+                                   TARGET_POINTER_SIZE) /
                            TARGET_POINTER_SIZE;
                     eeGetSystemVAmd64PassStructInRegisterDescriptor(argx->gtArgPlace.gtArgPlaceClsHnd, &structDesc);
                     if (size > 1)
@@ -3502,8 +3502,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
                 else
                 {
                     // Structs are either passed in 1 or 2 (64-bit) slots
-                    size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
-                                              TARGET_POINTER_SIZE)) /
+                    size = roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+                                   TARGET_POINTER_SIZE) /
                            TARGET_POINTER_SIZE;

                     if (size == 2)
@@ -3528,8 +3528,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
 #elif defined(_TARGET_ARM_)
             if (isStructArg)
             {
-                size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
-                                          TARGET_POINTER_SIZE)) /
+                size = roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+                               TARGET_POINTER_SIZE) /
                        TARGET_POINTER_SIZE;
                 if (isHfaArg || size > 1)
                 {
@@ -3562,7 +3562,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
             if (varTypeIsStruct(argx))
             {
                 size                 = info.compCompHnd->getClassSize(impGetRefAnyClass());
-                unsigned roundupSize = (unsigned)roundUp(size, TARGET_POINTER_SIZE);
+                unsigned roundupSize = roundUp(size, TARGET_POINTER_SIZE);
                 size                 = roundupSize / TARGET_POINTER_SIZE;
                 eeGetSystemVAmd64PassStructInRegisterDescriptor(impGetRefAnyClass(), &structDesc);
             }
@@ -3603,7 +3603,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)

                     unsigned originalSize = info.compCompHnd->getClassSize(objClass);
                     originalSize          = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
-                    unsigned roundupSize  = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
+                    unsigned roundupSize  = roundUp(originalSize, TARGET_POINTER_SIZE);

                     structSize = originalSize;
@@ -3943,7 +3943,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
                     // recompute the 'size' so that it represent the number of stack slots rather than the number of
                     // registers
                     //
-                    unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
+                    unsigned roundupSize = roundUp(structSize, TARGET_POINTER_SIZE);
                     size                 = roundupSize / TARGET_POINTER_SIZE;

                     // We also must update fltArgRegNum so that we no longer try to
@@ -4705,7 +4705,7 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry
     {
         assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE);
         BYTE gcPtrs[MAX_ARG_REG_COUNT];
-        elemCount = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
+        elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
         info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);

         for (unsigned inx = 0; inx < elemCount; inx++)
diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp
index 50e5b42..209026d 100644
--- a/src/jit/rationalize.cpp
+++ b/src/jit/rationalize.cpp
@@ -293,7 +293,7 @@ void Rationalizer::FixupIfSIMDLocal(GenTreeLclVarCommon* node)
             node->gtFlags &= ~(GTF_VAR_USEASG);
             break;
     }
-    unsigned simdSize = (unsigned int)roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
+    unsigned simdSize = roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
     node->gtType      = comp->getSIMDTypeForSize(simdSize);
 #endif // FEATURE_SIMD
 }
@@ -450,7 +450,7 @@ void Rationalizer::RewriteAssignment(LIR::Use& use)
             {
                 CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                 GenTreeObj*          objNode   = comp->gtNewObjNode(structHnd, location)->AsObj();
-                unsigned int         slots     = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);
+                unsigned int         slots     = roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;

                 objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots);
                 objNode->ChangeOper(GT_STORE_OBJ);
-- 
2.7.4
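
Note: every hunk above follows one pattern -- call sites stop casting the result
of roundUp to unsigned, while a few arguments gain an (unsigned) cast instead
(e.g. (unsigned)sizeof(int), (unsigned)spAdjustment2Unaligned). That only
type-checks if a companion change made roundUp take and return unsigned rather
than size_t. Below is a minimal sketch of such a helper, assuming the usual
power-of-two bitmask implementation; the exact name, parameters, and asserts
are assumptions, not verbatim JIT source:

    #include <cassert>

    // Presumed post-change shape of the helper (assumption). With the earlier
    // size_t roundUp(size_t, size_t) signature, every unsigned consumer had to
    // cast the result back down; with unsigned in/out, those result casts go
    // away and only size_t/int operands need argument casts at the call site.
    inline unsigned roundUp(unsigned size, unsigned alignment)
    {
        // The bitmask trick requires a nonzero power-of-two alignment and
        // assumes size + alignment does not overflow unsigned.
        assert((alignment != 0) && ((alignment & (alignment - 1)) == 0));
        return (size + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        assert(roundUp(10u, 8u) == 16u); // a 10-byte struct occupies two 8-byte slots
        assert(roundUp(16u, 8u) == 16u); // already-aligned sizes are unchanged
        assert(roundUp(0u, 16u) == 0u);  // zero stays zero
        return 0;
    }

For example, roundUp(10u, 8u) yields 16u directly as unsigned, which is why
lines like "opSize = roundUp(getClassSize(clsHnd), TARGET_POINTER_SIZE)" no
longer need a narrowing cast; the two hunks that keep an explicit cast do so
because the operand (spAdjustment2Unaligned) is a signed int.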