These changes address casts that trigger warnings C4242, C4254, or
C4302 (all of which are truncation-related). Most of the warnings
turned out to be innocuous, but truncating pointer values to 32-bit
integers to generate hash codes does look somewhat fishy.
Commit migrated from https://github.com/dotnet/coreclr/commit/ed690e0a669cf3c62588c3cbc9d5012490cee1c3
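For context, the pointer-truncation case the message calls fishy looks like this under 64-bit MSVC (an illustrative sketch, not code from the commit; HashPtr is a made-up name):

#include <cstdint>

// Illustrative only: on a 64-bit target the direct cast truncates the
// pointer and MSVC reports C4302. Casting through uintptr_t first makes
// the truncation explicit and silences the warning.
unsigned HashPtr(const void* p)
{
    // unsigned bad = (unsigned)p; // C4302 under 64-bit MSVC
    return static_cast<unsigned>(reinterpret_cast<std::uintptr_t>(p));
}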
public:
static unsigned GetHashCode(const T* ptr)
{
- return (unsigned)ptr; // Hmm. Maybe (unsigned) ought to be "ssize_t" -- or this ought to be ifdef'd by size.
+ // Hmm. Maybe (unsigned) ought to be "ssize_t" -- or this ought to be ifdef'd by size.
+ return static_cast<unsigned>(reinterpret_cast<uintptr_t>(ptr));
}
};
bool Compiler::eeIsJitDataOffs(CORINFO_FIELD_HANDLE field)
{
// if 'field' is a jit data offset it has to fit into a 32-bit unsigned int
- unsigned value = (unsigned) field;
+ unsigned value = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field));
if (((CORINFO_FIELD_HANDLE)(size_t)value) != field)
{
return false; // upper bits were set, not a jit data offset
}
+
// Data offsets are marked by the fact that the low two bits are 0b01 (0x1)
return (value & iaut_MASK) == iaut_DATA_OFFSET;
}
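The function above validates the narrowing with a round-trip check: truncate, widen back, and compare. The same pattern in isolation (a hypothetical helper, not JIT code):

#include <cstdint>

// Hypothetical helper: a value fits in 32 bits exactly when widening
// the truncated value reproduces the original.
static bool FitsInUnsigned32(std::uintptr_t value)
{
    unsigned truncated = static_cast<unsigned>(value);
    return static_cast<std::uintptr_t>(truncated) == value;
}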
// Data offsets are marked by the fact that the low two bits are 0b01 (0x1)
if (eeIsJitDataOffs(field))
{
- unsigned dataOffs = (unsigned) field;
+ unsigned dataOffs = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field));
assert(((CORINFO_FIELD_HANDLE)(size_t)dataOffs) == field);
assert(dataOffs < 0x40000000);
- return ((int) field) >> iaut_SHIFT;
+ return (static_cast<int>(reinterpret_cast<intptr_t>(field))) >> iaut_SHIFT;
}
else
{
// An absolute indir address that doesn't need reloc should fit within 32 bits
// to be encoded as an offset relative to zero. This addr mode requires an extra
// SIB byte
- noway_assert((int)addr == (size_t)addr);
+ noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
sz++;
}
#endif //_TARGET_AMD64_
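The assert captures the real constraint: an absolute address is encodable as a 32-bit displacement only if sign-extending its low 32 bits reproduces the full address. A standalone sketch of that check (an assumed helper, not the emitter's):

#include <cstdint>

// Sketch: disp32 operands are sign-extended by the CPU, so an address
// is encodable iff sign-extending its low 32 bits gives it back.
static bool FitsInDisp32(const void* addr)
{
    std::intptr_t full = reinterpret_cast<std::intptr_t>(addr);
    return static_cast<std::intptr_t>(static_cast<std::int32_t>(full)) == full;
}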
// An absolute indir address that doesn't need reloc should fit within 32 bits
// to be encoded as an offset relative to zero. This addr mode requires an extra
// SIB byte
- noway_assert((int)addr == (size_t)addr);
+ noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
sz++;
}
#endif //_TARGET_AMD64_
// the addr can be encoded as a pc-relative address.
noway_assert(!emitComp->opts.compReloc);
noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32);
- noway_assert((int)addr == (ssize_t)addr);
+ noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (ssize_t)addr);
// This requires specifying a SIB byte after the ModRM byte.
dst += emitOutputWord(dst, code | 0x0400);
dst += emitOutputByte(dst, 0x25);
#endif //_TARGET_AMD64_
- dst += emitOutputLong(dst, (int)addr);
+ dst += emitOutputLong(dst, static_cast<int>(reinterpret_cast<intptr_t>(addr)));
}
goto DONE_CALL;
}
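emitOutputLong then writes those low 32 bits into the instruction stream. A generic stand-in for that step (hypothetical; the real emitter does more bookkeeping):

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for emitOutputLong: copy a 32-bit value into
// the code buffer and return the number of bytes emitted.
static unsigned OutputLong(std::uint8_t* dst, int value)
{
    std::memcpy(dst, &value, sizeof(value));
    return sizeof(value);
}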
hash ^= tree->gtCast.gtCastType;
break;
case GT_LDOBJ:
- hash ^= reinterpret_cast<unsigned>(tree->gtLdObj.gtClass);
+ hash ^= static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtLdObj.gtClass));
break;
case GT_INDEX:
hash += tree->gtIndex.gtIndElemSize;
// Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
static int GetHashCode(FieldSeqNode fsn)
{
- return reinterpret_cast<int>(fsn.m_fieldHnd) ^ reinterpret_cast<int>(fsn.m_next);
+ return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
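Both fields are pointer-sized, so each is truncated through intptr_t before being combined. The same combine in isolation (generic names, assumed shape):

#include <cstdint>

// Sketch of the hash combine used above: truncate each pointer through
// intptr_t to an int, then XOR the two halves of the key together.
static int CombinePointerHashes(const void* a, const void* b)
{
    return static_cast<int>(reinterpret_cast<std::intptr_t>(a)) ^
           static_cast<int>(reinterpret_cast<std::intptr_t>(b));
}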
static bool Equals(FieldSeqNode fsn1, FieldSeqNode fsn2)
{
verInitBBEntryState(block, &verCurrentState);
assert(block->bbStkDepth == 0);
- block->bbStkDepth = verCurrentState.esStackDepth;
+ block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
assert(addToPending);
assert(impGetPendingBlockMember(block) == 0);
}
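The new cast narrows silently; the usual way to make such a truncation deliberate is to assert that the value fits first (a sketch of the pattern, not the commit's code):

#include <cassert>

// Sketch: assert-then-narrow turns a C4242-style truncation from an
// accident into a checked, intentional conversion.
static unsigned short NarrowToUShort(unsigned value)
{
    assert(value <= 0xFFFF); // must fit in 16 bits
    return static_cast<unsigned short>(value);
}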
case TYP_REF:
return CoerceTypRefToT<T>(c, offset);
case TYP_BYREF:
- return (T) reinterpret_cast<VarTypConv<TYP_BYREF>::Type*>(c->m_defs)[offset];
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_BYREF>::Type*>(c->m_defs)[offset]);
case TYP_INT:
- return (T) reinterpret_cast<VarTypConv<TYP_INT>::Type*>(c->m_defs)[offset];
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_INT>::Type*>(c->m_defs)[offset]);
case TYP_LONG:
- return (T) reinterpret_cast<VarTypConv<TYP_LONG>::Type*>(c->m_defs)[offset];
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_LONG>::Type*>(c->m_defs)[offset]);
case TYP_FLOAT:
- return (T) reinterpret_cast<VarTypConv<TYP_FLOAT>::Lang*>(c->m_defs)[offset];
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_FLOAT>::Lang*>(c->m_defs)[offset]);
case TYP_DOUBLE:
- return (T) reinterpret_cast<VarTypConv<TYP_DOUBLE>::Lang*>(c->m_defs)[offset];
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_DOUBLE>::Lang*>(c->m_defs)[offset]);
default:
assert(false);
return (T)0;
return GenTree::OperIsCompare(gtOp) != 0;
}
-template <typename T>
-inline T ValueNumStore::CoerceTypRefToT(Chunk* c, unsigned offset)
-{
- noway_assert(sizeof(T) >= sizeof(VarTypConv<TYP_REF>::Type));
- return (T) reinterpret_cast<VarTypConv<TYP_REF>::Type*>(c->m_defs)[offset];
-}
-
template <>
-inline float ValueNumStore::CoerceTypRefToT<float>(Chunk* c, unsigned offset)
+inline size_t ValueNumStore::CoerceTypRefToT(Chunk* c, unsigned offset)
{
- unreached();
+ return reinterpret_cast<size_t>(reinterpret_cast<VarTypConv<TYP_REF>::Type*>(c->m_defs)[offset]);
}
-template <>
-inline double ValueNumStore::CoerceTypRefToT<double>(Chunk* c, unsigned offset)
+template <typename T>
+inline T ValueNumStore::CoerceTypRefToT(Chunk* c, unsigned offset)
{
+ noway_assert(sizeof(T) >= sizeof(VarTypConv<TYP_REF>::Type));
unreached();
}
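After the reordering, only the size_t instantiation performs the coercion; every other T still asserts its size and then falls into unreached(). The dispatch shape in miniature (types simplified, not the JIT's):

#include <cassert>
#include <cstddef>

// Simplified model: only the size_t specialization may reinterpret the
// stored pointer bits; any other instantiation is a logic error.
template <typename T>
T CoercePointerBits(void* p)
{
    (void)p; // unused in the unreachable path
    assert(false && "unreachable for this type");
    return T();
}

template <>
inline size_t CoercePointerBits<size_t>(void* p)
{
    return reinterpret_cast<size_t>(p);
}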