This is a mechanical prep change for scalable vector support. All it does is move the TypeSize-to-unsigned conversion (i.e. the unsafe cast) closer to the point of use: the InterestingMemoryOperand field becomes a TypeSize (renamed to TypeStoreSize so it no longer shadows the class name), and callers convert where the byte count is actually consumed.
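As a rough sketch of the pattern this moves toward (not part of the patch; the struct and helper names below are hypothetical, only the llvm::TypeSize calls are real API): keep the store size as a TypeSize member and collapse it to a fixed integer only where a plain byte count is needed.

    #include "llvm/Support/TypeSize.h"
    #include <cassert>
    #include <cstdint>

    struct OperandSketch {
      // Keep the full TypeSize; a scalable vector's size is not a plain integer.
      llvm::TypeSize TypeStoreSize = llvm::TypeSize::Fixed(0);
    };

    // The unsafe TypeSize -> integer cast now happens at the point of use,
    // and only for fixed-size types; scalable sizes would need their own path.
    static uint64_t storeBytes(const OperandSketch &O) {
      assert(!O.TypeStoreSize.isScalable() &&
             "scalable sizes need separate handling");
      return O.TypeStoreSize.getFixedValue() / 8;
    }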
Use *PtrUse;
bool IsWrite;
Type *OpType;
- uint64_t TypeSize;
+ TypeSize TypeStoreSize = TypeSize::Fixed(0);
MaybeAlign Alignment;
// The mask Value, if we're looking at a masked load/store.
Value *MaybeMask;
: IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
MaybeMask(MaybeMask) {
const DataLayout &DL = I->getModule()->getDataLayout();
- TypeSize = DL.getTypeStoreSizeInBits(OpType);
+ TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
PtrUse = &I->getOperandUse(OperandNo);
}
// dynamically initialized global is always valid.
GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
- isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
+ isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
NumOptimizedAccessesToGlobalVar++;
return;
}
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
- isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
+ isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
NumOptimizedAccessesToStackVar++;
return;
}
O.IsWrite, nullptr, UseCalls, Exp);
} else {
doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
- Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
+ Granularity, O.TypeStoreSize, O.IsWrite, nullptr, UseCalls,
Exp);
}
}
return false; // FIXME
IRBuilder<> IRB(O.getInsn());
- if (isPowerOf2_64(O.TypeSize) &&
- (O.TypeSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
+ if (isPowerOf2_64(O.TypeStoreSize) &&
+ (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
(!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
- *O.Alignment >= O.TypeSize / 8)) {
- size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
+ *O.Alignment >= O.TypeStoreSize / 8)) {
+ size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
if (InstrumentWithCalls) {
IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
IRB.CreatePointerCast(Addr, IntptrTy));
} else {
IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
{IRB.CreatePointerCast(Addr, IntptrTy),
- ConstantInt::get(IntptrTy, O.TypeSize / 8)});
+ ConstantInt::get(IntptrTy, O.TypeStoreSize / 8)});
}
untagPointerOperand(O.getInsn(), Addr);