From 1221526681b73c3e99c6e41b5b1cb99ce4b52c69 Mon Sep 17 00:00:00 2001 From: Johannes Doerfert Date: Thu, 29 Jun 2023 10:22:04 -0700 Subject: [PATCH] [Attributor][FIX] Check AA preconditions AAs often have preconditions, e.g., that the associated type is a pointer type. If these do not hold, we do not need to bother creating the AA. Best case, we invalidate it right away, worst case, we crash or do something wrong (as happened in the issues below). Fixes: https://github.com/llvm/llvm-project/issues/63553 Fixes: https://github.com/llvm/llvm-project/issues/63597 --- llvm/include/llvm/Transforms/IPO/Attributor.h | 126 +++++++ llvm/test/Transforms/Attributor/align.ll | 14 + llvm/test/Transforms/Attributor/liveness.ll | 464 ++++++++++++-------------- 3 files changed, 357 insertions(+), 247 deletions(-) diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h index 12784b8..7fbf2de 100644 --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -730,6 +730,17 @@ struct IRPosition { } } + /// Return true if this is a function or call site position. + bool isFunctionScope() const { + switch (getPositionKind()) { + case IRPosition::IRP_CALL_SITE: + case IRPosition::IRP_FUNCTION: + return true; + default: + return false; + }; + } + /// Return the Function surrounding the anchor value. Function *getAnchorScope() const { Value &V = getAnchorValue(); @@ -3462,6 +3473,14 @@ struct AANoSync StateWrapper> { AANoSync(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.isFunctionScope() && + !IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Returns true if "nosync" is assumed. 
bool isAssumedNoSync() const { return getAssumed(); } @@ -3539,6 +3558,13 @@ struct AANonNull StateWrapper> { AANonNull(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Return true if we assume that the underlying value is nonnull. bool isAssumedNonNull() const { return getAssumed(); } @@ -3727,6 +3753,13 @@ struct AANoAlias StateWrapper> { AANoAlias(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + static bool isImpliedByIR(Attributor &A, const IRPosition &IRP, ArrayRef AttrKinds, bool IgnoreSubsumingPositions = false) { @@ -3773,6 +3806,14 @@ struct AANoFree StateWrapper> { AANoFree(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.isFunctionScope() && + !IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Return true if "nofree" is assumed. 
bool isAssumedNoFree() const { return getAssumed(); } @@ -4068,6 +4109,13 @@ struct AADereferenceable StateWrapper> { AADereferenceable(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Return true if we assume that the underlying value is nonnull. bool isAssumedNonNull() const { return NonNullAA && NonNullAA->isAssumedNonNull(); @@ -4124,6 +4172,13 @@ struct AAAlign : public IRAttribute< StateWrapper> { AAAlign(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Return assumed alignment. Align getAssumedAlign() const { return Align(getAssumed()); } @@ -4195,6 +4250,13 @@ struct AANoCapture StateWrapper, AbstractAttribute>> { AANoCapture(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// State encoding bits. A set bit in the state means the property holds. /// NO_CAPTURE is the best possible state, 0 the worst possible state. 
enum { @@ -4406,6 +4468,13 @@ struct AAPrivatizablePtr using Base = StateWrapper; AAPrivatizablePtr(const IRPosition &IRP, Attributor &A) : Base(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return AbstractAttribute::isValidIRPositionForInit(A, IRP); + } + /// Returns true if pointer privatization is assumed to be possible. bool isAssumedPrivatizablePtr() const { return getAssumed(); } @@ -4445,6 +4514,14 @@ struct AAMemoryBehavior StateWrapper, AbstractAttribute>> { AAMemoryBehavior(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.isFunctionScope() && + !IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// State encoding bits. A set bit in the state means the property holds. /// BEST_STATE is the best possible state, 0 the worst possible state. enum { @@ -4510,6 +4587,14 @@ struct AAMemoryLocation AAMemoryLocation(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.isFunctionScope() && + !IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return IRAttribute::isValidIRPositionForInit(A, IRP); + } + /// Encoding of different locations that could be accessed by a memory /// access. 
enum { @@ -4677,6 +4762,13 @@ struct AAValueConstantRange AAValueConstantRange(const IRPosition &IRP, Attributor &A) : Base(IRP, IRP.getAssociatedType()->getIntegerBitWidth()) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isIntegerTy()) + return false; + return AbstractAttribute::isValidIRPositionForInit(A, IRP); + } + /// See AbstractAttribute::getState(...). IntegerRangeState &getState() override { return *this; } const IntegerRangeState &getState() const override { return *this; } @@ -4939,6 +5031,13 @@ struct AAPotentialConstantValues using Base = StateWrapper; AAPotentialConstantValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isIntegerTy()) + return false; + return AbstractAttribute::isValidIRPositionForInit(A, IRP); + } + /// See AbstractAttribute::getState(...). PotentialConstantIntValuesState &getState() override { return *this; } const PotentialConstantIntValuesState &getState() const override { @@ -5069,6 +5168,19 @@ struct AANoFPClass AANoFPClass(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + Type *Ty = IRP.getAssociatedType(); + do { + if (Ty->isFPOrFPVectorTy()) + return IRAttribute::isValidIRPositionForInit(A, IRP); + if (!Ty->isArrayTy()) + break; + Ty = Ty->getArrayElementType(); + } while (true); + return false; + } + /// Return true if we assume that the underlying value is nofpclass. 
FPClassTest getAssumedNoFPClass() const { return static_cast(getAssumed()); @@ -5406,6 +5518,13 @@ struct AANonConvergent : public StateWrapper { struct AAPointerInfo : public AbstractAttribute { AAPointerInfo(const IRPosition &IRP) : AbstractAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return AbstractAttribute::isValidIRPositionForInit(A, IRP); + } + enum AccessKind { // First two bits to distinguish may and must accesses. AK_MUST = 1 << 0, @@ -5828,6 +5947,13 @@ struct AAAssumptionInfo struct AAUnderlyingObjects : AbstractAttribute { AAUnderlyingObjects(const IRPosition &IRP) : AbstractAttribute(IRP) {} + /// See AbstractAttribute::isValidIRPositionForInit + static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) { + if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy()) + return false; + return AbstractAttribute::isValidIRPositionForInit(A, IRP); + } + /// Create an abstract attribute biew for the position \p IRP. 
static AAUnderlyingObjects &createForPosition(const IRPosition &IRP, Attributor &A); diff --git a/llvm/test/Transforms/Attributor/align.ll b/llvm/test/Transforms/Attributor/align.ll index 74eb361..5433841 100644 --- a/llvm/test/Transforms/Attributor/align.ll +++ b/llvm/test/Transforms/Attributor/align.ll @@ -1089,6 +1089,20 @@ define ptr @aligned_8_return_caller(ptr align(16) %a, i1 %c1, i1 %c2) { ret ptr %r } +define i32 @implicit_cast_caller(ptr %ptr) { +; CHECK-LABEL: define {{[^@]+}}@implicit_cast_caller +; CHECK-SAME: (ptr [[PTR:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @implicit_cast_callee(ptr [[PTR]]) +; CHECK-NEXT: ret i32 0 +; +entry: + %call = tail call i32 @implicit_cast_callee(ptr %ptr) + ret i32 0 +} + +declare void @implicit_cast_callee(i64) + attributes #0 = { nounwind uwtable noinline } attributes #1 = { uwtable noinline } attributes #2 = { null_pointer_is_valid } diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll index 955b328..27f2d0e 100644 --- a/llvm/test/Transforms/Attributor/liveness.ll +++ b/llvm/test/Transforms/Attributor/liveness.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals -; RUN: opt -opaque-pointers=0 -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT -; RUN: opt -opaque-pointers=0 -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,CGSCC +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s 
--check-prefixes=CHECK,CGSCC -; NOT_CGSCC___: @dead_with_blockaddress_users.l = constant [2 x i8*] [i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 1 to i8*)] -; IS__CGSCC___: @dead_with_blockaddress_users.l = constant [2 x i8*] [i8* blockaddress(@dead_with_blockaddress_users, %lab0), i8* blockaddress(@dead_with_blockaddress_users, %end)] -@dead_with_blockaddress_users.l = constant [2 x i8*] [i8* blockaddress(@dead_with_blockaddress_users, %lab0), i8* blockaddress(@dead_with_blockaddress_users, %end)] +; NOT_CGSCC___: @dead_with_blockaddress_users.l = constant [2 x ptr] [ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr)] +; IS__CGSCC___: @dead_with_blockaddress_users.l = constant [2 x ptr] [ptr blockaddress(@dead_with_blockaddress_users, %lab0), ptr blockaddress(@dead_with_blockaddress_users, %end)] +@dead_with_blockaddress_users.l = constant [2 x ptr] [ptr blockaddress(@dead_with_blockaddress_users, %lab0), ptr blockaddress(@dead_with_blockaddress_users, %end)] declare void @no_return_call() nofree noreturn nounwind nosync @@ -24,16 +24,16 @@ declare i32 @bar() nosync readnone ; and nothing should be deduced for it. ;. -; TUNIT: @[[DEAD_WITH_BLOCKADDRESS_USERS_L:[a-zA-Z0-9_$"\\.-]+]] = constant [2 x i8*] [i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 1 to i8*)] +; TUNIT: @[[DEAD_WITH_BLOCKADDRESS_USERS_L:[a-zA-Z0-9_$"\\.-]+]] = constant [2 x ptr] [ptr inttoptr (i32 1 to ptr), ptr inttoptr (i32 1 to ptr)] ; TUNIT: @[[A1:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 8 ; TUNIT: @[[A2:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 16 -; TUNIT: @[[E:[a-zA-Z0-9_$"\\.-]+]] = global %struct.a* null +; TUNIT: @[[E:[a-zA-Z0-9_$"\\.-]+]] = global ptr null ; TUNIT: @[[P:[a-zA-Z0-9_$"\\.-]+]] = global i8 0 ;. 
-; CGSCC: @[[DEAD_WITH_BLOCKADDRESS_USERS_L:[a-zA-Z0-9_$"\\.-]+]] = constant [2 x i8*] [i8* blockaddress(@dead_with_blockaddress_users, [[LAB0:%.*]]), i8* blockaddress(@dead_with_blockaddress_users, [[END:%.*]])] +; CGSCC: @[[DEAD_WITH_BLOCKADDRESS_USERS_L:[a-zA-Z0-9_$"\\.-]+]] = constant [2 x ptr] [ptr blockaddress(@dead_with_blockaddress_users, [[LAB0:%.*]]), ptr blockaddress(@dead_with_blockaddress_users, [[END:%.*]])] ; CGSCC: @[[A1:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 8 ; CGSCC: @[[A2:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 16 -; CGSCC: @[[E:[a-zA-Z0-9_$"\\.-]+]] = global %struct.a* null +; CGSCC: @[[E:[a-zA-Z0-9_$"\\.-]+]] = global ptr null ; CGSCC: @[[P:[a-zA-Z0-9_$"\\.-]+]] = global i8 0 ;. define internal i32 @dead_internal_func(i32 %0) { @@ -67,38 +67,38 @@ define internal i32 @dead_internal_func(i32 %0) { br i1 %10, label %3, label %5 } -define i32 @volatile_load(i32*) norecurse nounwind uwtable { +define i32 @volatile_load(ptr) norecurse nounwind uwtable { ; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) uwtable ; TUNIT-LABEL: define {{[^@]+}}@volatile_load -; TUNIT-SAME: (i32* nofree noundef align 4 [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] { -; TUNIT-NEXT: [[TMP2:%.*]] = load volatile i32, i32* [[TMP0]], align 4 +; TUNIT-SAME: (ptr nofree noundef align 4 [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] { +; TUNIT-NEXT: [[TMP2:%.*]] = load volatile i32, ptr [[TMP0]], align 4 ; TUNIT-NEXT: ret i32 [[TMP2]] ; ; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) uwtable ; CGSCC-LABEL: define {{[^@]+}}@volatile_load -; CGSCC-SAME: (i32* nofree noundef align 4 [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] { -; CGSCC-NEXT: [[TMP2:%.*]] = load volatile i32, i32* [[TMP0]], align 4 +; CGSCC-SAME: (ptr nofree noundef align 4 [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] { +; CGSCC-NEXT: [[TMP2:%.*]] = load volatile i32, ptr [[TMP0]], align 4 ; CGSCC-NEXT: ret i32 [[TMP2]] ; - %2 = load volatile 
i32, i32* %0, align 4 + %2 = load volatile i32, ptr %0, align 4 ret i32 %2 } -define internal i32 @internal_load(i32*) norecurse nounwind uwtable { +define internal i32 @internal_load(ptr) norecurse nounwind uwtable { ; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none) uwtable ; CGSCC-LABEL: define {{[^@]+}}@internal_load ; CGSCC-SAME: () #[[ATTR8:[0-9]+]] { ; CGSCC-NEXT: ret i32 undef ; - %2 = load i32, i32* %0, align 4 + %2 = load i32, ptr %0, align 4 ret i32 %2 } ; TEST 1: Only first block is live. -define i32 @first_block_no_return(i32 %a, i32* nonnull %ptr1, i32* %ptr2) #0 { +define i32 @first_block_no_return(i32 %a, ptr nonnull %ptr1, ptr %ptr2) #0 { ; CHECK: Function Attrs: nofree noreturn nosync nounwind ; CHECK-LABEL: define {{[^@]+}}@first_block_no_return -; CHECK-SAME: (i32 [[A:%.*]], i32* nocapture nofree nonnull readnone [[PTR1:%.*]], i32* nocapture nofree readnone [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-SAME: (i32 [[A:%.*]], ptr nocapture nofree nonnull readnone [[PTR1:%.*]], ptr nocapture nofree readnone [[PTR2:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @no_return_call() #[[ATTR3:[0-9]+]] ; CHECK-NEXT: unreachable @@ -110,15 +110,15 @@ define i32 @first_block_no_return(i32 %a, i32* nonnull %ptr1, i32* %ptr2) #0 { ; CHECK-NEXT: unreachable ; entry: - call i32 @internal_load(i32* %ptr1) + call i32 @internal_load(ptr %ptr1) call void @no_return_call() call i32 @dead_internal_func(i32 10) %cmp = icmp eq i32 %a, 0 br i1 %cmp, label %cond.true, label %cond.false cond.true: ; preds = %entry - call i32 @internal_load(i32* %ptr2) - %load = call i32 @volatile_load(i32* %ptr1) + call i32 @internal_load(ptr %ptr2) + %load = call i32 @volatile_load(ptr %ptr1) call void @normal_call() %call = call i32 @foo() br label %cond.end @@ -138,9 +138,9 @@ cond.end: ; preds = %cond.false, %cond.t ; This is just an example. 
For example we can put a sync call in a ; dead block and check if it is deduced. -define i32 @dead_block_present(i32 %a, i32* %ptr1) #0 { +define i32 @dead_block_present(i32 %a, ptr %ptr1) #0 { ; CHECK-LABEL: define {{[^@]+}}@dead_block_present -; CHECK-SAME: (i32 [[A:%.*]], i32* nofree [[PTR1:%.*]]) { +; CHECK-SAME: (i32 [[A:%.*]], ptr nofree [[PTR1:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -160,7 +160,7 @@ entry: cond.true: ; preds = %entry call void @no_return_call() - %call = call i32 @volatile_load(i32* %ptr1) + %call = call i32 @volatile_load(ptr %ptr1) br label %cond.end cond.false: ; preds = %entry @@ -254,9 +254,9 @@ cond.end: ; preds = %cond.false, %cond.t ; TEST 5.1 noreturn invoke instruction with a unreachable normal successor block. -define i32 @invoke_noreturn(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i32 @invoke_noreturn(i32 %a) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: define {{[^@]+}}@invoke_noreturn -; CHECK-SAME: (i32 [[A:%.*]]) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +; CHECK-SAME: (i32 [[A:%.*]]) personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -273,8 +273,8 @@ define i32 @invoke_noreturn(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_p ; CHECK: continue: ; CHECK-NEXT: unreachable ; CHECK: cleanup: -; CHECK-NEXT: [[RES:%.*]] = landingpad { i8*, i32 } -; CHECK-NEXT: catch i8* null +; CHECK-NEXT: [[RES:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: catch ptr null ; CHECK-NEXT: ret i32 0 ; entry: @@ -299,17 +299,17 @@ continue: br label %cond.end cleanup: - %res = landingpad { i8*, i32 } - catch i8* null + %res = landingpad { ptr, i32 } + catch ptr null ret i32 0 } ; TEST 5.2 noreturn invoke instruction 
replaced by a call and an unreachable instruction ; put after it. -define i32 @invoke_noreturn_nounwind(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i32 @invoke_noreturn_nounwind(i32 %a) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: define {{[^@]+}}@invoke_noreturn_nounwind -; CHECK-SAME: (i32 [[A:%.*]]) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +; CHECK-SAME: (i32 [[A:%.*]]) personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -351,15 +351,15 @@ continue: br label %cond.end cleanup: - %res = landingpad { i8*, i32 } - catch i8* null + %res = landingpad { ptr, i32 } + catch ptr null ret i32 0 } ; TEST 5.3 unounwind invoke instruction replaced by a call and a branch instruction put after it. -define i32 @invoke_nounwind(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i32 @invoke_nounwind(i32 %a) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: define {{[^@]+}}@invoke_nounwind -; CHECK-SAME: (i32 [[A:%.*]]) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +; CHECK-SAME: (i32 [[A:%.*]]) personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -401,15 +401,15 @@ continue: br label %cond.end cleanup: - %res = landingpad { i8*, i32 } - catch i8* null + %res = landingpad { ptr, i32 } + catch ptr null ret i32 0 } ; TEST 5.4 unounwind invoke instruction replaced by a call and a branch instruction put after it. 
-define i32 @invoke_nounwind_phi(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i32 @invoke_nounwind_phi(i32 %a) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: define {{[^@]+}}@invoke_nounwind_phi -; CHECK-SAME: (i32 [[A:%.*]]) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +; CHECK-SAME: (i32 [[A:%.*]]) personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -446,14 +446,14 @@ continue: ret i32 %p cleanup: - %res = landingpad { i8*, i32 } catch i8* null + %res = landingpad { ptr, i32 } catch ptr null ret i32 0 } ; TEST 5.5 unounwind invoke instruction replaced by a call and a branch instruction put after it. -define i32 @invoke_nounwind_phi_dom(i32 %a) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i32 @invoke_nounwind_phi_dom(i32 %a) personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: define {{[^@]+}}@invoke_nounwind_phi_dom -; CHECK-SAME: (i32 [[A:%.*]]) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +; CHECK-SAME: (i32 [[A:%.*]]) personality ptr @__gxx_personality_v0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] @@ -490,36 +490,36 @@ continue: ret i32 %p cleanup: - %res = landingpad { i8*, i32 } catch i8* null + %res = landingpad { ptr, i32 } catch ptr null ret i32 0 } ; TEST 6: Undefined behvior, taken from LangRef. ; FIXME: Should be able to detect undefined behavior. 
-define void @ub(i32* %0) { +define void @ub(ptr %0) { ; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) ; TUNIT-LABEL: define {{[^@]+}}@ub -; TUNIT-SAME: (i32* nocapture nofree writeonly [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] { +; TUNIT-SAME: (ptr nocapture nofree writeonly [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] { ; TUNIT-NEXT: [[POISON:%.*]] = sub nuw i32 0, 1 ; TUNIT-NEXT: [[STILL_POISON:%.*]] = and i32 [[POISON]], 0 -; TUNIT-NEXT: [[POISON_YET_AGAIN:%.*]] = getelementptr i32, i32* [[TMP0]], i32 [[STILL_POISON]] -; TUNIT-NEXT: store i32 0, i32* [[POISON_YET_AGAIN]], align 4 +; TUNIT-NEXT: [[POISON_YET_AGAIN:%.*]] = getelementptr i32, ptr [[TMP0]], i32 [[STILL_POISON]] +; TUNIT-NEXT: store i32 0, ptr [[POISON_YET_AGAIN]], align 4 ; TUNIT-NEXT: ret void ; ; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) ; CGSCC-LABEL: define {{[^@]+}}@ub -; CGSCC-SAME: (i32* nocapture nofree writeonly [[TMP0:%.*]]) #[[ATTR9:[0-9]+]] { +; CGSCC-SAME: (ptr nocapture nofree writeonly [[TMP0:%.*]]) #[[ATTR9:[0-9]+]] { ; CGSCC-NEXT: [[POISON:%.*]] = sub nuw i32 0, 1 ; CGSCC-NEXT: [[STILL_POISON:%.*]] = and i32 [[POISON]], 0 -; CGSCC-NEXT: [[POISON_YET_AGAIN:%.*]] = getelementptr i32, i32* [[TMP0]], i32 [[STILL_POISON]] -; CGSCC-NEXT: store i32 0, i32* [[POISON_YET_AGAIN]], align 4 +; CGSCC-NEXT: [[POISON_YET_AGAIN:%.*]] = getelementptr i32, ptr [[TMP0]], i32 [[STILL_POISON]] +; CGSCC-NEXT: store i32 0, ptr [[POISON_YET_AGAIN]], align 4 ; CGSCC-NEXT: ret void ; %poison = sub nuw i32 0, 1 ; Results in a poison value. %still_poison = and i32 %poison, 0 ; 0, but also poison. - %poison_yet_again = getelementptr i32, i32* %0, i32 %still_poison - store i32 0, i32* %poison_yet_again ; Undefined behavior due to store to poison. + %poison_yet_again = getelementptr i32, ptr %0, i32 %still_poison + store i32 0, ptr %poison_yet_again ; Undefined behavior due to store to poison. 
ret void } @@ -710,84 +710,84 @@ cond.end: ; preds = %cond.if, %con @a1 = common global i8 0, align 8 @a2 = common global i8 0, align 16 -define internal i8* @f1(i8* readnone %0) local_unnamed_addr #0 { +define internal ptr @f1(ptr readnone %0) local_unnamed_addr #0 { ; CGSCC-LABEL: define {{[^@]+}}@f1 -; CGSCC-SAME: (i8* readnone [[TMP0:%.*]]) local_unnamed_addr { -; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq i8* [[TMP0]], null +; CGSCC-SAME: (ptr readnone [[TMP0:%.*]]) local_unnamed_addr { +; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null ; CGSCC-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP5:%.*]] ; CGSCC: 3: -; CGSCC-NEXT: [[TMP4:%.*]] = tail call i8* @f2(i8* nonnull @a1) +; CGSCC-NEXT: [[TMP4:%.*]] = tail call ptr @f2(ptr nonnull @a1) ; CGSCC-NEXT: br label [[TMP5]] ; CGSCC: 5: -; CGSCC-NEXT: [[TMP6:%.*]] = phi i8* [ [[TMP4]], [[TMP3]] ], [ [[TMP0]], [[TMP1:%.*]] ] -; CGSCC-NEXT: ret i8* [[TMP6]] +; CGSCC-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP4]], [[TMP3]] ], [ [[TMP0]], [[TMP1:%.*]] ] +; CGSCC-NEXT: ret ptr [[TMP6]] ; - %2 = icmp eq i8* %0, null + %2 = icmp eq ptr %0, null br i1 %2, label %3, label %5 ;