* The ``CLANG_ENABLE_OPAQUE_POINTERS`` cmake flag is no longer supported.
* C APIs that do not support opaque pointers (like ``LLVMBuildLoad``) are no
longer supported.
+* Typed pointer bitcode is implicitly upgraded to use opaque pointers, unless
+ ``-opaque-pointers=0`` is passed.
The following typed pointer functionality is still to be removed:
* The ``-no-opaque-pointers`` cc1 flag, ``-opaque-pointers=0`` opt flag and
``-plugin-opt=no-opaque-pointers`` lto flag.
-* Auto-detection of typed pointers in bitcode and textual IR.
+* Auto-detection of typed pointers in textual IR.
* Support for typed pointers in LLVM libraries.
if (!ResultTy ||
!PointerType::isValidElementType(ResultTy))
return error("Invalid type");
- if (LLVM_UNLIKELY(!Context.hasSetOpaquePointersValue()))
- Context.setOpaquePointers(false);
ContainedIDs.push_back(Record[0]);
ResultTy = PointerType::get(ResultTy, AddressSpace);
break;
case bitc::TYPE_CODE_OPAQUE_POINTER: { // OPAQUE_POINTER: [addrspace]
if (Record.size() != 1)
return error("Invalid opaque pointer record");
- if (LLVM_UNLIKELY(!Context.hasSetOpaquePointersValue())) {
- Context.setOpaquePointers(true);
- } else if (Context.supportsTypedPointers())
+ if (Context.supportsTypedPointers())
return error(
"Opaque pointers are only supported in -opaque-pointers mode");
unsigned AddressSpace = Record[0];
target datalayout = "A2"
-; CHECK-LABEL: define i8 addrspace(2)* @alloca_addrspace_2() {
+; CHECK-LABEL: define ptr addrspace(2) @alloca_addrspace_2() {
; CHECK: %alloca = alloca i8, align 1, addrspace(2)
define i8 addrspace(2)* @alloca_addrspace_2() {
%alloca = alloca i8, addrspace(2)
ret i8 addrspace(2)* %alloca
}
-; CHECK-LABEL: define i8 addrspace(5)* @alloca_addrspace_5() {
+; CHECK-LABEL: define ptr addrspace(5) @alloca_addrspace_5() {
; CHECK: %alloca = alloca i8, align 1, addrspace(5)
define i8 addrspace(5)* @alloca_addrspace_5() {
%alloca = alloca i8, addrspace(5)
; RUN: llvm-dis < %S/arm-intrinsics.bc | FileCheck %s
define void @f(i32* %p) {
-; CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.ldrex.p0(ptr elementtype(i32)
%a = call i32 @llvm.arm.ldrex.p0i32(i32* %p)
-; CHECK: call i32 @llvm.arm.strex.p0i32(i32 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.strex.p0(i32 0, ptr elementtype(i32)
%c = call i32 @llvm.arm.strex.p0i32(i32 0, i32* %p)
-; CHECK: call i32 @llvm.arm.ldaex.p0i32(i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.ldaex.p0(ptr elementtype(i32)
%a2 = call i32 @llvm.arm.ldaex.p0i32(i32* %p)
-; CHECK: call i32 @llvm.arm.stlex.p0i32(i32 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.stlex.p0(i32 0, ptr elementtype(i32)
%c2 = call i32 @llvm.arm.stlex.p0i32(i32 0, i32* %p)
ret void
}
; Backwards compatibility test: make sure we can process bitcode without
; synchronization scope names encoded in it.
-; CHECK: load atomic i32, i32* %x unordered, align 4
-; CHECK: load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
-; CHECK: store atomic i32 3, i32* %x release, align 4
-; CHECK: store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
-; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
-; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
-; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
-; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
-; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
-; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+; CHECK: load atomic i32, ptr %x unordered, align 4
+; CHECK: load atomic volatile i32, ptr %x syncscope("singlethread") acquire, align 4
+; CHECK: store atomic i32 3, ptr %x release, align 4
+; CHECK: store atomic volatile i32 3, ptr %x syncscope("singlethread") monotonic, align 4
+; CHECK: cmpxchg ptr %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+; CHECK: cmpxchg volatile ptr %x, i32 0, i32 1 acq_rel acquire
+; CHECK: cmpxchg ptr %x, i32 42, i32 0 acq_rel monotonic
+; CHECK: cmpxchg weak ptr %x, i32 13, i32 0 seq_cst monotonic
+; CHECK: atomicrmw add ptr %x, i32 10 seq_cst
+; CHECK: atomicrmw volatile xchg ptr %x, i32 10 monotonic
; CHECK: fence syncscope("singlethread") release
; CHECK: fence seq_cst
define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
- ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ ; CHECK: cmpxchg ptr %addr, i32 %desired, i32 %new seq_cst seq_cst
cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
- ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: cmpxchg volatile ptr %addr, i32 %desired, i32 %new seq_cst monotonic
cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
- ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
+ ; CHECK: cmpxchg weak ptr %addr, i32 %desired, i32 %new acq_rel acquire
cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
- ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
+ ; CHECK: cmpxchg weak volatile ptr %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
ret void
}
; before the IR change on this file.
; CHECK: @atomicrmw
-; CHECK: %b = atomicrmw add i32* %a, i32 %i acquire
+; CHECK: %b = atomicrmw add ptr %a, i32 %i acquire
define void @atomicrmw(i32* %a, i32 %i) {
%b = atomicrmw add i32* %a, i32 %i acquire
ret void
}
define void @f5(i8* sret(i8) %0)
-; CHECK: define void @f5(i8* sret(i8) %0)
+; CHECK: define void @f5(ptr sret(i8) %0)
{
ret void;
}
}
define void @f7(i8* noalias %0)
-; CHECK: define void @f7(i8* noalias %0)
+; CHECK: define void @f7(ptr noalias %0)
{
ret void;
}
define void @f8(i8* byval(i8) %0)
-; CHECK: define void @f8(i8* byval(i8) %0)
+; CHECK: define void @f8(ptr byval(i8) %0)
{
ret void;
}
define void @f9(i8* nest %0)
-; CHECK: define void @f9(i8* nest %0)
+; CHECK: define void @f9(ptr nest %0)
{
ret void;
}
}
define void @f18(i8* nocapture %0)
-; CHECK: define void @f18(i8* nocapture %0)
+; CHECK: define void @f18(ptr nocapture %0)
{
ret void;
}
Make sure we upgrade old-style IntAttribute byval records to a fully typed
version correctly.
-CHECK: call void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
-CHECK: invoke void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
+CHECK: call void @bar(ptr byval({ ptr, i8 }) %ptr)
+CHECK: invoke void @bar(ptr byval({ ptr, i8 }) %ptr)
-; RUN: llvm-dis < %s.bc | FileCheck %s --check-prefixes=CHECK,CHECK-TYPED
+; RUN: llvm-dis < %s.bc | FileCheck %s
; callbr.ll.bc was generated by passing this file to llvm-as.
-; RUN: llvm-as < %s | llvm-dis | FileCheck %s --check-prefixes=CHECK,CHECK-TYPED
-; RUN: llvm-as -opaque-pointers < %s | llvm-dis -opaque-pointers | FileCheck %s --check-prefixes=CHECK,CHECK-OPAQUE
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
define i32 @test_asm_goto(i32 %x){
entry:
define i32 @test_asm_goto3(i32 %x){
entry:
-; CHECK-TYPED: callbr void asm "", "r,i,!i"(i32 %x, i8* blockaddress(@test_asm_goto3, %unrelated))
-; CHECK-OPAQUE: callbr void asm "", "r,i,!i"(i32 %x, ptr blockaddress(@test_asm_goto3, %unrelated))
+; CHECK: callbr void asm "", "r,i,!i"(i32 %x, ptr blockaddress(@test_asm_goto3, %unrelated))
; CHECK-NEXT: to label %normal [label %fail]
callbr void asm "", "r,i,!i"(i32 %x, i8* blockaddress(@test_asm_goto3, %unrelated)) to label %normal [label %fail]
normal:
define void @test(i32* %addr) {
cmpxchg i32* %addr, i32 42, i32 0 monotonic
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 monotonic monotonic
cmpxchg i32* %addr, i32 42, i32 0 acquire
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 acquire acquire
cmpxchg i32* %addr, i32 42, i32 0 release
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 release monotonic
cmpxchg i32* %addr, i32 42, i32 0 acq_rel
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 acq_rel acquire
cmpxchg i32* %addr, i32 42, i32 0 seq_cst
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 seq_cst seq_cst
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 seq_cst seq_cst
ret void
}
br label %a
b:
cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
-; CHECK: cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
+; CHECK: cmpxchg ptr %x, i32 %y, i32 %z acquire acquire
ret void
a:
%y = add i32 %y.orig, 1
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
;; Functions
; Format: define [linkage] [visibility] [DLLStorageClass]
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
; Functions -- unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; XXX: The parser spits out the load type here.
%ld.1 = load atomic i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
; Instructions -- Terminators
define void @instructions.terminators(i8 %val) { ; XXX: landingpad changed.
-; CHECK: define void @instructions.terminators(i8 %val) personality i32 ()* @personality_handler
+; CHECK: define void @instructions.terminators(i8 %val) personality ptr @personality_handler
br i1 false, label %iftrue, label %iffalse
; CHECK: br i1 false, label %iftrue, label %iffalse
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
; XXX: The parser spits out the load type here.
getelementptr { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
load volatile i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.nobuiltin() #36
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #11
+ ; CHECK: call fastcc noalias ptr @f.noalias() #11
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #6
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #6
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
declare i32 @personality_handler()
define void @instructions.landingpad() {
-; CHECK: define void @instructions.landingpad() personality i32 ()* @personality_handler
+; CHECK: define void @instructions.landingpad() personality ptr @personality_handler
invoke void @llvm.donothing() to label %proceed unwind label %catch1
invoke void @llvm.donothing() to label %proceed unwind label %catch2
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
;; Functions
; Format: define [linkage] [visibility] [DLLStorageClass]
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.nobuiltin() #39
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
;; Functions
; Format: define [linkage] [visibility] [DLLStorageClass]
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
catchpad2:
catchpad within %cs2 [i32* %arg1]
br label %normal
- ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK: catchpad within %cs2 [ptr %arg1]
; CHECK-NEXT: br label %normal
catchswitch3:
catchpad3:
catchpad within %cs3 [i32* %arg1, i32* %arg2]
br label %normal
- ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
; CHECK-NEXT: br label %normal
cleanuppad1:
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.nobuiltin() #42
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr and local_unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
;; IFunc
; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-; <ResolverTy>* @<Resolver>
+; ptr @<Resolver>
; IFunc -- Linkage
@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
; IFunc -- Visibility
@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
define i8* @ifunc_resolver() {
entry:
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr and local_unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
catchpad2:
catchpad within %cs2 [i32* %arg1]
br label %normal
- ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK: catchpad within %cs2 [ptr %arg1]
; CHECK-NEXT: br label %normal
catchswitch3:
catchpad3:
catchpad within %cs3 [i32* %arg1, i32* %arg2]
br label %normal
- ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
; CHECK-NEXT: br label %normal
cleanuppad1:
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.nobuiltin() #43
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr and local_unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
;; IFunc
; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-; <ResolverTy>* @<Resolver>
+; ptr @<Resolver>
; IFunc -- Linkage
@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
; IFunc -- Visibility
@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
define i8* @ifunc_resolver() {
entry:
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr and local_unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
catchpad2:
catchpad within %cs2 [i32* %arg1]
br label %normal
- ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK: catchpad within %cs2 [ptr %arg1]
; CHECK-NEXT: br label %normal
catchswitch3:
catchpad3:
catchpad within %cs3 [i32* %arg1, i32* %arg2]
br label %normal
- ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
; CHECK-NEXT: br label %normal
cleanuppad1:
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.nobuiltin() #43
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
@a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr and local_unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
;; IFunc
; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-; <ResolverTy>* @<Resolver>
+; ptr @<Resolver>
; IFunc -- Linkage
@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
; IFunc -- Visibility
@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
define i8* @ifunc_resolver() {
entry:
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr and local_unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
catchpad2:
catchpad within %cs2 [i32* %arg1]
br label %normal
- ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK: catchpad within %cs2 [ptr %arg1]
; CHECK-NEXT: br label %normal
catchswitch3:
catchpad3:
catchpad within %cs3 [i32* %arg1, i32* %arg2]
br label %normal
- ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
; CHECK-NEXT: br label %normal
cleanuppad1:
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.strictfp() #9
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@const.float = constant double 0.0
; CHECK: @const.float = constant double 0.0
@const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
%const.struct.type = type { i32, i8 }
%const.struct.type.packed = type <{ i32, i8 }>
@const.struct = constant %const.struct.type { i32 -1, i8 undef }
@g.used3 = global i8 0
declare void @g.f1()
@llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
@llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
@llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
@llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
;; Aliases
; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
; Aliases -- Linkage
@a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
@a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
@a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
@a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
@a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
@a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
@a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
; Aliases -- Visibility
@a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
@a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
@a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
; Aliases -- DLLStorageClass
@a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
@a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
; Aliases -- ThreadLocal
@a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
@a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
@a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
@a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
@a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
; Aliases -- unnamed_addr and local_unnamed_addr
@a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
@a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
;; IFunc
; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-; <ResolverTy>* @<Resolver>
+; ptr @<Resolver>
; IFunc -- Linkage
@ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
@ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
@ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
; IFunc -- Visibility
@ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
@ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
@ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
define i8* @ifunc_resolver() {
entry:
declare signext i64 @f.signext()
; CHECK: declare signext i64 @f.signext()
declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
; Functions -- Parameter attributes
declare void @f.param.zeroext(i8 zeroext)
declare void @f.param.inreg(i8 inreg)
; CHECK: declare void @f.param.inreg(i8 inreg)
declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
; Functions -- unnamed_addr and local_unnamed_addr
declare void @f.unnamed_addr() unnamed_addr
declare i32 @f.personality_handler()
; CHECK: declare i32 @f.personality_handler()
define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
invoke void @llvm.donothing() to label %normal unwind label %exception
exception:
%cleanup = landingpad i32 cleanup
;; Atomic Memory Ordering Constraints
define void @atomics(i32* %word) {
%cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
- ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+ ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
%cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
- ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+ ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
%cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
- ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+ ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
%cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
- ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+ ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
%cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
- ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+ ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
%cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
- ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+ ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
%cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
- ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+ ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
%cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
- ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+ ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
%atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
- ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+ ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
%atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
- ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+ ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
%atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
- ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+ ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
%atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
- ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+ ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
%atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
- ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+ ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
%atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
- ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+ ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
%atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
- ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+ ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
%atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
- ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+ ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
%atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
- ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+ ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
%atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
%atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
- ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+ ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
fence acquire
; CHECK: fence acquire
fence release
; CHECK: fence syncscope("singlethread") seq_cst
%ld.1 = load atomic i32, i32* %word monotonic, align 4
- ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+ ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
%ld.2 = load atomic volatile i32, i32* %word acquire, align 8
- ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+ ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
%ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
- ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+ ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
store atomic i32 23, i32* %word monotonic, align 4
- ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+ ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
store atomic volatile i32 24, i32* %word monotonic, align 4
- ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+ ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
- ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+ ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
ret void
}
%opaquety = type opaque
define void @typesystem() {
%p0 = bitcast i8* null to i32 (i32)*
- ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+ ; CHECK: %p0 = bitcast ptr null to ptr
%p1 = bitcast i8* null to void (i8*)*
- ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+ ; CHECK: %p1 = bitcast ptr null to ptr
%p2 = bitcast i8* null to i32 (i8*, ...)*
- ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+ ; CHECK: %p2 = bitcast ptr null to ptr
%p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
- ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+ ; CHECK: %p3 = bitcast ptr null to ptr
%p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
- ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+ ; CHECK: %p4 = bitcast ptr null to ptr
%p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
- ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+ ; CHECK: %p5 = bitcast ptr null to ptr
%t0 = alloca i1942652
; CHECK: %t0 = alloca i1942652
%t7 = alloca x86_mmx
; CHECK: %t7 = alloca x86_mmx
%t8 = alloca %opaquety*
- ; CHECK: %t8 = alloca %opaquety*
+ ; CHECK: %t8 = alloca ptr
ret void
}
defaultdest.2:
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
- ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+ ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
invoke fastcc void @f.fastcc()
; CHECK: invoke fastcc void @f.fastcc()
catchpad2:
catchpad within %cs2 [i32* %arg1]
br label %normal
- ; CHECK: catchpad within %cs2 [i32* %arg1]
+ ; CHECK: catchpad within %cs2 [ptr %arg1]
; CHECK-NEXT: br label %normal
catchswitch3:
catchpad3:
catchpad within %cs3 [i32* %arg1, i32* %arg2]
br label %normal
- ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+ ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
; CHECK-NEXT: br label %normal
cleanuppad1:
%n.ptr = alloca { i8, { i32 } }
getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
- ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+ ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
- ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+ ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
- ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+ ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
- ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+ ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
- ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+ ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
- ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+ ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
ret void
}
; CHECK: alloca inalloca i32, i8 4, align 4
load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
- ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+ ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
store i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
store volatile i32* null, i32** %base, align 4, !nontemporal !8
- ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+ ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
ret void
}
sitofp i32 -1 to float
; CHECK: sitofp i32 -1 to float
ptrtoint i8* null to i64
- ; CHECK: ptrtoint i8* null to i64
+ ; CHECK: ptrtoint ptr null to i64
inttoptr i64 0 to i8*
- ; CHECK: inttoptr i64 0 to i8*
+ ; CHECK: inttoptr i64 0 to ptr
bitcast i32 0 to i32
; CHECK: bitcast i32 0 to i32
addrspacecast i32* null to i32 addrspace(1)*
- ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+ ; CHECK: addrspacecast ptr null to ptr addrspace(1)
ret void
}
; CHECK: call void @f.strictfp() #9
call fastcc noalias i32* @f.noalias() noinline
- ; CHECK: call fastcc noalias i32* @f.noalias() #12
+ ; CHECK: call fastcc noalias ptr @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
- ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+ ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
ret void
}
define void @instructions.call_musttail(i8* inalloca %val) {
musttail call void @f.param.inalloca(i8* inalloca %val)
- ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+ ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
ret void
}
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch3:
cleanup
; CHECK: cleanup
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
catch i32* null
- ; CHECK: catch i32* null
+ ; CHECK: catch ptr null
br label %proceed
catch4:
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(i8* %ap2)
+ ; CHECK: call void @llvm.va_start(ptr %ap2)
va_arg i8* %ap2, i32
- ; CHECK: va_arg i8* %ap2, i32
+ ; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+ ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(i8* %ap2)
+ ; CHECK: call void @llvm.va_end(ptr %ap2)
ret void
}
define void @intrinsics.gc() gc "shadow-stack" {
%ptrloc = alloca i8*
call void @llvm.gcroot(i8** %ptrloc, i8* null)
- ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+ ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
call i8* @llvm.gcread(i8* null, i8** %ptrloc)
- ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+ ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
%ref = alloca i8
call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
- ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+ ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
ret void
}
!10 = !{!"rax"}
define void @intrinsics.codegen() {
call i8* @llvm.returnaddress(i32 1)
- ; CHECK: call i8* @llvm.returnaddress(i32 1)
+ ; CHECK: call ptr @llvm.returnaddress(i32 1)
call i8* @llvm.frameaddress(i32 1)
- ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+ ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call i32 @llvm.read_register.i32(metadata !10)
; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
%stack = call i8* @llvm.stacksave()
- ; CHECK: %stack = call i8* @llvm.stacksave()
+ ; CHECK: %stack = call ptr @llvm.stacksave()
call void @llvm.stackrestore(i8* %stack)
- ; CHECK: call void @llvm.stackrestore(i8* %stack)
+ ; CHECK: call void @llvm.stackrestore(ptr %stack)
call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
- ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+ ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
call void @llvm.pcmarker(i32 1)
; CHECK: call void @llvm.pcmarker(i32 1)
; CHECK: call i64 @llvm.readcyclecounter()
call void @llvm.clear_cache(i8* null, i8* null)
- ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+ ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
- ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+ ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
ret void
}
define void @intrinsics.localescape() {
%static.alloca = alloca i32
call void (...) @llvm.localescape(i32* %static.alloca)
- ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+ ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
call void @intrinsics.localrecover()
%func = bitcast void ()* @intrinsics.localescape to i8*
%fp = call i8* @llvm.frameaddress(i32 1)
call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
- ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+ ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
ret void
}
;; Constant Expressions
define i8** @constexpr() {
- ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+ ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
}
@X = global i32 0
; CHECK: @Y = global i32 1
@Y = global i32 1
-; CHECK: @Z = global [2 x i32*] [i32* @X, i32* @Y]
+; CHECK: @Z = global [2 x ptr] [ptr @X, ptr @Y]
@Z = global [2 x i32*] [i32* @X, i32* @Y]
define void @SimpleConstants(i32 %x) {
entry:
; null
-; CHECK: store i32 %x, i32* null
+; CHECK: store i32 %x, ptr null
store i32 %x, i32* null
; boolean
%poison = sub nuw i32 0, 1
;address of basic block
- ; CHECK-NEXT: %res2 = icmp eq i8* blockaddress(@OtherConstants, %Next), null
+ ; CHECK-NEXT: %res2 = icmp eq ptr blockaddress(@OtherConstants, %Next), null
%res2 = icmp eq i8* blockaddress(@OtherConstants, %Next), null
br label %Next
Next:
uitofp i32 1 to float
; CHECK-NEXT: sitofp i32 -1 to float
sitofp i32 -1 to float
- ; CHECK-NEXT: ptrtoint i32* @X to i32
+ ; CHECK-NEXT: ptrtoint ptr @X to i32
ptrtoint i32* @X to i32
- ; CHECK-NEXT: inttoptr i8 1 to i8*
+ ; CHECK-NEXT: inttoptr i8 1 to ptr
inttoptr i8 1 to i8*
; CHECK-NEXT: bitcast i32 1 to <2 x i16>
bitcast i32 1 to <2 x i16>
- ; CHECK-NEXT: getelementptr i32, i32* @X, i32 0
+ ; CHECK-NEXT: getelementptr i32, ptr @X, i32 0
getelementptr i32, i32* @X, i32 0
- ; CHECK-NEXT: getelementptr inbounds i32, i32* @X, i32 0
+ ; CHECK-NEXT: getelementptr inbounds i32, ptr @X, i32 0
getelementptr inbounds i32, i32* @X, i32 0
; CHECK: select i1 true, i32 1, i32 0
select i1 true ,i32 1, i32 0
define void @ptrtoint(i32* %src){
entry:
-; CHECK: %res1 = ptrtoint i32* %src to i8
+; CHECK: %res1 = ptrtoint ptr %src to i8
%res1 = ptrtoint i32* %src to i8
ret void
define void @inttoptr(i32 %src){
entry:
-; CHECK: %res1 = inttoptr i32 %src to i32*
+; CHECK: %res1 = inttoptr i32 %src to ptr
%res1 = inttoptr i32 %src to i32*
ret void
; CHECK: %res1 = bitcast i32 %src1 to i32
%res1 = bitcast i32 %src1 to i32
-; CHECK: %res2 = bitcast i32* %src2 to i64*
+; CHECK: %res2 = bitcast ptr %src2 to ptr
%res2 = bitcast i32* %src2 to i64*
ret void
define void @ptrtointInstr(i32* %ptr, <4 x i32*> %vecPtr){
entry:
-; CHECK: %res1 = ptrtoint i32* %ptr to i8
+; CHECK: %res1 = ptrtoint ptr %ptr to i8
%res1 = ptrtoint i32* %ptr to i8
-; CHECK-NEXT: %res2 = ptrtoint <4 x i32*> %vecPtr to <4 x i64>
+; CHECK-NEXT: %res2 = ptrtoint <4 x ptr> %vecPtr to <4 x i64>
%res2 = ptrtoint <4 x i32*> %vecPtr to <4 x i64>
ret void
define void @inttoptrInstr(i32 %x, <4 x i32> %vec){
entry:
-; CHECK: %res1 = inttoptr i32 %x to i64*
+; CHECK: %res1 = inttoptr i32 %x to ptr
%res1 = inttoptr i32 %x to i64*
-; CHECK-NEXT: inttoptr <4 x i32> %vec to <4 x i8*>
+; CHECK-NEXT: inttoptr <4 x i32> %vec to <4 x ptr>
%res2 = inttoptr <4 x i32> %vec to <4 x i8*>
ret void
; CHECK-NEXT: !8 = !DIObjCProperty(name: "P1", type: !1)
; CHECK-NEXT: !9 = !DITemplateTypeParameter(type: !1)
; CHECK-NEXT: !10 = !DIGlobalVariable(name: "G",{{.*}} type: !1,
-; CHECK-NEXT: !11 = !DITemplateValueParameter(type: !1, value: i32* @G1)
+; CHECK-NEXT: !11 = !DITemplateValueParameter(type: !1, value: ptr @G1)
; CHECK-NEXT: !12 = !DIImportedEntity(tag: DW_TAG_imported_module, name: "T2", scope: !0, entity: !1)
; CHECK-NEXT: !13 = !DICompositeType(tag: DW_TAG_structure_type, name: "T3", file: !0, elements: !14, identifier: "T3")
; CHECK-NEXT: !14 = !{!15}
; CHECK: @protected_local_global = protected global i32 0
@local_alias = dso_local alias i32, i32* @local_global
-; CHECK-DAG: @local_alias = dso_local alias i32, i32* @local_global
+; CHECK-DAG: @local_alias = dso_local alias i32, ptr @local_global
@preemptable_alias = dso_preemptable alias i32, i32* @hidden_local_global
-; CHECK-DAG: @preemptable_alias = alias i32, i32* @hidden_local_global
+; CHECK-DAG: @preemptable_alias = alias i32, ptr @hidden_local_global
@preemptable_ifunc = dso_preemptable ifunc void (), void ()* ()* @ifunc_resolver
-; CHECK-DAG: @preemptable_ifunc = ifunc void (), void ()* ()* @ifunc_resolver
+; CHECK-DAG: @preemptable_ifunc = ifunc void (), ptr @ifunc_resolver
declare dso_local default void @default_local()
; CHECK: declare dso_local void @default_local()
define void @call_named() {
entry:
%0 = tail call addrspace(40) i32 @named(i16* null)
- ; CHECK: %0 = tail call addrspace(40) i32 @named(i16* null)
+ ; CHECK: %0 = tail call addrspace(40) i32 @named(ptr null)
ret void
}
define void @call_numbered() {
entry:
%0 = tail call addrspace(40) i32 @0(i16* null)
- ; CHECK: %0 = tail call addrspace(40) i32 @0(i16* null)
+ ; CHECK: %0 = tail call addrspace(40) i32 @0(ptr null)
ret void
}
declare i32 @foo() addrspace(40)
; CHECK: declare i32 @foo() addrspace(40)
declare i32 @named(i16* nocapture) addrspace(40)
-; CHECK: declare i32 @named(i16* nocapture) addrspace(40)
+; CHECK: declare i32 @named(ptr nocapture) addrspace(40)
declare i32 @0(i16*) addrspace(40)
-; CHECK: declare i32 @0(i16*) addrspace(40)
+; CHECK: declare i32 @0(ptr) addrspace(40)
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
-; CHECK: %g = getelementptr i8, i8* %p
+; CHECK: %g = getelementptr i8, ptr %p
define i8* @ptr(i8* %p) {
%g = getelementptr i8, i8* %p
; CHECK: @default_addrspace.var = global i8 1
@non_default_addrspace.var = addrspace(1) global i8* undef
-; CHECK: @non_default_addrspace.var = addrspace(1) global i8* undef
+; CHECK: @non_default_addrspace.var = addrspace(1) global ptr undef
@initialexec.var = thread_local(initialexec) global i32 0, align 4
; CHECK: @initialexec.var = thread_local(initialexec) global i32 0, align 4
module asm "some assembly"
; Named Types Test
-; CHECK: %mytype = type { %mytype*, i32 }
+; CHECK: %mytype = type { ptr, i32 }
%mytype = type { %mytype*, i32 }
; Aliases Test
; CHECK: @glob1 = global i32 1
@glob1 = global i32 1
-; CHECK: @aliased1 = alias i32, i32* @glob1
+; CHECK: @aliased1 = alias i32, ptr @glob1
@aliased1 = alias i32, i32* @glob1
-; CHECK-NEXT: @aliased2 = internal alias i32, i32* @glob1
+; CHECK-NEXT: @aliased2 = internal alias i32, ptr @glob1
@aliased2 = internal alias i32, i32* @glob1
-; CHECK-NEXT: @aliased3 = alias i32, i32* @glob1
+; CHECK-NEXT: @aliased3 = alias i32, ptr @glob1
@aliased3 = external alias i32, i32* @glob1
-; CHECK-NEXT: @aliased4 = weak alias i32, i32* @glob1
+; CHECK-NEXT: @aliased4 = weak alias i32, ptr @glob1
@aliased4 = weak alias i32, i32* @glob1
-; CHECK-NEXT: @aliased5 = weak_odr alias i32, i32* @glob1
+; CHECK-NEXT: @aliased5 = weak_odr alias i32, ptr @glob1
@aliased5 = weak_odr alias i32, i32* @glob1
;Parameter Attribute Test
; CHECK: declare void @ParamAttr1(i8 zeroext)
declare void @ParamAttr1(i8 zeroext)
-; CHECK: declare void @ParamAttr2(i8* nest)
+; CHECK: declare void @ParamAttr2(ptr nest)
declare void @ParamAttr2(i8* nest)
-; CHECK: declare void @ParamAttr3(i8* sret(i8))
+; CHECK: declare void @ParamAttr3(ptr sret(i8))
declare void @ParamAttr3(i8* sret(i8))
; CHECK: declare void @ParamAttr4(i8 signext)
declare void @ParamAttr4(i8 signext)
-; CHECK: declare void @ParamAttr5(i8* inreg)
+; CHECK: declare void @ParamAttr5(ptr inreg)
declare void @ParamAttr5(i8* inreg)
-; CHECK: declare void @ParamAttr6(i8* byval(i8))
+; CHECK: declare void @ParamAttr6(ptr byval(i8))
declare void @ParamAttr6(i8* byval(i8))
-; CHECK: declare void @ParamAttr7(i8* noalias)
+; CHECK: declare void @ParamAttr7(ptr noalias)
declare void @ParamAttr7(i8* noalias)
-; CHECK: declare void @ParamAttr8(i8* nocapture)
+; CHECK: declare void @ParamAttr8(ptr nocapture)
declare void @ParamAttr8(i8* nocapture)
; CHECK: declare void @ParamAttr9{{[(i8* nest noalias nocapture) | (i8* noalias nocapture nest)]}}
declare void @ParamAttr9(i8* nest noalias nocapture)
Make sure we upgrade old-style IntAttribute inalloca records to a
fully typed version correctly.
-CHECK: call void @bar({ i32*, i8 }* inalloca({ i32*, i8 }) %ptr)
-CHECK: invoke void @bar({ i32*, i8 }* inalloca({ i32*, i8 }) %ptr)
+CHECK: call void @bar(ptr inalloca({ ptr, i8 }) %ptr)
+CHECK: invoke void @bar(ptr inalloca({ ptr, i8 }) %ptr)
define void @foo(i32* inalloca(i32) %args) {
ret void
}
-; CHECK-LABEL: define void @foo(i32* inalloca(i32) %args)
+; CHECK-LABEL: define void @foo(ptr inalloca(i32) %args)
define void @bar() {
; Use the maximum alignment, since we stuff our bit with alignment.
}
; CHECK-LABEL: define void @bar() {
; CHECK: %args = alloca inalloca i32, align 4294967296
-; CHECK: call void @foo(i32* inalloca(i32) %args)
+; CHECK: call void @foo(ptr inalloca(i32) %args)
declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*)
-; CHECK-LABEL: define %struct.__neon_int8x8x2_t @test_named_struct_return(i8* %A) {
-; CHECK: %1 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+; CHECK-LABEL: define %struct.__neon_int8x8x2_t @test_named_struct_return(ptr %A) {
+; CHECK: %1 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
; CHECK: %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
; CHECK: %3 = insertvalue %struct.__neon_int8x8x2_t poison, <8 x i8> %2, 0
; CHECK: %4 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
%0 = type opaque
; CHECK-LABEL: @f0(
-; CHECK: %c1 = call %0* @llvm.ssa.copy.p0s_s.0(%0* %arg)
-; CHECK: %c2 = call %1* @llvm.ssa.copy.p0s_s.1(%1* %tmp)
-; CHECK: %c3 = call %0** @llvm.ssa.copy.p0p0s_s.1(%0** %arg2)
-; CHECK: %c4 = call %1** @llvm.ssa.copy.p0p0s_s.0(%1** %tmp2)
+; CHECK: %c1 = call ptr @llvm.ssa.copy.p0(ptr %arg)
+; CHECK: %c2 = call ptr @llvm.ssa.copy.p0(ptr %tmp)
+; CHECK: %c3 = call ptr @llvm.ssa.copy.p0(ptr %arg2)
+; CHECK: %c4 = call ptr @llvm.ssa.copy.p0(ptr %tmp2)
define void @f0(%0* %arg, %1* %tmp, %1** %tmp2, %0** %arg2) {
bb:
RUN: FileCheck --check-prefix=BAD-BITWIDTH %s
RUN: not llvm-dis -disable-output %p/Inputs/invalid-align.bc 2>&1 | \
RUN: FileCheck --check-prefix=BAD-ALIGN %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-mismatched-explicit-type.bc 2>&1 | \
-RUN: FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-load-mismatched-explicit-type.bc 2>&1 | \
-RUN: FileCheck --check-prefix=MISMATCHED-EXPLICIT-LOAD %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-operator-mismatched-explicit-type.bc 2>&1 | \
-RUN: FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP-OPERATOR %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-call-mismatched-explicit-type.bc 2>&1 | \
-RUN: FileCheck --check-prefix=MISMATCHED-EXPLICIT-CALL %s
RUN: not llvm-dis -disable-output %p/Inputs/invalid-call-non-function-explicit-type.bc 2>&1 | \
RUN: FileCheck --check-prefix=NON-FUNCTION-EXPLICIT-CALL %s
RUN: not llvm-dis -disable-output %p/Inputs/invalid-invoke-mismatched-explicit-type.bc 2>&1 | \
BAD-TYPE-TABLE-FORWARD-REF: Invalid TYPE table: Only named structs can be forward referenced
BAD-BITWIDTH: error: can't skip to bit
BAD-ALIGN: Invalid alignment value
-MISMATCHED-EXPLICIT-GEP: Explicit gep type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-LOAD: Explicit load/store type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-GEP-OPERATOR: Explicit gep operator type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-CALL: Explicit call type does not match pointee type of callee operand
NON-FUNCTION-EXPLICIT-CALL: Explicit call type is not a function type
-MISMATCHED-EXPLICIT-INVOKE: Explicit invoke type does not match pointee type of callee operand
+MISMATCHED-EXPLICIT-INVOKE: Insufficient operands to call
NON-FUNCTION-EXPLICIT-INVOKE: Explicit invoke type is not a function type
RUN: not llvm-dis -disable-output %p/Inputs/invalid-extractval-array-idx.bc 2>&1 | \
RUN: not llvm-dis -disable-output %p/Inputs/invalid-alias-type-mismatch.bc 2>&1 | \
RUN: FileCheck --check-prefix=ALIAS-TYPE-MISMATCH %s
-ALIAS-TYPE-MISMATCH: Alias and aliasee types don't match
+ALIAS-TYPE-MISMATCH: Insufficient function protos
RUN: not llvm-dis -disable-output %p/Inputs/invalid-no-function-block.bc 2>&1 | \
RUN: FileCheck --check-prefix=NO-FUNCTION-BLOCK %s
@global = global i32 0
@default.internal.alias = alias internal i32, internal i32* @global
-; CHECK: @default.internal.alias = internal alias i32, i32* @global
+; CHECK: @default.internal.alias = internal alias i32, ptr @global
@hidden.internal.alias = hidden alias internal i32, internal i32* @global
-; CHECK: @hidden.internal.alias = internal alias i32, i32* @global
+; CHECK: @hidden.internal.alias = internal alias i32, ptr @global
@protected.internal.alias = protected alias internal i32, internal i32* @global
-; CHECK: @protected.internal.alias = internal alias i32, i32* @global
+; CHECK: @protected.internal.alias = internal alias i32, ptr @global
@default.private.alias = alias private i32, private i32* @global
-; CHECK: @default.private.alias = private alias i32, i32* @global
+; CHECK: @default.private.alias = private alias i32, ptr @global
@hidden.private.alias = hidden alias private i32, private i32* @global
-; CHECK: @hidden.private.alias = private alias i32, i32* @global
+; CHECK: @hidden.private.alias = private alias i32, ptr @global
@protected.private.alias = protected alias private i32, private i32* @global
-; CHECK: @protected.private.alias = private alias i32, i32* @global
+; CHECK: @protected.private.alias = private alias i32, ptr @global
define internal void @default.internal() {
; CHECK: define internal void @default.internal
%ptr1 = alloca i8
store i8 2, i8* %ptr1
-; CHECK: %res1 = load i8, i8* %ptr1
+; CHECK: %res1 = load i8, ptr %ptr1
%res1 = load i8, i8* %ptr1
-; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, ptr %ptr1
%res2 = load volatile i8, i8* %ptr1
-; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, ptr %ptr1, align 1
%res3 = load i8, i8* %ptr1, align 1
-; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, ptr %ptr1, align 1
%res4 = load volatile i8, i8* %ptr1, align 1
-; CHECK-NEXT: %res5 = load i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, ptr %ptr1, align 1, !nontemporal !0
%res5 = load i8, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, ptr %ptr1, align 1, !nontemporal !0
%res6 = load volatile i8, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, ptr %ptr1, align 1, !nontemporal !0
%res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
-; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, ptr %ptr1, align 1, !nontemporal !0
%res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
-; CHECK-NEXT: %res9 = load i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, ptr %ptr1, align 1, !invariant.load !1
%res9 = load i8, i8* %ptr1, !invariant.load !1
-; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, ptr %ptr1, align 1, !invariant.load !1
%res10 = load volatile i8, i8* %ptr1, !invariant.load !1
-; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, ptr %ptr1, align 1, !invariant.load !1
%res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
-; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, ptr %ptr1, align 1, !invariant.load !1
%res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
-; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res13 = load i8, ptr %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
%res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res14 = load volatile i8, ptr %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
%res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res15 = load i8, ptr %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
%res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res16 = load volatile i8, ptr %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
%res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
ret void
%ptr1 = alloca i8
store i8 2, i8* %ptr1
-; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, ptr %ptr1 unordered, align 1
%res1 = load atomic i8, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, ptr %ptr1 monotonic, align 1
%res2 = load atomic i8, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, ptr %ptr1 acquire, align 1
%res3 = load atomic i8, i8* %ptr1 acquire, align 1
-; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, ptr %ptr1 seq_cst, align 1
%res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, ptr %ptr1 unordered, align 1
%res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, ptr %ptr1 monotonic, align 1
%res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, ptr %ptr1 acquire, align 1
%res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
-; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, ptr %ptr1 seq_cst, align 1
%res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, ptr %ptr1 syncscope("singlethread") unordered, align 1
%res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, ptr %ptr1 syncscope("singlethread") monotonic, align 1
%res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, ptr %ptr1 syncscope("singlethread") acquire, align 1
%res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
%res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") unordered, align 1
%res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") monotonic, align 1
%res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") acquire, align 1
%res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
-; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
%res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
entry:
%ptr1 = alloca i8
-; CHECK: store i8 2, i8* %ptr1
+; CHECK: store i8 2, ptr %ptr1
store i8 2, i8* %ptr1
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1
store volatile i8 2, i8* %ptr1
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1
store i8 2, i8* %ptr1, align 1
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1
store volatile i8 2, i8* %ptr1, align 1
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1, !nontemporal !0
store i8 2, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1, !nontemporal !0
store volatile i8 2, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1, !nontemporal !0
store i8 2, i8* %ptr1, align 1, !nontemporal !0
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1, !nontemporal !0
store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
ret void
entry:
%ptr1 = alloca i8
-; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
+; CHECK: store atomic i8 2, ptr %ptr1 unordered, align 1
store atomic i8 2, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 monotonic, align 1
store atomic i8 2, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 release, align 1
store atomic i8 2, i8* %ptr1 release, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 seq_cst, align 1
store atomic i8 2, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 unordered, align 1
store atomic volatile i8 2, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 monotonic, align 1
store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 release, align 1
store atomic volatile i8 2, i8* %ptr1 release, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") unordered, align 1
store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") monotonic, align 1
store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") release, align 1
store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") unordered, align 1
store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") monotonic, align 1
store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") release, align 1
store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
ret void
define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
entry:
- ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
+ ;cmpxchg [volatile] ptr <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
-; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new monotonic monotonic
; CHECK-NEXT: %res1 = extractvalue { i32, i1 } [[TMP]], 0
%res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new monotonic monotonic
; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
%res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
%res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new acquire acquire
; CHECK-NEXT: %res5 = extractvalue { i32, i1 } [[TMP]], 0
%res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new acquire acquire
; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
%res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
%res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new release monotonic
; CHECK-NEXT: %res9 = extractvalue { i32, i1 } [[TMP]], 0
%res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new release monotonic
; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
%res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
%res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new acq_rel acquire
; CHECK-NEXT: %res13 = extractvalue { i32, i1 } [[TMP]], 0
%res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new acq_rel acquire
; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
%res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
%res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
; CHECK-NEXT: %res17 = extractvalue { i32, i1 } [[TMP]], 0
%res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
%res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
%res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
define void @getelementptr({i8, i8}, {i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
entry:
-; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+; CHECK: %res1 = getelementptr { i8, i8 }, ptr %s, i32 1, i32 1
%res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1
-; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, ptr %s, i32 1, i32 1
%res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1
-; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
+; CHECK-NEXT: %res3 = getelementptr i8, <4 x ptr> %ptrs, <4 x i64> %offsets
%res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
ret void
; RUN: llvm-as < %s | llvm-dis -disable-output
-; RUN: verify-uselistorder < %s
%0 = type { %object.ModuleInfo.__vtbl*, i8*, %"byte[]", %1, %"ClassInfo[]", i32, void ()*, void ()*, void ()*, i8*, void ()* } ; type %0
%1 = type { i64, %object.ModuleInfo* } ; type %1
%2 = type { i32, void ()*, i8* } ; type %2
%object.TypeInfo = type { %object.TypeInfo.__vtbl*, i8* }
%object.TypeInfo.__vtbl = type { %object.ClassInfo*, %"byte[]" (%object.Object*)*, i64 (%object.Object*)*, i32 (%object.Object*, %object.Object*)*, i32 (%object.Object*, %object.Object*)*, i64 (%object.TypeInfo*, i8*)*, i32 (%object.TypeInfo*, i8*, i8*)*, i32 (%object.TypeInfo*, i8*, i8*)*, i64 (%object.TypeInfo*)*, void (%object.TypeInfo*, i8*, i8*)*, %object.TypeInfo* (%object.TypeInfo*)*, %"byte[]" (%object.TypeInfo*)*, i32 (%object.TypeInfo*)*, %"OffsetTypeInfo[]" (%object.TypeInfo*)* }
%"void*[]" = type { i64, i8** }
-@_D10ModuleInfo6__vtblZ = external constant %object.ModuleInfo.__vtbl ; <%object.ModuleInfo.__vtbl*> [#uses=1]
-@.str = internal constant [20 x i8] c"tango.core.BitManip\00" ; <[20 x i8]*> [#uses=1]
-@_D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8], [20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null } ; <%0*> [#uses=1]
-@_D5tango4core8BitManip11__moduleRefZ = internal global %ModuleReference { %ModuleReference* null, %object.ModuleInfo* bitcast (%0* @_D5tango4core8BitManip8__ModuleZ to %object.ModuleInfo*) } ; <%ModuleReference*> [#uses=2]
-@_Dmodule_ref = external global %ModuleReference* ; <%ModuleReference**> [#uses=2]
-@llvm.global_ctors = appending constant [1 x %2] [%2 { i32 65535, void ()* @_D5tango4core8BitManip16__moduleinfoCtorZ, i8* null }] ; <[1 x %2]*> [#uses=0]
+@_D10ModuleInfo6__vtblZ = external constant %object.ModuleInfo.__vtbl ; <ptr> [#uses=1]
+@.str = internal constant [20 x i8] c"tango.core.BitManip\00" ; <ptr> [#uses=1]
+@_D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8], [20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null } ; <ptr> [#uses=1]
+@_D5tango4core8BitManip11__moduleRefZ = internal global %ModuleReference { %ModuleReference* null, %object.ModuleInfo* bitcast (%0* @_D5tango4core8BitManip8__ModuleZ to %object.ModuleInfo*) } ; <ptr> [#uses=2]
+@_Dmodule_ref = external global %ModuleReference* ; <ptr> [#uses=2]
+@llvm.global_ctors = appending constant [1 x %2] [%2 { i32 65535, void ()* @_D5tango4core8BitManip16__moduleinfoCtorZ, i8* null }] ; <ptr> [#uses=0]
define fastcc i32 @_D5tango4core8BitManip6popcntFkZi(i32 %x_arg) nounwind readnone {
entry:
define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
moduleinfoCtorEntry:
- %current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref ; <%ModuleReference*> [#uses=1]
+ %current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref ; <ptr> [#uses=1]
store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference, %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
ret void
@global = global i32 0
-; CHECK: !0 = !{!1, !2, i32* @global, null}
+; CHECK: !0 = !{!1, !2, ptr @global, null}
; CHECK: !1 = !{!2, null}
; CHECK: !2 = !{}
!0 = metadata !{metadata !1, metadata !2, i32* @global, null}
}
; CHECK-LABEL: define void @landingpadInstr1
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
define void @landingpadInstr1(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
%res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
-; CHECK: catch i8** @_ZTIi
+; CHECK: catch ptr @_ZTIi
catch i8** @_ZTIi
ret void
}
; CHECK-LABEL: define void @landingpadInstr2
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
define void @landingpadInstr2(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
%res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK: cleanup
cleanup
}
; CHECK-LABEL: define void @landingpadInstr3
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
define void @landingpadInstr3(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
%res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
-; CHECK: catch i8** @_ZTIi
+; CHECK: catch ptr @_ZTIi
catch i8** @_ZTIi
-; CHECK: filter [1 x i8**] [i8** @_ZTId]
+; CHECK: filter [1 x ptr] [ptr @_ZTId]
filter [1 x i8**] [i8** @_ZTId]
ret void
}
; CHECK-NEXT: %res10 = icmp sle i32 %x1, %x2
%res10 = icmp sle i32 %x1, %x2
-; CHECK-NEXT: %res11 = icmp eq i32* %ptr1, %ptr2
+; CHECK-NEXT: %res11 = icmp eq ptr %ptr1, %ptr2
%res11 = icmp eq i32* %ptr1, %ptr2
; CHECK-NEXT: %res12 = icmp eq <2 x i32> %vec1, %vec2
; CHECK-NEXT: %res2 = tail call i32 @test(i32 %x)
%res2 = tail call i32 @test(i32 %x)
-; CHECK-NEXT: %res3 = call i32 (i8*, ...) @printf(i8* %msg, i32 12, i8 42)
+; CHECK-NEXT: %res3 = call i32 (ptr, ...) @printf(ptr %msg, i32 12, i8 42)
%res3 = call i32 (i8*, ...) @printf(i8* %msg, i32 12, i8 42)
ret void
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
; RUN: verify-uselistorder %s
-; CHECK: @a = global [4 x void ()*] [void ()* no_cfi @f1, void ()* @f1, void ()* @f2, void ()* no_cfi @f2]
+; CHECK: @a = global [4 x ptr] [ptr no_cfi @f1, ptr @f1, ptr @f2, ptr no_cfi @f2]
@a = global [4 x void ()*] [void ()* no_cfi @f1, void ()* @f1, void ()* @f2, void ()* no_cfi @f2]
-; CHECK: @b = constant void ()* no_cfi @f3
+; CHECK: @b = constant ptr no_cfi @f3
@b = constant void ()* no_cfi @f3
-; CHECK: @c = constant void ()* @f3
+; CHECK: @c = constant ptr @f3
@c = constant void ()* @f3
; CHECK: declare void @f1()
define void @g() {
%n = alloca void ()*, align 8
- ; CHECK: store void ()* no_cfi @f5, void ()** %n, align 8
+ ; CHECK: store ptr no_cfi @f5, ptr %n, align 8
store void ()* no_cfi @f5, void ()** %n, align 8
%1 = load void ()*, void ()** %n
call void %1()
define void @callit(i8* %ptr) {
%sz = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true)
- ; CHECK: %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+ ; CHECK: %sz = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
ret void
}
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1)
-; CHECK: declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+; CHECK: declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
; CHECK: @v2 = global [1 x i32] zeroinitializer
@v3 = alias i16, bitcast (i32* @v1 to i16*)
-; CHECK: @v3 = alias i16, bitcast (i32* @v1 to i16*)
+; CHECK: @v3 = alias i16, ptr @v1
@v4 = alias i32, getelementptr ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
-; CHECK: @v4 = alias i32, getelementptr inbounds ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
+; CHECK: @v4 = alias i32, ptr @v2
@v5 = alias i32, i32 addrspace(2)* addrspacecast (i32 addrspace(0)* @v1 to i32 addrspace(2)*)
-; CHECK: @v5 = alias i32, addrspacecast (i32* @v1 to i32 addrspace(2)*)
+; CHECK: @v5 = alias i32, addrspacecast (ptr @v1 to ptr addrspace(2))
@v6 = alias i16, i16* @v3
-; CHECK: @v6 = alias i16, i16* @v3
+; CHECK: @v6 = alias i16, ptr @v3
}
; CHECK: define <2 x float> @f() {
-; CHECK: ret <2 x float> select (i1 ptrtoint (<2 x float> ()* @f to i1), <2 x float> <float 1.000000e+00, float 0.000000e+00>, <2 x float> zeroinitializer)
+; CHECK: ret <2 x float> select (i1 ptrtoint (ptr @f to i1), <2 x float> <float 1.000000e+00, float 0.000000e+00>, <2 x float> zeroinitializer)
; CHECK: }
define void @memcpyintrinsic(i8* %dest, i8* %src, i32 %len) {
entry:
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %len, i1 true)
+; CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %len, i1 true)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 true)
ret void
define i32 @indirectbr(i8* %Addr){
entry:
-; CHECK: indirectbr i8* %Addr, [label %bb1, label %bb2]
+; CHECK: indirectbr ptr %Addr, [label %bb1, label %bb2]
indirectbr i8* %Addr, [ label %bb1, label %bb2 ]
bb1:
; entries are committed.
; Check an anonymous function as well, since in that case only the alias
; ends up in the value symbol table and having a summary.
-@f = alias void (), void ()* @0 ; <void ()*> [#uses=0]
-@h = external global void ()* ; <void ()*> [#uses=0]
+@f = alias void (), void ()* @0 ; <ptr> [#uses=0]
+@h = external global void ()* ; <ptr> [#uses=0]
define internal void @0() nounwind {
entry:
;CHECK-DAG: @bar = global i32 0
@baz = alias i32, i32* @bar
-;CHECK-DAG: @baz = alias i32, i32* @bar
+;CHECK-DAG: @baz = alias i32, ptr @bar
;BCAN: <SOURCE_FILENAME
;BCAN-NEXT: <GLOBALVAR {{.*}} op7=0/>
; RUN: llvm-dis < %S/upgrade-aarch64-ldstxr.bc | FileCheck %s
define void @f(i32* %p) {
-; CHECK: call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32)
+; CHECK: call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32)
%a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
-; CHECK: call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.aarch64.stxr.p0(i64 0, ptr elementtype(i32)
%c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
-; CHECK: call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32)
+; CHECK: call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32)
%a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
-; CHECK: call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.aarch64.stlxr.p0(i64 0, ptr elementtype(i32)
%c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
ret void
}
; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s
define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
}
define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
}
define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; ldN intrinsic name without any element type
define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK-LABEL: @ld2.nxv32i8_no_eltty
-; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; ldN intrinsic name with only output type
define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK-LABEL: @ld2.nxv32i8_no_predty_pty
-; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; Test upgrade of llvm.annotation intrinsics.
;
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
-; RUN: llvm-dis --opaque-pointers=0 < %s.bc | FileCheck %s --check-prefix=TYPED
-; RUN: llvm-dis --opaque-pointers=1 < %s.bc | FileCheck %s
+; RUN: llvm-dis < %s.bc | FileCheck %s
-; TYPED: define i32 @f(i32 [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
; CHECK: define i32 @f(i32 [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
define i32 @f(i32 %arg0, ptr %arg1, ptr %arg2, i32 %arg3) {
%result = call i32 @llvm.annotation.i32(i32 %arg0, ptr %arg1, ptr %arg2, i32 %arg3)
- ; TYPED: [[RESULT:%.*]] = call i32 @llvm.annotation.i32.p0i8(i32 [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]])
; CHECK: [[RESULT:%.*]] = call i32 @llvm.annotation.i32.p0(i32 [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]])
ret i32 %result
}
declare i32 @llvm.annotation.i32(i32, i8*, ptr, i32)
-; TYPED: declare i32 @llvm.annotation.i32.p0i8(i32, i8*, i8*, i32)
; CHECK: declare i32 @llvm.annotation.i32.p0(i32, ptr, ptr, i32)
; RUN: verify-uselistorder %s.bc
define i8* @invalid() {
-; CHECK-LABEL: define i8* @invalid() {
-; CHECK-NEXT: %tmp0 = call i8* @foo(){{$}}
-; CHECK-NEXT: ret i8* %tmp0
+; CHECK-LABEL: define ptr @invalid() {
+; CHECK-NEXT: %tmp0 = call ptr @foo(){{$}}
+; CHECK-NEXT: ret ptr %tmp0
%tmp0 = call i8* @foo() [ "clang.arc.attachedcall"() ]
ret i8* %tmp0
}
define i8* @valid() {
-; CHECK-LABEL: define i8* @valid() {
-; CHECK-NEXT: %tmp0 = call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK-NEXT: ret i8* %tmp0
+; CHECK-LABEL: define ptr @valid() {
+; CHECK-NEXT: %tmp0 = call ptr @foo() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK-NEXT: ret ptr %tmp0
%tmp0 = call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
ret i8* %tmp0
}
; RUN: llvm-dis < %S/upgrade-arc-runtime-calls-bitcast.bc | FileCheck %s
-; CHECK: tail call i8* @objc_retain(i32 1)
-; CHECK: tail call i8* @objc_storeStrong(
+; CHECK: tail call ptr @objc_retain(i32 1)
+; CHECK: tail call ptr @objc_storeStrong(
define void @testRuntimeCalls(i8* %a, i8** %b) {
%v6 = tail call i8* @objc_retain(i32 1)
// Check that auto-upgrader converts function calls to intrinsic calls. Note that
// the auto-upgrader doesn't touch invoke instructions.
-// ARC: define void @testRuntimeCalls(i8* %[[A:.*]], i8** %[[B:.*]], i8** %[[C:.*]], i32* %[[D:.*]], i32** %[[E:.*]]) personality
-// ARC: %[[V0:.*]] = tail call i8* @llvm.objc.autorelease(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.autoreleasePoolPop(i8* %[[A]])
-// ARC-NEXT: %[[V1:.*]] = tail call i8* @llvm.objc.autoreleasePoolPush()
-// ARC-NEXT: %[[V2:.*]] = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.copyWeak(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.destroyWeak(i8** %[[B]])
-// ARC-NEXT: %[[V100:.*]] = bitcast i32** %[[E]] to i8**
-// ARC-NEXT: %[[V101:.*]] = bitcast i32* %[[D]] to i8*
-// ARC-NEXT: %[[V102:.*]] = tail call i8* @llvm.objc.initWeak(i8** %[[V100]], i8* %[[V101]])
-// ARC-NEXT: %[[V103:.*]] = bitcast i8* %[[V102]] to i32*
-// ARC-NEXT: %[[V4:.*]] = tail call i8* @llvm.objc.loadWeak(i8** %[[B]])
-// ARC-NEXT: %[[V5:.*]] = tail call i8* @llvm.objc.loadWeakRetained(i8** %[[B]])
-// ARC-NEXT: tail call void @llvm.objc.moveWeak(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.release(i8* %[[A]])
-// ARC-NEXT: %[[V6:.*]] = tail call i8* @llvm.objc.retain(i8* %[[A]])
-// ARC-NEXT: %[[V7:.*]] = tail call i8* @llvm.objc.retainAutorelease(i8* %[[A]])
-// ARC-NEXT: %[[V8:.*]] = tail call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V9:.*]] = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V10:.*]] = tail call i8* @llvm.objc.retainBlock(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.storeStrong(i8** %[[B]], i8* %[[A]])
-// ARC-NEXT: %[[V11:.*]] = tail call i8* @llvm.objc.storeWeak(i8** %[[B]], i8* %[[A]])
-// ARC-NEXT: tail call void (...) @llvm.objc.clang.arc.use(i8* %[[A]])
-// ARC-NEXT: %[[V12:.*]] = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V13:.*]] = tail call i8* @llvm.objc.retainedObject(i8* %[[A]])
-// ARC-NEXT: %[[V14:.*]] = tail call i8* @llvm.objc.unretainedObject(i8* %[[A]])
-// ARC-NEXT: %[[V15:.*]] = tail call i8* @llvm.objc.unretainedPointer(i8* %[[A]])
-// ARC-NEXT: %[[V16:.*]] = tail call i8* @objc_retain.autorelease(i8* %[[A]])
-// ARC-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(i8* %[[A]])
-// ARC-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbstart(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbend(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbstart(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbend(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: invoke void @objc_autoreleasePoolPop(i8* %[[A]])
+// ARC: define void @testRuntimeCalls(ptr %[[A:.*]], ptr %[[B:.*]], ptr %[[C:.*]], ptr %[[D:.*]], ptr %[[E:.*]]) personality
+// ARC: %[[V0:.*]] = tail call ptr @llvm.objc.autorelease(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.autoreleasePoolPop(ptr %[[A]])
+// ARC-NEXT: %[[V1:.*]] = tail call ptr @llvm.objc.autoreleasePoolPush()
+// ARC-NEXT: %[[V2:.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.copyWeak(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.destroyWeak(ptr %[[B]])
+// ARC-NEXT: %[[V102:.*]] = tail call ptr @llvm.objc.initWeak(ptr %[[E]], ptr %[[D]])
+// ARC-NEXT: %[[V4:.*]] = tail call ptr @llvm.objc.loadWeak(ptr %[[B]])
+// ARC-NEXT: %[[V5:.*]] = tail call ptr @llvm.objc.loadWeakRetained(ptr %[[B]])
+// ARC-NEXT: tail call void @llvm.objc.moveWeak(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.release(ptr %[[A]])
+// ARC-NEXT: %[[V6:.*]] = tail call ptr @llvm.objc.retain(ptr %[[A]])
+// ARC-NEXT: %[[V7:.*]] = tail call ptr @llvm.objc.retainAutorelease(ptr %[[A]])
+// ARC-NEXT: %[[V8:.*]] = tail call ptr @llvm.objc.retainAutoreleaseReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V9:.*]] = tail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V10:.*]] = tail call ptr @llvm.objc.retainBlock(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.storeStrong(ptr %[[B]], ptr %[[A]])
+// ARC-NEXT: %[[V11:.*]] = tail call ptr @llvm.objc.storeWeak(ptr %[[B]], ptr %[[A]])
+// ARC-NEXT: tail call void (...) @llvm.objc.clang.arc.use(ptr %[[A]])
+// ARC-NEXT: %[[V12:.*]] = tail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V13:.*]] = tail call ptr @llvm.objc.retainedObject(ptr %[[A]])
+// ARC-NEXT: %[[V14:.*]] = tail call ptr @llvm.objc.unretainedObject(ptr %[[A]])
+// ARC-NEXT: %[[V15:.*]] = tail call ptr @llvm.objc.unretainedPointer(ptr %[[A]])
+// ARC-NEXT: %[[V16:.*]] = tail call ptr @objc_retain.autorelease(ptr %[[A]])
+// ARC-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(ptr %[[A]])
+// ARC-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbstart(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbend(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbstart(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbend(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: invoke void @objc_autoreleasePoolPop(ptr %[[A]])
-// NOUPGRADE: define void @testRuntimeCalls(i8* %[[A:.*]], i8** %[[B:.*]], i8** %[[C:.*]], i32* %[[D:.*]], i32** %[[E:.*]]) personality
-// NOUPGRADE: %[[V0:.*]] = tail call i8* @objc_autorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_autoreleasePoolPop(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V1:.*]] = tail call i8* @objc_autoreleasePoolPush()
-// NOUPGRADE-NEXT: %[[V2:.*]] = tail call i8* @objc_autoreleaseReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_copyWeak(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_destroyWeak(i8** %[[B]])
-// NOUPGRADE-NEXT: %[[V3:.*]] = tail call i32* @objc_initWeak(i32** %[[E]], i32* %[[D]])
-// NOUPGRADE-NEXT: %[[V4:.*]] = tail call i8* @objc_loadWeak(i8** %[[B]])
-// NOUPGRADE-NEXT: %[[V5:.*]] = tail call i8* @objc_loadWeakRetained(i8** %[[B]])
-// NOUPGRADE-NEXT: tail call void @objc_moveWeak(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_release(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V6:.*]] = tail call i8* @objc_retain(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V7:.*]] = tail call i8* @objc_retainAutorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V8:.*]] = tail call i8* @objc_retainAutoreleaseReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V9:.*]] = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V10:.*]] = tail call i8* @objc_retainBlock(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_storeStrong(i8** %[[B]], i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V11:.*]] = tail call i8* @objc_storeWeak(i8** %[[B]], i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void (...) @llvm.objc.clang.arc.use(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V12:.*]] = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V13:.*]] = tail call i8* @objc_retainedObject(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V14:.*]] = tail call i8* @objc_unretainedObject(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V15:.*]] = tail call i8* @objc_unretainedPointer(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V16:.*]] = tail call i8* @objc_retain.autorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbstart(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbend(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbstart(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbend(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: invoke void @objc_autoreleasePoolPop(i8* %[[A]])
+// NOUPGRADE: define void @testRuntimeCalls(ptr %[[A:.*]], ptr %[[B:.*]], ptr %[[C:.*]], ptr %[[D:.*]], ptr %[[E:.*]]) personality
+// NOUPGRADE: %[[V0:.*]] = tail call ptr @objc_autorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_autoreleasePoolPop(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V1:.*]] = tail call ptr @objc_autoreleasePoolPush()
+// NOUPGRADE-NEXT: %[[V2:.*]] = tail call ptr @objc_autoreleaseReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_copyWeak(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_destroyWeak(ptr %[[B]])
+// NOUPGRADE-NEXT: %[[V3:.*]] = tail call ptr @objc_initWeak(ptr %[[E]], ptr %[[D]])
+// NOUPGRADE-NEXT: %[[V4:.*]] = tail call ptr @objc_loadWeak(ptr %[[B]])
+// NOUPGRADE-NEXT: %[[V5:.*]] = tail call ptr @objc_loadWeakRetained(ptr %[[B]])
+// NOUPGRADE-NEXT: tail call void @objc_moveWeak(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_release(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V6:.*]] = tail call ptr @objc_retain(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V7:.*]] = tail call ptr @objc_retainAutorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V8:.*]] = tail call ptr @objc_retainAutoreleaseReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V9:.*]] = tail call ptr @objc_retainAutoreleasedReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V10:.*]] = tail call ptr @objc_retainBlock(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_storeStrong(ptr %[[B]], ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V11:.*]] = tail call ptr @objc_storeWeak(ptr %[[B]], ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void (...) @llvm.objc.clang.arc.use(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V12:.*]] = tail call ptr @objc_unsafeClaimAutoreleasedReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V13:.*]] = tail call ptr @objc_retainedObject(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V14:.*]] = tail call ptr @objc_unretainedObject(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V15:.*]] = tail call ptr @objc_unretainedPointer(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V16:.*]] = tail call ptr @objc_retain.autorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbstart(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbend(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbstart(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbend(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: invoke void @objc_autoreleasePoolPop(ptr %[[A]])
define void @test(%struct.s* %arg) {
; CHECK-LABEL: define void @test
-; CHECK: %x = call %struct.s* @llvm.preserve.array.access.index.p0s_struct.ss.p0s_struct.ss(%struct.s* elementtype(%struct.s) %arg, i32 0, i32 2)
-; CHECK: %1 = call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.ss(%struct.s* elementtype(%struct.s) %x, i32 1, i32 1)
+; CHECK: %x = call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(%struct.s) %arg, i32 0, i32 2)
+; CHECK: %1 = call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s) %x, i32 1, i32 1)
%x = call %struct.s* @llvm.preserve.array.access.index.p0s_struct.ss.p0s_struct.ss(%struct.s* %arg, i32 0, i32 2)
call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.ss(%struct.s* %x, i32 1, i32 1)
ret void
; RUN: verify-uselistorder < %s.bc
; The 2-field form @llvm.global_ctors will be upgraded when reading bitcode.
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
; RUN: verify-uselistorder < %s.bc
; The 2-field form @llvm.global_dtors will be upgraded when reading bitcode.
-; CHECK: @llvm.global_dtors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* null, i8* null }, { i32, void ()*, i8* } { i32 65534, void ()* null, i8* null }]
+; CHECK: @llvm.global_dtors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr null, ptr null }, { i32, ptr, ptr } { i32 65534, ptr null, ptr null }]
; RUN: llvm-dis < %s.bc | FileCheck %s
-; CHECK: call void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: call void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
define void @test_call(i32* %p1, i32* %p2) {
call void asm "", "=*rm,r"(i32* %p1, i32* %p2)
ret void
}
-; CHECK: invoke void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: invoke void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
define void @test_invoke(i32* %p1, i32* %p2) personality i8* null {
invoke void asm "", "=*rm,r"(i32* %p1, i32* %p2)
to label %cont unwind label %lpad
ret void
}
-; CHECK: callbr void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: callbr void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
define void @test_callbr(i32* %p1, i32* %p2) {
callbr void asm "", "=*rm,r"(i32* %p1, i32* %p2)
to label %cont []
; the function, but that makes it easier to test that they are handled
; correctly.
define void @f1(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
-;CHECK: @f1(i8* [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
+;CHECK: @f1(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
%t0 = call i8* @llvm.ptr.annotation.p0i8(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3)
-;CHECK: call i8* @llvm.ptr.annotation.p0i8.p0i8(i8* [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK: call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
%arg0_p16 = bitcast i8* %arg0 to i16*
%t1 = call i16* @llvm.ptr.annotation.p0i16(i16* %arg0_p16, i8* %arg1, i8* %arg2, i32 %arg3)
;CHECK: [[ARG0_P16:%.*]] = bitcast
-;CHECK: call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* [[ARG0_P16]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK: call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0_P16]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
%arg0_p256 = bitcast i8* %arg0 to i256*
%t2 = call i256* @llvm.ptr.annotation.p0i256(i256* %arg0_p256, i8* %arg1, i8* %arg2, i32 %arg3)
;CHECK: [[ARG0_P256:%.*]] = bitcast
-;CHECK: call i256* @llvm.ptr.annotation.p0i256.p0i8(i256* [[ARG0_P256]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK: call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0_P256]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
ret void
}
%cmp = icmp ugt i16* %t0, %t1
%sel = select i1 %cmp, i16* %t0, i16* %t1
ret i16* %sel
-; CHECK: [[T0:%.*]] = call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* %x, i8* undef, i8* undef, i32 undef, i8* null)
-; CHECK: [[T1:%.*]] = call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* %y, i8* undef, i8* undef, i32 undef, i8* null)
-; CHECK: %cmp = icmp ugt i16* [[T0]], [[T1]]
-; CHECK: %sel = select i1 %cmp, i16* [[T0]], i16* [[T1]]
-; CHECK: ret i16* %sel
+; CHECK: [[T0:%.*]] = call ptr @llvm.ptr.annotation.p0.p0(ptr %x, ptr undef, ptr undef, i32 undef, ptr null)
+; CHECK: [[T1:%.*]] = call ptr @llvm.ptr.annotation.p0.p0(ptr %y, ptr undef, ptr undef, i32 undef, ptr null)
+; CHECK: %cmp = icmp ugt ptr [[T0]], [[T1]]
+; CHECK: %sel = select i1 %cmp, ptr [[T0]], ptr [[T1]]
+; CHECK: ret ptr %sel
}
+; CHECK: declare ptr @llvm.ptr.annotation.p0.p0(ptr, ptr, ptr, i32, ptr)
declare i8* @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32)
-; CHECK: declare i8* @llvm.ptr.annotation.p0i8.p0i8(i8*, i8*, i8*, i32, i8*)
declare i16* @llvm.ptr.annotation.p0i16(i16*, i8*, i8*, i32)
-; CHECK: declare i16* @llvm.ptr.annotation.p0i16.p0i8(i16*, i8*, i8*, i32, i8*)
declare i256* @llvm.ptr.annotation.p0i256(i256*, i8*, i8*, i32)
-; CHECK: declare i256* @llvm.ptr.annotation.p0i256.p0i8(i256*, i8*, i8*, i32, i8*)
define void @_Z4testPiPf(i32* nocapture %pI, float* nocapture %pF) #0 {
entry:
store i32 0, i32* %pI, align 4, !tbaa !{!"int", !0}
- ; CHECK: store i32 0, i32* %pI, align 4, !tbaa [[TAG_INT:!.*]]
+ ; CHECK: store i32 0, ptr %pI, align 4, !tbaa [[TAG_INT:!.*]]
store float 1.000000e+00, float* %pF, align 4, !tbaa !2
- ; CHECK: store float 1.000000e+00, float* %pF, align 4, !tbaa [[TAG_FLOAT:!.*]]
+ ; CHECK: store float 1.000000e+00, ptr %pF, align 4, !tbaa [[TAG_FLOAT:!.*]]
ret void
}
define void @f(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
-;CHECK: @f(i8* [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
+;CHECK: @f(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
call void @llvm.var.annotation(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3)
-;CHECK: call void @llvm.var.annotation.p0i8.p0i8(i8* [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK: call void @llvm.var.annotation.p0.p0(ptr [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
ret void
}
; Function Attrs: nofree nosync nounwind willreturn
declare void @llvm.var.annotation(i8*, i8*, i8*, i32)
-; CHECK: declare void @llvm.var.annotation.p0i8.p0i8(i8*, i8*, i8*, i32, i8*)
+; CHECK: declare void @llvm.var.annotation.p0.p0(ptr, ptr, ptr, i32, ptr)
%ap = alloca i8*
%ap2 = bitcast i8** %ap to i8*
-; CHECK: call void @llvm.va_start(i8* %ap2)
+; CHECK: call void @llvm.va_start(ptr %ap2)
call void @llvm.va_start(i8* %ap2)
-; CHECK-NEXT: %tmp = va_arg i8** %ap, i32
+; CHECK-NEXT: %tmp = va_arg ptr %ap, i32
%tmp = va_arg i8** %ap, i32
%aq = alloca i8*
%aq2 = bitcast i8** %aq to i8*
-; CHECK: call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+; CHECK: call void @llvm.va_copy(ptr %aq2, ptr %ap2)
call void @llvm.va_copy(i8* %aq2, i8* %ap2)
-; CHECK-NEXT: call void @llvm.va_end(i8* %aq2)
+; CHECK-NEXT: call void @llvm.va_end(ptr %aq2)
call void @llvm.va_end(i8* %aq2)
-; CHECK-NEXT: call void @llvm.va_end(i8* %ap2)
+; CHECK-NEXT: call void @llvm.va_end(ptr %ap2)
call void @llvm.va_end(i8* %ap2)
ret i32 %tmp
}
; before the IR change on this file.
define i32 @test(i32* %addr, i32 %old, i32 %new) {
-; CHECK: [[TMP:%.*]] = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
+; CHECK: [[TMP:%.*]] = cmpxchg ptr %addr, i32 %old, i32 %new seq_cst monotonic
; CHECK: %val = extractvalue { i32, i1 } [[TMP]], 0
%val = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
ret i32 %val