define protected amdgpu_kernel void @lds_store(i32 %i) sanitize_address {
entry:
- ; CHECK-NOT: call * __asan_report
- %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32] addrspace(3)* @count, i32 0, i32 %i
- store i32 0, i32 addrspace(3)* %arrayidx1, align 4
+ ; CHECK-LABEL: @lds_store(
+ ; CHECK-NOT: call
+ %arrayidx1 = getelementptr inbounds [100 x i32], ptr addrspace(3) @count, i32 0, i32 %i
+ store i32 0, ptr addrspace(3) %arrayidx1, align 4
ret void
}
define protected amdgpu_kernel void @lds_load(i32 %i) sanitize_address {
entry:
- ; CHECK-NOT: call * __asan_report
- %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32] addrspace(3)* @count, i32 0, i32 %i
- %0 = load i32, i32 addrspace(3)* %arrayidx1, align 4
+ ; CHECK-LABEL: @lds_load(
+ ; CHECK-NOT: call
+ %arrayidx1 = getelementptr inbounds [100 x i32], ptr addrspace(3) @count, i32 0, i32 %i
+ %0 = load i32, ptr addrspace(3) %arrayidx1, align 4
ret void
}
+
+; CHECK-LABEL: define internal void @asan.module_ctor()
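
; Note: the two kernels above access LDS (addrspace(3)). The AMDGPU ASan
; support treats shared memory as having no shadow mapping, so the pass is
; expected to leave these accesses uninstrumented; the CHECK-NOT lines only
; assert the absence of a report call.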
define protected amdgpu_kernel void @scratch_store(i32 %i) sanitize_address {
entry:
; CHECK-NOT: call * __asan_report
%c = alloca i32, align 4, addrspace(5)
- store i32 0, i32 addrspace(5)* %c, align 4
+ store i32 0, ptr addrspace(5) %c, align 4
ret void
}
define protected amdgpu_kernel void @scratch_load(i32 %i) sanitize_address {
entry:
; CHECK-NOT: call * __asan_report
%c = alloca i32, align 4, addrspace(5)
- %0 = load i32, i32 addrspace(5)* %c, align 4
+ %0 = load i32, ptr addrspace(5) %c, align 4
ret void
}
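
; As with LDS, private/scratch memory (addrspace(5)) has no shadow mapping on
; AMDGPU, so the stack accesses in these two kernels are likewise expected to
; stay uninstrumented.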
define protected amdgpu_kernel void @constant_load(i64 %i) sanitize_address {
entry:
; CHECK-LABEL: @constant_load
; CHECK-NOT: load
;
-; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32 addrspace(4)* %a to i64
+; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint ptr addrspace(4) %a to i64
; CHECK: lshr i64 %[[LOAD_ADDR]], 3
; CHECK: add i64 %{{.*}}, 2147450880
; CHECK: %[[LOAD_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, i8* %[[LOAD_SHADOW_PTR]]
+; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, ptr %[[LOAD_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual load.
-; CHECK: load i32, i32 addrspace(4)* %a
+; CHECK: load i32, ptr addrspace(4) %a
; CHECK: ret void
- %a = getelementptr inbounds [2 x i32], [2 x i32] addrspace(4)* @x, i64 0, i64 %i
- %q = load i32, i32 addrspace(4)* %a, align 4
+ %a = getelementptr inbounds [2 x i32], ptr addrspace(4) @x, i64 0, i64 %i
+ %q = load i32, ptr addrspace(4) %a, align 4
ret void
}
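
; For reference, the CHECK sequence above matches the classic ASan shadow
; check, Shadow = (Addr >> Scale) + Offset, with Scale 3 and Offset
; 2147450880 (0x7fff8000). A hand-written sketch of the emitted pattern
; (value names are illustrative, not what the pass actually produces):
;
;   %addr    = ptrtoint ptr addrspace(4) %a to i64
;   %shifted = lshr i64 %addr, 3
;   %saddr   = add i64 %shifted, 2147450880
;   %sptr    = inttoptr i64 %saddr to ptr
;   %sval    = load i8, ptr %sptr
;   %poison  = icmp ne i8 %sval, 0
;   br i1 %poison, label %report, label %cont   ; %report ends in unreachable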
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
target triple = "amdgcn-amd-amdhsa"
-define protected amdgpu_kernel void @generic_store(i32 addrspace(1)* %p, i32 %i) sanitize_address {
+define protected amdgpu_kernel void @generic_store(ptr addrspace(1) %p, i32 %i) sanitize_address {
entry:
; CHECK-LABEL: @generic_store
; CHECK-NOT: store
-; CHECK: %[[GENERIC_ADDR:[^ ]*]] = bitcast i32* %q to i8*
-; CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[GENERIC_ADDR]])
-; CHECK: call i1 @llvm.amdgcn.is.private(i8* %[[GENERIC_ADDR]])
+; CHECK: %[[GENERIC_ADDR:[^ ]*]] = addrspacecast ptr addrspace(1) %p to ptr
+; CHECK: call i1 @llvm.amdgcn.is.shared(ptr %[[GENERIC_ADDR]])
+; CHECK: call i1 @llvm.amdgcn.is.private(ptr %[[GENERIC_ADDR]])
; CHECK: or
; CHECK: icmp ne i1
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
-; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32* %q to i64
+; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint ptr %q to i64
; CHECK: lshr i64 %[[STORE_ADDR]], 3
; CHECK: add i64 %{{.*}}, 2147450880
; CHECK: %[[STORE_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, i8* %[[STORE_SHADOW_PTR]]
+; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, ptr %[[STORE_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual store.
-; CHECK: store i32 0, i32* %q
+; CHECK: store i32 0, ptr %q
; CHECK: ret void
- %q = addrspacecast i32 addrspace(1)* %p to i32*
- store i32 0, i32* %q, align 4
+ %q = addrspacecast ptr addrspace(1) %p to ptr
+ store i32 0, ptr %q, align 4
ret void
}
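
; Generic (flat) pointers cannot be classified statically, so before the
; shadow check the pass asks the target at run time whether the address is
; LDS or private, and skips instrumentation when either query is true. A
; rough sketch of the guard matched above (names are illustrative):
;
;   %is.shared  = call i1 @llvm.amdgcn.is.shared(ptr %q)
;   %is.private = call i1 @llvm.amdgcn.is.private(ptr %q)
;   %unshadowed = or i1 %is.shared, %is.private
;   %do.check   = icmp ne i1 %unshadowed, true
;   br i1 %do.check, label %shadow.check, label %skip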
-define protected amdgpu_kernel void @generic_load(i32 addrspace(1)* %p, i32 %i) sanitize_address {
+define protected amdgpu_kernel void @generic_load(ptr addrspace(1) %p, i32 %i) sanitize_address {
entry:
; CHECK-LABEL: @generic_load
; CHECK-NOT: load
-; CHECK: %[[GENERIC_ADDR:[^ ]*]] = bitcast i32* %q to i8*
-; CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[GENERIC_ADDR]])
-; CHECK: call i1 @llvm.amdgcn.is.private(i8* %[[GENERIC_ADDR]])
+; CHECK: %[[GENERIC_ADDR:[^ ]*]] = addrspacecast ptr addrspace(1) %p to ptr
+; CHECK: call i1 @llvm.amdgcn.is.shared(ptr %[[GENERIC_ADDR]])
+; CHECK: call i1 @llvm.amdgcn.is.private(ptr %[[GENERIC_ADDR]])
; CHECK: or
; CHECK: icmp ne i1
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
-; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32* %q to i64
+; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint ptr %q to i64
; CHECK: lshr i64 %[[STORE_ADDR]], 3
; CHECK: add i64 %{{.*}}, 2147450880
; CHECK: %[[STORE_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, i8* %[[STORE_SHADOW_PTR]]
+; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, ptr %[[STORE_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual load.
-; CHECK: load i32, i32* %q
+; CHECK: load i32, ptr %q
; CHECK: ret void
- %q = addrspacecast i32 addrspace(1)* %p to i32*
- %r = load i32, i32* %q, align 4
+ %q = addrspacecast ptr addrspace(1) %p to ptr
+ %r = load i32, ptr %q, align 4
ret void
}
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
target triple = "amdgcn-amd-amdhsa"
-define protected amdgpu_kernel void @global_store(i32 addrspace(1)* %p, i32 %i) sanitize_address {
+define protected amdgpu_kernel void @global_store(ptr addrspace(1) %p, i32 %i) sanitize_address {
entry:
; CHECK-LABEL: @global_store
; CHECK-NOT: store
;
-; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32 addrspace(1)* %p to i64
+; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint ptr addrspace(1) %p to i64
; CHECK: lshr i64 %[[STORE_ADDR]], 3
; CHECK: add i64 %{{.*}}, 2147450880
; CHECK: %[[STORE_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, i8* %[[STORE_SHADOW_PTR]]
+; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, ptr %[[STORE_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual store.
-; CHECK: store i32 0, i32 addrspace(1)* %p
+; CHECK: store i32 0, ptr addrspace(1) %p
; CHECK: ret void
- store i32 0, i32 addrspace(1)* %p, align 4
+ store i32 0, ptr addrspace(1) %p, align 4
ret void
}
-define protected amdgpu_kernel void @global_load(i32 addrspace(1)* %p, i32 %i) sanitize_address {
+define protected amdgpu_kernel void @global_load(ptr addrspace(1) %p, i32 %i) sanitize_address {
entry:
; CHECK-LABEL: @global_load
; CHECK-NOT: load
;
-; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32 addrspace(1)* %p to i64
+; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint ptr addrspace(1) %p to i64
; CHECK: lshr i64 %[[LOAD_ADDR]], 3
; CHECK: add i64 %{{.*}}, 2147450880
; CHECK: %[[LOAD_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, i8* %[[LOAD_SHADOW_PTR]]
+; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, ptr %[[LOAD_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual load.
-; CHECK: load i32, i32 addrspace(1)* %p
+; CHECK: load i32, ptr addrspace(1) %p
; CHECK: ret void
- %q = load i32, i32 addrspace(1)* %p, align 4
+ %q = load i32, ptr addrspace(1) %p, align 4
ret void
}
; CHECK: llvm.asan.globals
!llvm.asan.globals = !{!0, !1}
-!0 = !{[1 x i32] addrspace(1)* @g, null, !"name", i1 false, i1 false}
-!1 = !{i8* addrspacecast (i8 addrspace(1)* bitcast ( [1 x i32] addrspace(1)* @g to i8 addrspace(1)*) to i8*), null, !"name", i1 false, i1 false}
+!0 = !{ptr addrspace(1) @g, null, !"name", i1 false, i1 false}
+!1 = !{ptr addrspacecast (ptr addrspace(1) @g to ptr), null, !"name", i1 false, i1 false}
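
; Each !llvm.asan.globals entry is a five-operand tuple, read by the pass as
; !{<global>, <source location or null>, <name>, <is dynamically initialized>,
; <is excluded from instrumentation>}. The second entry above refers to @g
; through an addrspacecast constant expression, which the pass has to strip
; to recover the underlying global.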
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-;@sink = global i32* null, align 4
+;@sink = global ptr null, align 4
; Ignore direct inbounds stack access.
define void @foo() uwtable sanitize_address {
entry:
%a = alloca i32, align 4
- store i32 42, i32* %a, align 4
+ store i32 42, ptr %a, align 4
ret void
; CHECK-LABEL: define void @foo
; CHECK-NOT: __asan_report
}
define void @baz(i64 %i) sanitize_address {
entry:
%a = alloca [10 x i32], align 4
- %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 %i
- store i32 42, i32* %e, align 4
+ %e = getelementptr inbounds [10 x i32], ptr %a, i32 0, i64 %i
+ store i32 42, ptr %e, align 4
ret void
; CHECK-LABEL: define void @baz
; CHECK: __asan_report
}
define void @bar() sanitize_address {
entry:
%a = alloca [10 x i32], align 4
- %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 12
- store i32 42, i32* %e, align 4
+ %e = getelementptr inbounds [10 x i32], ptr %a, i32 0, i64 12
+ store i32 42, ptr %e, align 4
ret void
; CHECK-LABEL: define void @bar
; CHECK: __asan_report
}
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-S32"
target triple = "i386-pc-windows-msvc"
-define void @MyCPUID(i32 %fxn, i32* %out) sanitize_address {
+define void @MyCPUID(i32 %fxn, ptr %out) sanitize_address {
%fxn.ptr = alloca i32
%a.ptr = alloca i32
%b.ptr = alloca i32
%c.ptr = alloca i32
%d.ptr = alloca i32
- store i32 %fxn, i32* %fxn.ptr
- call void asm sideeffect inteldialect "xchg ebx, esi\0A\09mov eax, dword ptr $4\0A\09cpuid\0A\09mov dword ptr $0, eax\0A\09mov dword ptr $1, ebx\0A\09mov dword ptr $2, ecx\0A\09mov dword ptr $3, edx\0A\09xchg ebx, esi", "=*m,=*m,=*m,=*m,*m,~{eax},~{ebx},~{ecx},~{edx},~{esi},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %a.ptr, i32* elementtype(i32) %b.ptr, i32* elementtype(i32) %c.ptr, i32* elementtype(i32) %d.ptr, i32* elementtype(i32) %fxn.ptr)
+ store i32 %fxn, ptr %fxn.ptr
+ call void asm sideeffect inteldialect "xchg ebx, esi\0A\09mov eax, dword ptr $4\0A\09cpuid\0A\09mov dword ptr $0, eax\0A\09mov dword ptr $1, ebx\0A\09mov dword ptr $2, ecx\0A\09mov dword ptr $3, edx\0A\09xchg ebx, esi", "=*m,=*m,=*m,=*m,*m,~{eax},~{ebx},~{ecx},~{edx},~{esi},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %a.ptr, ptr elementtype(i32) %b.ptr, ptr elementtype(i32) %c.ptr, ptr elementtype(i32) %d.ptr, ptr elementtype(i32) %fxn.ptr)
- %a = load i32, i32* %a.ptr
- %a.out = getelementptr inbounds i32, i32* %out, i32 0
- store i32 %a, i32* %a.out
+ %a = load i32, ptr %a.ptr
+ store i32 %a, ptr %out
- %b = load i32, i32* %b.ptr
- %b.out = getelementptr inbounds i32, i32* %out, i32 1
- store i32 %b, i32* %b.out
+ %b = load i32, ptr %b.ptr
+ %b.out = getelementptr inbounds i32, ptr %out, i32 1
+ store i32 %b, ptr %b.out
- %c = load i32, i32* %c.ptr
- %c.out = getelementptr inbounds i32, i32* %out, i32 2
- store i32 %c, i32* %c.out
+ %c = load i32, ptr %c.ptr
+ %c.out = getelementptr inbounds i32, ptr %out, i32 2
+ store i32 %c, ptr %c.out
- %d = load i32, i32* %d.ptr
- %d.out = getelementptr inbounds i32, i32* %out, i32 3
- store i32 %d, i32* %d.out
+ %d = load i32, ptr %d.ptr
+ %d.out = getelementptr inbounds i32, ptr %out, i32 3
+ store i32 %d, ptr %d.out
ret void
}
; Inline assembly makes LLVM run out of registers on 32-bit platforms.
; Therefore, we don't do stack malloc on such functions.
-; CHECK-LABEL: define void @MyCPUID(i32 %fxn, i32* %out)
+; CHECK-LABEL: define void @MyCPUID(i32 %fxn, ptr %out)
; CHECK: %MyAlloca = alloca [96 x i8], align 32
; CHECK-NOT: call {{.*}} @__asan_stack_malloc
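
; For context: with stack malloc enabled, ASan replaces the fixed frame with a
; runtime-allocated fake frame for use-after-return detection, roughly (a
; sketch only; the numeric suffix selects a size class):
;
;   %fake = call i64 @__asan_stack_malloc_1(i64 96)
;
; The CHECK-NOT lines assert that this rewrite is suppressed for functions
; containing inline assembly.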
; Don't do stack malloc on functions containing inline assembly on 64-bit
; platforms. It makes LLVM run out of registers.
-; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h)
+; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(ptr %S, i32 %pS, ptr %D, i32 %pD, i32 %h)
; CHECK: %MyAlloca
; CHECK-NOT: call {{.*}} @__asan_stack_malloc
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h) #0 {
+define void @TestAbsenceOfStackMalloc(ptr %S, i32 %pS, ptr %D, i32 %pD, i32 %h) #0 {
entry:
- %S.addr = alloca i8*, align 8
+ %S.addr = alloca ptr, align 8
%pS.addr = alloca i32, align 4
- %D.addr = alloca i8*, align 8
+ %D.addr = alloca ptr, align 8
%pD.addr = alloca i32, align 4
%h.addr = alloca i32, align 4
%sr = alloca i32, align 4
  %pDiffS = alloca i32, align 4
  %pDiffD = alloca i32, align 4
%flagSA = alloca i8, align 1
%flagDA = alloca i8, align 1
- store i8* %S, i8** %S.addr, align 8
- store i32 %pS, i32* %pS.addr, align 4
- store i8* %D, i8** %D.addr, align 8
- store i32 %pD, i32* %pD.addr, align 4
- store i32 %h, i32* %h.addr, align 4
- store i32 4, i32* %sr, align 4
- %0 = load i32, i32* %pD.addr, align 4
+ store ptr %S, ptr %S.addr, align 8
+ store i32 %pS, ptr %pS.addr, align 4
+ store ptr %D, ptr %D.addr, align 8
+ store i32 %pD, ptr %pD.addr, align 4
+ store i32 %h, ptr %h.addr, align 4
+ store i32 4, ptr %sr, align 4
+ %0 = load i32, ptr %pD.addr, align 4
%sub = sub i32 %0, 5
- store i32 %sub, i32* %pDiffD, align 4
- %1 = load i32, i32* %pS.addr, align 4
+ store i32 %sub, ptr %pDiffD, align 4
+ %1 = load i32, ptr %pS.addr, align 4
%shl = shl i32 %1, 1
%sub1 = sub i32 %shl, 5
- store i32 %sub1, i32* %pDiffS, align 4
- %2 = load i32, i32* %pS.addr, align 4
+ store i32 %sub1, ptr %pDiffS, align 4
+ %2 = load i32, ptr %pS.addr, align 4
%and = and i32 %2, 15
%cmp = icmp eq i32 %and, 0
%conv = zext i1 %cmp to i32
%conv2 = trunc i32 %conv to i8
- store i8 %conv2, i8* %flagSA, align 1
- %3 = load i32, i32* %pD.addr, align 4
+ store i8 %conv2, ptr %flagSA, align 1
+ %3 = load i32, ptr %pD.addr, align 4
%and3 = and i32 %3, 15
%cmp4 = icmp eq i32 %and3, 0
%conv5 = zext i1 %cmp4 to i32
%conv6 = trunc i32 %conv5 to i8
- store i8 %conv6, i8* %flagDA, align 1
- call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** elementtype(i8*) %S.addr, i8** elementtype(i8*) %D.addr, i32* elementtype(i32) %pS.addr, i32* elementtype(i32) %pDiffS, i32* elementtype(i32) %pDiffD, i32* elementtype(i32) %sr, i8* elementtype(i8) %flagSA, i8* elementtype(i8) %flagDA, i32* elementtype(i32) %h.addr) #1
+ store i8 %conv6, ptr %flagDA, align 1
+ call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(ptr) %S.addr, ptr elementtype(ptr) %D.addr, ptr elementtype(i32) %pS.addr, ptr elementtype(i32) %pDiffS, ptr elementtype(i32) %pDiffD, ptr elementtype(i32) %sr, ptr elementtype(i8) %flagSA, ptr elementtype(i8) %flagDA, ptr elementtype(i32) %h.addr) #1
ret void
}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
-%struct.DSPContext = type { void (i16*, i8*, i32)*, void (i16*, i8*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, i32 (i16*)*, void (i8*, i8*, i32, i32, i32, i32, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, void (i16*)*, void (i16*)*, i32 (i8*, i32)*, i32 (i8*, i32)*, [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], [6 x i32 (i8*, i8*, i8*, i32, i32)*], i32 (i8*, i16*, i32)*, [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [2 x void (i8*, i8*, i8*, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [8 x void (i8*, i8*, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [10 x void (i8*, i32, i32, i32, i32)*], [10 x void (i8*, i8*, i32, i32, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i16*, i32)*, [2 x [4 x i32 (i8*, i8*, i8*, i32, i32)*]], void (i8*, i8*, i32)*, void (i8*, i8*, i8*, i32)*, void (i8*, i8*, i8*, i32)*, void (i8*, i8*, i8*, i32, i32*, i32*)*, void (i8*, i8*, i8*, i32, i32*, i32*)*, i32 (i8*, i8*, i32, i32)*, void (i8*, i8*, i32, i32*, i32*, i32*)*, void (i8*, i8*, i8*, i32, i32)*, void (i32*, i32*, i32)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void ([4 x [4 x i16]]*, i8*, [40 x i8]*, [40 x [2 x i16]]*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32*)*, void (i8*, i32, i32*)*, void (i8*, i8*, i32, i16*, i16*)*, void (float*, float*, i32)*, void ([256 x float]*, [2 x float]*, i32, i32, i32)*, void (i32*, i32, i32, double*)*, void (float*, float*, i32)*, void (float*, float*, float*, i32)*, void (float*, float*, float*, float*, i32)*, void (float*, float*, float*, float*, float, i32)*, void (float*, i32*, float, i32)*, void (float*, float*, float, float, i32)*, void (float*, float*, float, i32)*, [2 x void (float*, float*, float**, float, i32)*], [2 x void (float*, float**, float, i32)*], float (float*, float*, i32)*, void (float*, float*, i32)*, void (i16*, float*, i32)*, void (i16*, float**, i32, i32)*, void (i16*)*, void (i16*)*, void (i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, [64 x i8], i32, i32 (i16*, i16*, i16*, i32)*, void (i16*, i16*, i32)*, void (i8*, i32, i32, i32, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void ([4 x i16]*)*, void (i8*, i32*, i16*, i32, i8*)*, void (i8*, i32*, i16*, i32, i8*)*, void (i8**, i32*, i16*, i32, i8*)*, void (i8*, i32*, i16*, i32, i8*)*, void (i16*, i16*, i16*, i16*, i16*, i16*, i32)*, void (i16*, i32)*, void (i8*, i32, i8**, i32, i32, i32, i32, i32, %struct.slice_buffer_s*, i32, i8*)*, void (i8*, i32, i32)*, [4 x void (i8*, i32, i8*, i32, i32, i32)*], void (i32*, i32*, i32, i32, i32, i32, i32, i32*)*, void (i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, void (i8*, i32)*, void (i8*, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, [16 x void (i8*, i8*, i32, i32)*], [16 x void (i8*, i8*, i32, i32)*], [12 x void (i8*, i8*, i32)*], void (i8*, i8*, i32, i32*, i32*, i32)*, void (i16*, i16*, i32)*, void (i16*, i16*, i32)*, i32 (i16*, i16*, i32, i32)*, [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*] }
+%struct.DSPContext = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], [6 x ptr], ptr, [4 x [4 x ptr]], [4 x [4 x ptr]], [4 x [4 x ptr]], [4 x [4 x ptr]], [2 x ptr], [11 x ptr], [11 x ptr], [2 x [16 x ptr]], [2 x [16 x ptr]], [2 x [16 x ptr]], [2 x [16 x ptr]], [8 x ptr], [3 x ptr], [3 x ptr], [3 x ptr], [3 x ptr], [4 x [16 x ptr]], [4 x [16 x ptr]], [4 x [16 x ptr]], [4 x [16 x ptr]], [10 x ptr], [10 x ptr], [2 x [16 x ptr]], [2 x [16 x ptr]], ptr, ptr, ptr, ptr, ptr, [2 x [4 x ptr]], ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [2 x ptr], [2 x ptr], ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [64 x i8], i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [4 x ptr], ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, [16 x ptr], [16 x ptr], [12 x ptr], ptr, ptr, ptr, ptr, [4 x [16 x ptr]], [4 x [16 x ptr]], [4 x [16 x ptr]], [4 x [16 x ptr]], [3 x ptr], [3 x ptr] }
%struct.slice_buffer_s = type opaque
-%struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8*, i32*, i32, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i32, float, i64, i32, i64, i64, float, float, %struct.AVHWAccel*, i32, i8*, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*, i32, i32)*, i8*, i32*, i32)*, i32, i32, i32, i32, i32, i32, i8*, float, float, float, float, i32, i32, i32, float, float, float, i32, i32, i32, i32, [4 x i32], i8*, i32, i32, i32, i32 }
-%struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
+%struct.AVCodecContext = type { ptr, i32, i32, i32, i32, i32, ptr, i32, %struct.AVRational, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, ptr, ptr, i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, ptr, ptr, i32, i32, i32, i32, ptr, ptr, float, float, i32, ptr, i32, ptr, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, ptr, i32, i32, i32, i32, %struct.AVRational, ptr, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, ptr, i32, ptr, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i32, float, i64, i32, i64, i64, float, float, ptr, i32, ptr, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, ptr, float, float, float, float, i32, i32, i32, float, float, float, i32, i32, i32, i32, [4 x i32], ptr, i32, i32, i32, i32 }
+%struct.AVClass = type { ptr, ptr, ptr }
%struct.AVOption = type opaque
%struct.AVRational = type { i32, i32 }
-%struct.AVFrame = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*], i64, i8* }
+%struct.AVFrame = type { [4 x ptr], [4 x i32], [4 x ptr], i32, i32, i64, i32, i32, i32, i32, i32, ptr, i32, ptr, [2 x ptr], ptr, i8, ptr, [4 x i64], i32, i32, i32, i32, i32, ptr, i32, i32, ptr, [2 x ptr], i64, ptr }
%struct.AVPanScan = type { i32, i32, i32, [3 x [2 x i16]] }
-%struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, %struct.AVPacket*)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32*, i8*, i32*, i32*, i64* }
-%struct.AVPacket = type { i64, i64, i8*, i32, i32, i32, i32, void (%struct.AVPacket*)*, i8*, i64, i64 }
+%struct.AVCodec = type { ptr, i32, i32, i32, ptr, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%struct.AVPacket = type { i64, i64, ptr, i32, i32, i32, i32, ptr, ptr, i64, i64 }
%struct.RcOverride = type { i32, i32, i32, float }
%struct.AVPaletteControl = type { i32, [256 x i32] }
-%struct.AVHWAccel = type { i8*, i32, i32, i32, i32, %struct.AVHWAccel*, i32 (%struct.AVCodecContext*, i8*, i32)*, i32 (%struct.AVCodecContext*, i8*, i32)*, i32 (%struct.AVCodecContext*)*, i32 }
+%struct.AVHWAccel = type { ptr, i32, i32, i32, i32, ptr, ptr, ptr, ptr, i32 }
-@firtable = internal unnamed_addr constant [9 x i8*] [i8* @ff_mlp_firorder_0, i8* @ff_mlp_firorder_1, i8* @ff_mlp_firorder_2, i8* @ff_mlp_firorder_3, i8* @ff_mlp_firorder_4, i8* @ff_mlp_firorder_5, i8* @ff_mlp_firorder_6, i8* @ff_mlp_firorder_7, i8* @ff_mlp_firorder_8], align 4
-@iirtable = internal unnamed_addr constant [5 x i8*] [i8* @ff_mlp_iirorder_0, i8* @ff_mlp_iirorder_1, i8* @ff_mlp_iirorder_2, i8* @ff_mlp_iirorder_3, i8* @ff_mlp_iirorder_4], align 4
+@firtable = internal unnamed_addr constant [9 x ptr] [ptr @ff_mlp_firorder_0, ptr @ff_mlp_firorder_1, ptr @ff_mlp_firorder_2, ptr @ff_mlp_firorder_3, ptr @ff_mlp_firorder_4, ptr @ff_mlp_firorder_5, ptr @ff_mlp_firorder_6, ptr @ff_mlp_firorder_7, ptr @ff_mlp_firorder_8], align 4
+@iirtable = internal unnamed_addr constant [5 x ptr] [ptr @ff_mlp_iirorder_0, ptr @ff_mlp_iirorder_1, ptr @ff_mlp_iirorder_2, ptr @ff_mlp_iirorder_3, ptr @ff_mlp_iirorder_4], align 4
@ff_mlp_iirorder_0 = external global i8
@ff_mlp_iirorder_1 = external global i8
@ff_mlp_iirorder_2 = external global i8
@ff_mlp_iirorder_3 = external global i8
@ff_mlp_iirorder_4 = external global i8
@ff_mlp_firorder_0 = external global i8
@ff_mlp_firorder_1 = external global i8
@ff_mlp_firorder_2 = external global i8
@ff_mlp_firorder_3 = external global i8
@ff_mlp_firorder_4 = external global i8
@ff_mlp_firorder_5 = external global i8
@ff_mlp_firorder_6 = external global i8
@ff_mlp_firorder_7 = external global i8
@ff_mlp_firorder_8 = external global i8
-define void @ff_mlp_init_x86(%struct.DSPContext* nocapture %c, %struct.AVCodecContext* nocapture %avctx) nounwind sanitize_address {
+define void @ff_mlp_init_x86(ptr nocapture %c, ptr nocapture %avctx) nounwind sanitize_address {
entry:
- %mlp_filter_channel = getelementptr inbounds %struct.DSPContext, %struct.DSPContext* %c, i32 0, i32 131
- store void (i32*, i32*, i32, i32, i32, i32, i32, i32*)* @mlp_filter_channel_x86, void (i32*, i32*, i32, i32, i32, i32, i32, i32*)** %mlp_filter_channel, align 4, !tbaa !0
+ %mlp_filter_channel = getelementptr inbounds %struct.DSPContext, ptr %c, i32 0, i32 131
+ store ptr @mlp_filter_channel_x86, ptr %mlp_filter_channel, align 4, !tbaa !0
ret void
}
-define internal void @mlp_filter_channel_x86(i32* %state, i32* %coeff, i32 %firorder, i32 %iirorder, i32 %filter_shift, i32 %mask, i32 %blocksize, i32* %sample_buffer) nounwind sanitize_address {
+define internal void @mlp_filter_channel_x86(ptr %state, ptr %coeff, i32 %firorder, i32 %iirorder, i32 %filter_shift, i32 %mask, i32 %blocksize, ptr %sample_buffer) nounwind sanitize_address {
entry:
%filter_shift.addr = alloca i32, align 4
%mask.addr = alloca i32, align 4
%blocksize.addr = alloca i32, align 4
- %firjump = alloca i8*, align 4
- %iirjump = alloca i8*, align 4
- store i32 %filter_shift, i32* %filter_shift.addr, align 4, !tbaa !3
- store i32 %mask, i32* %mask.addr, align 4, !tbaa !3
- %arrayidx = getelementptr inbounds [9 x i8*], [9 x i8*]* @firtable, i32 0, i32 %firorder
- %0 = load i8*, i8** %arrayidx, align 4, !tbaa !0
- store i8* %0, i8** %firjump, align 4, !tbaa !0
- %arrayidx1 = getelementptr inbounds [5 x i8*], [5 x i8*]* @iirtable, i32 0, i32 %iirorder
- %1 = load i8*, i8** %arrayidx1, align 4, !tbaa !0
- store i8* %1, i8** %iirjump, align 4, !tbaa !0
+ %firjump = alloca ptr, align 4
+ %iirjump = alloca ptr, align 4
+ store i32 %filter_shift, ptr %filter_shift.addr, align 4, !tbaa !3
+ store i32 %mask, ptr %mask.addr, align 4, !tbaa !3
+ %arrayidx = getelementptr inbounds [9 x ptr], ptr @firtable, i32 0, i32 %firorder
+ %0 = load ptr, ptr %arrayidx, align 4, !tbaa !0
+ store ptr %0, ptr %firjump, align 4, !tbaa !0
+ %arrayidx1 = getelementptr inbounds [5 x ptr], ptr @iirtable, i32 0, i32 %iirorder
+ %1 = load ptr, ptr %arrayidx1, align 4, !tbaa !0
+ store ptr %1, ptr %iirjump, align 4, !tbaa !0
%sub = sub nsw i32 0, %blocksize
- store i32 %sub, i32* %blocksize.addr, align 4, !tbaa !3
- %2 = call { i32*, i32*, i32* } asm sideeffect "1: \0A\09xor %esi, %esi\0A\09xor %ecx, %ecx\0A\09jmp *$5 \0A\09ff_mlp_firorder_8: \0A\09mov 0x1c+0($0), %eax\0A\09imull 0x1c+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_7: \0A\09mov 0x18+0($0), %eax\0A\09imull 0x18+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_6: \0A\09mov 0x14+0($0), %eax\0A\09imull 0x14+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_5: \0A\09mov 0x10+0($0), %eax\0A\09imull 0x10+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_4: \0A\09mov 0x0c+0($0), %eax\0A\09imull 0x0c+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_3: \0A\09mov 0x08+0($0), %eax\0A\09imull 0x08+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_2: \0A\09mov 0x04+0($0), %eax\0A\09imull 0x04+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_1: \0A\09mov 0x00+0($0), %eax\0A\09imull 0x00+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_0:\0A\09jmp *$6 \0A\09ff_mlp_iirorder_4: \0A\09mov 0x0c+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x0c+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_3: \0A\09mov 0x08+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x08+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_2: \0A\09mov 0x04+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x04+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_1: \0A\09mov 0x00+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x00+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_0:\0A\09mov %ecx, %edx\0A\09mov %esi, %eax\0A\09movzbl $7 , %ecx\0A\09shrd %cl, %edx, %eax\0A\09mov %eax ,%edx \0A\09add ($2) ,%eax \0A\09and $4 ,%eax \0A\09sub $$4 , $0 \0A\09mov %eax, ($0) \0A\09mov %eax, ($2) \0A\09add $$4* 8 , $2 \0A\09sub %edx ,%eax \0A\09mov %eax,4*(8 + (40 * 4))($0) \0A\09incl $3 \0A\09js 1b \0A\09", "=r,=r,=r,=*m,*m,*m,*m,*m,0,1,2,*m,~{eax},~{edx},~{esi},~{ecx},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %blocksize.addr, i32* elementtype(i32) %mask.addr, i8** elementtype(i8*) %firjump, i8** elementtype(i8*) %iirjump, i32* elementtype(i32) %filter_shift.addr, i32* %state, i32* %coeff, i32* %sample_buffer, i32* elementtype(i32) %blocksize.addr) nounwind, !srcloc !4
+ store i32 %sub, ptr %blocksize.addr, align 4, !tbaa !3
+ %2 = call { ptr, ptr, ptr } asm sideeffect "1: \0A\09xor %esi, %esi\0A\09xor %ecx, %ecx\0A\09jmp *$5 \0A\09ff_mlp_firorder_8: \0A\09mov 0x1c+0($0), %eax\0A\09imull 0x1c+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_7: \0A\09mov 0x18+0($0), %eax\0A\09imull 0x18+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_6: \0A\09mov 0x14+0($0), %eax\0A\09imull 0x14+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_5: \0A\09mov 0x10+0($0), %eax\0A\09imull 0x10+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_4: \0A\09mov 0x0c+0($0), %eax\0A\09imull 0x0c+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_3: \0A\09mov 0x08+0($0), %eax\0A\09imull 0x08+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_2: \0A\09mov 0x04+0($0), %eax\0A\09imull 0x04+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_1: \0A\09mov 0x00+0($0), %eax\0A\09imull 0x00+0($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_firorder_0:\0A\09jmp *$6 \0A\09ff_mlp_iirorder_4: \0A\09mov 0x0c+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x0c+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_3: \0A\09mov 0x08+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x08+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_2: \0A\09mov 0x04+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x04+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_1: \0A\09mov 0x00+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x00+4* 8($1) \0A\09add %eax , %esi\0A\09adc %edx , %ecx\0A\09ff_mlp_iirorder_0:\0A\09mov %ecx, %edx\0A\09mov %esi, %eax\0A\09movzbl $7 , %ecx\0A\09shrd %cl, %edx, %eax\0A\09mov %eax ,%edx \0A\09add ($2) ,%eax \0A\09and $4 ,%eax \0A\09sub $$4 , $0 \0A\09mov %eax, ($0) \0A\09mov %eax, ($2) \0A\09add $$4* 8 , $2 \0A\09sub %edx ,%eax \0A\09mov %eax,4*(8 + (40 * 4))($0) \0A\09incl $3 \0A\09js 1b \0A\09", "=r,=r,=r,=*m,*m,*m,*m,*m,0,1,2,*m,~{eax},~{edx},~{esi},~{ecx},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %blocksize.addr, ptr elementtype(i32) %mask.addr, ptr elementtype(ptr) %firjump, ptr elementtype(ptr) %iirjump, ptr elementtype(i32) %filter_shift.addr, ptr %state, ptr %coeff, ptr %sample_buffer, ptr elementtype(i32) %blocksize.addr) nounwind, !srcloc !4
ret void
}
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.15.0"
-%t = type { void (%t*)*, void (%t*)*, %sub, i64 }
+%t = type { ptr, ptr, %sub, i64 }
%sub = type { i32 }
define void @foo() sanitize_address {
entry:
%0 = alloca %t, align 8
- %x = getelementptr inbounds %t, %t* %0, i64 0, i32 2
- %1 = bitcast %sub* %x to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %1)
- call void @bar(%sub* nonnull %x)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %1) #3
+ %x = getelementptr inbounds %t, ptr %0, i64 0, i32 2
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @bar(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) #3
ret void
}
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @bar(%sub*)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @bar(ptr)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-; CHECK: store i64 %[[STACK_BASE:.+]], i64* %asan_local_stack_base, align 8
+; CHECK: store i64 %[[STACK_BASE:.+]], ptr %asan_local_stack_base, align 8
; CHECK-NOT: store i8 0
-; CHECK: call void @bar(%sub* nonnull %x)
+; CHECK: call void @bar(ptr nonnull %x)
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-define i32 @mycmp(i8* %p, i8* %q) sanitize_address {
+define i32 @mycmp(ptr %p, ptr %q) sanitize_address {
; ALL-LABEL: @mycmp
; NOCMP-NOT: call void @__sanitizer_ptr_cmp
-; CMP: [[P:%[0-9A-Za-z]+]] = ptrtoint i8* %p to i64
-; CMP: [[Q:%[0-9A-Za-z]+]] = ptrtoint i8* %q to i64
- %x = icmp ule i8* %p, %q
+; CMP: [[P:%[0-9A-Za-z]+]] = ptrtoint ptr %p to i64
+; CMP: [[Q:%[0-9A-Za-z]+]] = ptrtoint ptr %q to i64
+ %x = icmp ule ptr %p, %q
; CMP: call void @__sanitizer_ptr_cmp(i64 [[P]], i64 [[Q]])
%y = zext i1 %x to i32
ret i32 %y
}
-define i32 @mysub(i8* %p, i8* %q) sanitize_address {
+define i32 @mysub(ptr %p, ptr %q) sanitize_address {
; ALL-LABEL: @mysub
; NOSUB-NOT: call void @__sanitizer_ptr_sub
-; SUB: [[P:%[0-9A-Za-z]+]] = ptrtoint i8* %p to i64
-; SUB: [[Q:%[0-9A-Za-z]+]] = ptrtoint i8* %q to i64
- %x = ptrtoint i8* %p to i64
- %y = ptrtoint i8* %q to i64
+; SUB: [[P:%[0-9A-Za-z]+]] = ptrtoint ptr %p to i64
+; SUB: [[Q:%[0-9A-Za-z]+]] = ptrtoint ptr %q to i64
+ %x = ptrtoint ptr %p to i64
+ %y = ptrtoint ptr %q to i64
%z = sub i64 %x, %y
; SUB: call void @__sanitizer_ptr_sub(i64 [[P]], i64 [[Q]])
  %w = trunc i64 %z to i32
  ret i32 %w
}
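
; These pointer-pair checks are off by default. They are enabled with the
; pass options -asan-detect-invalid-pointer-cmp and
; -asan-detect-invalid-pointer-sub (surfaced in clang as
; -fsanitize=pointer-compare and -fsanitize=pointer-subtract), which is what
; the CMP/NOCMP and SUB/NOSUB prefixes above model.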
; Function with sanitize_address is instrumented.
; Function Attrs: nounwind uwtable
-define void @instr_sa(i32* %a) sanitize_address {
+define void @instr_sa(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %a, align 4
+ store i32 %tmp2, ptr %a, align 4
ret void
}
; Function with disable_sanitizer_instrumentation is not instrumented.
; Function Attrs: nounwind uwtable
-define void @noinstr_dsi(i32* %a) disable_sanitizer_instrumentation {
+define void @noinstr_dsi(ptr %a) disable_sanitizer_instrumentation {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %a, align 4
+ store i32 %tmp2, ptr %a, align 4
ret void
}
; disable_sanitizer_instrumentation takes precedence over sanitize_address.
; Function Attrs: nounwind uwtable
-define void @noinstr_dsi_sa(i32* %a) disable_sanitizer_instrumentation sanitize_address {
+define void @noinstr_dsi_sa(ptr %a) disable_sanitizer_instrumentation sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %a, align 4
+ store i32 %tmp2, ptr %a, align 4
ret void
}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-@v4f32 = global <4 x float>* zeroinitializer, align 8
-@v8i32 = global <8 x i32>* zeroinitializer, align 8
-@v4i64 = global <4 x i32*>* zeroinitializer, align 8
+@v4f32 = global ptr zeroinitializer, align 8
+@v8i32 = global ptr zeroinitializer, align 8
+@v4i64 = global ptr zeroinitializer, align 8
;;;;;;;;;;;;;;;; STORE
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) argmemonly nounwind
-declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>) argmemonly nounwind
-declare void @llvm.masked.store.v4p0i32.p0v4p0i32(<4 x i32*>, <4 x i32*>*, i32, <4 x i1>) argmemonly nounwind
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>) argmemonly nounwind
+declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32, <8 x i1>) argmemonly nounwind
+declare void @llvm.masked.store.v4p0.p0(<4 x ptr>, ptr, i32, <4 x i1>) argmemonly nounwind
define void @store.v4f32.1110(<4 x float> %arg) sanitize_address {
; ALL-LABEL: @store.v4f32.1110
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
+ %p = load ptr, ptr @v4f32, align 8
; NOSTORE-NOT: call void @__asan_store
-; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP0]])
-; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 1
-; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP1]] to i64
+; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 1
+; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP1]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP1]])
-; STORE: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 2
-; STORE: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP2]] to i64
+; STORE: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 2
+; STORE: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP2]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP2]])
-; STORE: tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
- tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
+; STORE: tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
ret void
}
define void @store.v8i32.10010110(<8 x i32> %arg) sanitize_address {
; ALL-LABEL: @store.v8i32.10010110
- %p = load <8 x i32>*, <8 x i32>** @v8i32, align 8
+ %p = load ptr, ptr @v8i32, align 8
; NOSTORE-NOT: call void @__asan_store
-; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 0
-; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP0]] to i64
+; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 0
+; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP0]])
-; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 3
-; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP3]] to i64
+; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 3
+; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP3]])
-; STORE: [[GEP5:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 5
-; STORE: [[PGEP5:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP5]] to i64
+; STORE: [[GEP5:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 5
+; STORE: [[PGEP5:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP5]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP5]])
-; STORE: [[GEP6:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 6
-; STORE: [[PGEP6:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP6]] to i64
+; STORE: [[GEP6:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 6
+; STORE: [[PGEP6:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP6]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP6]])
-; STORE: tail call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %arg, <8 x i32>* %p, i32 8, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>)
- tail call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %arg, <8 x i32>* %p, i32 8, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>)
+; STORE: tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %arg, ptr %p, i32 8, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>)
+ tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %arg, ptr %p, i32 8, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>)
ret void
}
-define void @store.v4i64.0001(<4 x i32*> %arg) sanitize_address {
+define void @store.v4i64.0001(<4 x ptr> %arg) sanitize_address {
; ALL-LABEL: @store.v4i64.0001
- %p = load <4 x i32*>*, <4 x i32*>** @v4i64, align 8
+ %p = load ptr, ptr @v4i64, align 8
; NOSTORE-NOT: call void @__asan_store
-; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x i32*>, <4 x i32*>* %p, i64 0, i64 3
-; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint i32** [[GEP3]] to i64
+; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x ptr>, ptr %p, i64 0, i64 3
+; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; STORE: call void @__asan_store8(i64 [[PGEP3]])
-; STORE: tail call void @llvm.masked.store.v4p0i32.p0v4p0i32(<4 x i32*> %arg, <4 x i32*>* %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
- tail call void @llvm.masked.store.v4p0i32.p0v4p0i32(<4 x i32*> %arg, <4 x i32*>* %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
+; STORE: tail call void @llvm.masked.store.v4p0.p0(<4 x ptr> %arg, ptr %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
+ tail call void @llvm.masked.store.v4p0.p0(<4 x ptr> %arg, ptr %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
ret void
}
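
; With a compile-time-constant mask, the pass emits one __asan_storeN check
; per enabled lane only, sized by the element type: the 4-byte f32/i32 lanes
; in the three functions above use __asan_store4, the 8-byte pointer lane
; uses __asan_store8, and disabled lanes get no check at all.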
define void @store.v4f32.variable(<4 x float> %arg, <4 x i1> %mask) sanitize_address {
; ALL-LABEL: @store.v4f32.variable
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
+ %p = load ptr, ptr @v4f32, align 8
; STORE: [[MASK0:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 0
; STORE: br i1 [[MASK0]], label %[[THEN0:[0-9A-Za-z]+]], label %[[AFTER0:[0-9A-Za-z]+]]
; STORE: [[THEN0]]:
-; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP0]])
; STORE: br label %[[AFTER0]]
; STORE: [[AFTER0]]:
; STORE: [[MASK1:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 1
; STORE: br i1 [[MASK1]], label %[[THEN1:[0-9A-Za-z]+]], label %[[AFTER1:[0-9A-Za-z]+]]
; STORE: [[THEN1]]:
-; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 1
-; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP1]] to i64
+; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 1
+; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP1]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP1]])
; STORE: br label %[[AFTER1]]
; STORE: [[AFTER1]]:
; STORE: [[MASK2:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 2
; STORE: br i1 [[MASK2]], label %[[THEN2:[0-9A-Za-z]+]], label %[[AFTER2:[0-9A-Za-z]+]]
; STORE: [[THEN2]]:
-; STORE: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 2
-; STORE: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP2]] to i64
+; STORE: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 2
+; STORE: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP2]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP2]])
; STORE: br label %[[AFTER2]]
; STORE: [[AFTER2]]:
; STORE: [[MASK3:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 3
; STORE: br i1 [[MASK3]], label %[[THEN3:[0-9A-Za-z]+]], label %[[AFTER3:[0-9A-Za-z]+]]
; STORE: [[THEN3]]:
-; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 3
-; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP3]] to i64
+; STORE: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 3
+; STORE: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP3]])
; STORE: br label %[[AFTER3]]
; STORE: [[AFTER3]]:
-; STORE: tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> %mask)
- tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> %mask)
+; STORE: tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> %mask)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> %mask)
ret void
}
;; Store using two masked.stores; both should be instrumented.
define void @store.v4f32.1010.split(<4 x float> %arg) sanitize_address {
; BOTH-LABEL: @store.v4f32.1010.split
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
-; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+ %p = load ptr, ptr @v4f32, align 8
+; STORE: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; STORE: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP0]])
-; STORE: tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
- tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
-; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 2
-; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP1]] to i64
+; STORE: tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
+; STORE: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 2
+; STORE: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP1]] to i64
; STORE: call void @__asan_store4(i64 [[PGEP1]])
-; STORE: tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
- tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
+; STORE: tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
ret void
}
;; Store using a masked.store after a full store. Shouldn't instrument the second one.
define void @store.v4f32.0010.after.full.store(<4 x float> %arg) sanitize_address {
; BOTH-LABEL: @store.v4f32.0010.after.full.store
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
-; STORE: [[PTRTOINT:%[0-9A-Za-z]+]] = ptrtoint <4 x float>* %p to i64
+ %p = load ptr, ptr @v4f32, align 8
+; STORE: [[PTRTOINT:%[0-9A-Za-z]+]] = ptrtoint ptr %p to i64
; STORE: call void @__asan_store16(i64 [[PTRTOINT]])
-; STORE: store <4 x float> %arg, <4 x float>* %p
- store <4 x float> %arg, <4 x float>* %p
+; STORE: store <4 x float> %arg, ptr %p
+ store <4 x float> %arg, ptr %p
; STORE-NOT: call void @__asan_store
-; STORE: tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
- tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %arg, <4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
+; STORE: tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %arg, ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 true, i1 false>)
ret void
}
;;;;;;;;;;;;;;;; LOAD
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) argmemonly nounwind
-declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) argmemonly nounwind
-declare <4 x i32*> @llvm.masked.load.v4p0i32.p0v4p0i32(<4 x i32*>*, i32, <4 x i1>, <4 x i32*>) argmemonly nounwind
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>) argmemonly nounwind
+declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>) argmemonly nounwind
+declare <4 x ptr> @llvm.masked.load.v4p0.p0(ptr, i32, <4 x i1>, <4 x ptr>) argmemonly nounwind
define <8 x i32> @load.v8i32.11100001(<8 x i32> %arg) sanitize_address {
; ALL-LABEL: @load.v8i32.11100001
- %p = load <8 x i32>*, <8 x i32>** @v8i32, align 8
+ %p = load ptr, ptr @v8i32, align 8
; NOLOAD-NOT: call void @__asan_load
-; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 0
-; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP0]] to i64
+; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 0
+; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP0]])
-; LOAD: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 1
-; LOAD: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP1]] to i64
+; LOAD: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 1
+; LOAD: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP1]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP1]])
-; LOAD: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 2
-; LOAD: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP2]] to i64
+; LOAD: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 2
+; LOAD: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP2]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP2]])
-; LOAD: [[GEP7:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, <8 x i32>* %p, i64 0, i64 7
-; LOAD: [[PGEP7:%[0-9A-Za-z]+]] = ptrtoint i32* [[GEP7]] to i64
+; LOAD: [[GEP7:%[0-9A-Za-z]+]] = getelementptr <8 x i32>, ptr %p, i64 0, i64 7
+; LOAD: [[PGEP7:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP7]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP7]])
-; LOAD: tail call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %p, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> %arg)
- %res = tail call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %p, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> %arg)
+; LOAD: tail call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %p, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> %arg)
+ %res = tail call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %p, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> %arg)
ret <8 x i32> %res
}
define <4 x float> @load.v4f32.1001(<4 x float> %arg) sanitize_address {
; ALL-LABEL: @load.v4f32.1001
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
+ %p = load ptr, ptr @v4f32, align 8
; NOLOAD-NOT: call void @__asan_load
-; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP0]])
-; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 3
-; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP3]] to i64
+; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 3
+; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP3]])
-; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %arg)
- %res = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %arg)
+; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %arg)
+ %res = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %arg)
ret <4 x float> %res
}
-define <4 x i32*> @load.v4i64.0001(<4 x i32*> %arg) sanitize_address {
+define <4 x ptr> @load.v4i64.0001(<4 x ptr> %arg) sanitize_address {
; ALL-LABEL: @load.v4i64.0001
- %p = load <4 x i32*>*, <4 x i32*>** @v4i64, align 8
+ %p = load ptr, ptr @v4i64, align 8
; NOLOAD-NOT: call void @__asan_load
-; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x i32*>, <4 x i32*>* %p, i64 0, i64 3
-; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint i32** [[GEP3]] to i64
+; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x ptr>, ptr %p, i64 0, i64 3
+; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; LOAD: call void @__asan_load8(i64 [[PGEP3]])
-; LOAD: tail call <4 x i32*> @llvm.masked.load.v4p0i32.p0v4p0i32(<4 x i32*>* %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32*> %arg)
- %res = tail call <4 x i32*> @llvm.masked.load.v4p0i32.p0v4p0i32(<4 x i32*>* %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32*> %arg)
- ret <4 x i32*> %res
+; LOAD: tail call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x ptr> %arg)
+ %res = tail call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr %p, i32 8, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x ptr> %arg)
+ ret <4 x ptr> %res
}
define <4 x float> @load.v4f32.variable(<4 x float> %arg, <4 x i1> %mask) sanitize_address {
; ALL-LABEL: @load.v4f32.variable
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
+ %p = load ptr, ptr @v4f32, align 8
; LOAD: [[MASK0:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 0
; LOAD: br i1 [[MASK0]], label %[[THEN0:[0-9A-Za-z]+]], label %[[AFTER0:[0-9A-Za-z]+]]
; LOAD: [[THEN0]]:
-; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP0]])
; LOAD: br label %[[AFTER0]]
; LOAD: [[AFTER0]]:
; LOAD: [[MASK1:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 1
; LOAD: br i1 [[MASK1]], label %[[THEN1:[0-9A-Za-z]+]], label %[[AFTER1:[0-9A-Za-z]+]]
; LOAD: [[THEN1]]:
-; LOAD: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 1
-; LOAD: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP1]] to i64
+; LOAD: [[GEP1:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 1
+; LOAD: [[PGEP1:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP1]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP1]])
; LOAD: br label %[[AFTER1]]
; LOAD: [[AFTER1]]:
; LOAD: [[MASK2:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 2
; LOAD: br i1 [[MASK2]], label %[[THEN2:[0-9A-Za-z]+]], label %[[AFTER2:[0-9A-Za-z]+]]
; LOAD: [[THEN2]]:
-; LOAD: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 2
-; LOAD: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP2]] to i64
+; LOAD: [[GEP2:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 2
+; LOAD: [[PGEP2:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP2]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP2]])
; LOAD: br label %[[AFTER2]]
; LOAD: [[AFTER2]]:
; LOAD: [[MASK3:%[0-9A-Za-z]+]] = extractelement <4 x i1> %mask, i64 3
; LOAD: br i1 [[MASK3]], label %[[THEN3:[0-9A-Za-z]+]], label %[[AFTER3:[0-9A-Za-z]+]]
; LOAD: [[THEN3]]:
-; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 3
-; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP3]] to i64
+; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 3
+; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP3]])
; LOAD: br label %[[AFTER3]]
; LOAD: [[AFTER3]]:
-; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> %mask, <4 x float> %arg)
- %res = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> %mask, <4 x float> %arg)
+; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> %mask, <4 x float> %arg)
+ %res = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> %mask, <4 x float> %arg)
ret <4 x float> %res
}
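;; Editor's sketch of the expansion the CHECK lines above pin down for a
;; variable mask (lane 0 only; value names are illustrative, not the pass's
;; actual names):
;;   %m0 = extractelement <4 x i1> %mask, i64 0
;;   br i1 %m0, label %then0, label %after0
;; then0:
;;   %g0 = getelementptr <4 x float>, ptr %p, i64 0, i64 0
;;   %a0 = ptrtoint ptr %g0 to i64
;;   call void @__asan_load4(i64 %a0)
;;   br label %after0
;; after0:
;;   ; ...repeated for lanes 1-3, then the masked.load itself executes.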
;; Load using two masked.loads; the pass should instrument them both.
define <4 x float> @load.v4f32.1001.split(<4 x float> %arg) sanitize_address {
; BOTH-LABEL: @load.v4f32.1001.split
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
-; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 0
-; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP0]] to i64
+ %p = load ptr, ptr @v4f32, align 8
+; LOAD: [[GEP0:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 0
+; LOAD: [[PGEP0:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP0]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP0]])
-; LOAD: %res = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %arg)
- %res = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %arg)
-; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, <4 x float>* %p, i64 0, i64 3
-; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint float* [[GEP3]] to i64
+; LOAD: %res = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %arg)
+ %res = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %arg)
+; LOAD: [[GEP3:%[0-9A-Za-z]+]] = getelementptr <4 x float>, ptr %p, i64 0, i64 3
+; LOAD: [[PGEP3:%[0-9A-Za-z]+]] = ptrtoint ptr [[GEP3]] to i64
; LOAD: call void @__asan_load4(i64 [[PGEP3]])
-; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %res)
- %res2 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %res)
+; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %res)
+ %res2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %res)
ret <4 x float> %res2
}
;; Load using a masked.load after a full load. The second access shouldn't be
;; instrumented: the full load has already checked all 16 bytes.
define <4 x float> @load.v4f32.1001.after.full.load(<4 x float> %arg) sanitize_address {
; BOTH-LABEL: @load.v4f32.1001.after.full.load
- %p = load <4 x float>*, <4 x float>** @v4f32, align 8
-; LOAD: [[PTRTOINT:%[0-9A-Za-z]+]] = ptrtoint <4 x float>* %p to i64
+ %p = load ptr, ptr @v4f32, align 8
+; LOAD: [[PTRTOINT:%[0-9A-Za-z]+]] = ptrtoint ptr %p to i64
; LOAD: call void @__asan_load16(i64 [[PTRTOINT]])
-; LOAD: %res = load <4 x float>, <4 x float>* %p
- %res = load <4 x float>, <4 x float>* %p
+; LOAD: %res = load <4 x float>, ptr %p
+ %res = load <4 x float>, ptr %p
; LOAD-NOT: call void @__asan_load
-; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %arg)
- %res2 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %arg)
+; LOAD: tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %arg)
+ %res2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 4, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %arg)
ret <4 x float> %res2
}
target triple = "x86_64-unknown-linux-gnu"
-define void @load(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+define void @load(ptr %p1, ptr %p2, ptr %p4, ptr %p8, ptr %p16)
sanitize_address {
- %n1 = load i8, i8* %p1, align 1
- %n2 = load i16, i16* %p2, align 2
- %n4 = load i32, i32* %p4, align 4
- %n8 = load i64, i64* %p8, align 8
- %n16 = load i128, i128* %p16, align 16
-; LOAD: call void @llvm.asan.check.memaccess(i8* %p1, i32 0)
-; LOAD-NEXT: %n1 = load i8, i8* %p1, align 1
-; LOAD-NEXT: %1 = bitcast i16* %p2 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 2)
-; LOAD-NEXT: %n2 = load i16, i16* %p2, align 2
-; LOAD-NEXT: %2 = bitcast i32* %p4 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 4)
-; LOAD-NEXT: %n4 = load i32, i32* %p4, align 4
-; LOAD-NEXT: %3 = bitcast i64* %p8 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 6)
-; LOAD-NEXT: %n8 = load i64, i64* %p8, align 8
-; LOAD-NEXT: %4 = bitcast i128* %p16 to i8*
-; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 8)
-; LOAD-NEXT: %n16 = load i128, i128* %p16, align 16
+ %n1 = load i8, ptr %p1, align 1
+ %n2 = load i16, ptr %p2, align 2
+ %n4 = load i32, ptr %p4, align 4
+ %n8 = load i64, ptr %p8, align 8
+ %n16 = load i128, ptr %p16, align 16
+; LOAD: call void @llvm.asan.check.memaccess(ptr %p1, i32 0)
+; LOAD-NEXT: %n1 = load i8, ptr %p1, align 1
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(ptr %p2, i32 2)
+; LOAD-NEXT: %n2 = load i16, ptr %p2, align 2
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(ptr %p4, i32 4)
+; LOAD-NEXT: %n4 = load i32, ptr %p4, align 4
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(ptr %p8, i32 6)
+; LOAD-NEXT: %n8 = load i64, ptr %p8, align 8
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(ptr %p16, i32 8)
+; LOAD-NEXT: %n16 = load i128, ptr %p16, align 16
-; LOAD-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 1)
-; LOAD-KERNEL-NEXT: %n1 = load i8, i8* %p1, align 1
-; LOAD-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 3)
-; LOAD-KERNEL-NEXT: %n2 = load i16, i16* %p2, align 2
-; LOAD-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 5)
-; LOAD-KERNEL-NEXT: %n4 = load i32, i32* %p4, align 4
-; LOAD-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 7)
-; LOAD-KERNEL-NEXT: %n8 = load i64, i64* %p8, align 8
-; LOAD-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
-; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 9)
-; LOAD-KERNEL-NEXT: %n16 = load i128, i128* %p16, align 16
+; LOAD-KERNEL: call void @llvm.asan.check.memaccess(ptr %p1, i32 1)
+; LOAD-KERNEL-NEXT: %n1 = load i8, ptr %p1, align 1
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p2, i32 3)
+; LOAD-KERNEL-NEXT: %n2 = load i16, ptr %p2, align 2
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p4, i32 5)
+; LOAD-KERNEL-NEXT: %n4 = load i32, ptr %p4, align 4
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p8, i32 7)
+; LOAD-KERNEL-NEXT: %n8 = load i64, ptr %p8, align 8
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p16, i32 9)
+; LOAD-KERNEL-NEXT: %n16 = load i128, ptr %p16, align 16
ret void
}
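; Editor's note: the i32 constants above are consistent with a packed access
; descriptor -- bit 0 = compile-kernel flag, bits 1+ = log2(access size),
; bit 5 = is-write (inferred from the values, not quoted from the pass):
;   load  i128, kernel: (4 << 1) | 1        = 9   (matches LOAD-KERNEL above)
;   store i128, user:   (1 << 5) | (4 << 1) = 40  (matches STORE below)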
-define void @store(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+define void @store(ptr %p1, ptr %p2, ptr %p4, ptr %p8, ptr %p16)
sanitize_address {
- store i8 0, i8* %p1, align 1
- store i16 0, i16* %p2, align 2
- store i32 0, i32* %p4, align 4
- store i64 0, i64* %p8, align 8
- store i128 0, i128* %p16, align 16
-; STORE: call void @llvm.asan.check.memaccess(i8* %p1, i32 32)
-; STORE-NEXT: store i8 0, i8* %p1, align 1
-; STORE-NEXT: %1 = bitcast i16* %p2 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 34)
-; STORE-NEXT: store i16 0, i16* %p2, align 2
-; STORE-NEXT: %2 = bitcast i32* %p4 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 36)
-; STORE-NEXT: store i32 0, i32* %p4, align 4
-; STORE-NEXT: %3 = bitcast i64* %p8 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 38)
-; STORE-NEXT: store i64 0, i64* %p8, align 8
-; STORE-NEXT: %4 = bitcast i128* %p16 to i8*
-; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 40)
-; STORE-NEXT: store i128 0, i128* %p16, align 16
+ store i8 0, ptr %p1, align 1
+ store i16 0, ptr %p2, align 2
+ store i32 0, ptr %p4, align 4
+ store i64 0, ptr %p8, align 8
+ store i128 0, ptr %p16, align 16
+; STORE: call void @llvm.asan.check.memaccess(ptr %p1, i32 32)
+; STORE-NEXT: store i8 0, ptr %p1, align 1
+; STORE-NEXT: call void @llvm.asan.check.memaccess(ptr %p2, i32 34)
+; STORE-NEXT: store i16 0, ptr %p2, align 2
+; STORE-NEXT: call void @llvm.asan.check.memaccess(ptr %p4, i32 36)
+; STORE-NEXT: store i32 0, ptr %p4, align 4
+; STORE-NEXT: call void @llvm.asan.check.memaccess(ptr %p8, i32 38)
+; STORE-NEXT: store i64 0, ptr %p8, align 8
+; STORE-NEXT: call void @llvm.asan.check.memaccess(ptr %p16, i32 40)
+; STORE-NEXT: store i128 0, ptr %p16, align 16
-; STORE-KERNEL: call void @llvm.asan.check.memaccess(i8* %p1, i32 33)
-; STORE-KERNEL-NEXT: store i8 0, i8* %p1, align 1
-; STORE-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 35)
-; STORE-KERNEL-NEXT: store i16 0, i16* %p2, align 2
-; STORE-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 37)
-; STORE-KERNEL-NEXT: store i32 0, i32* %p4, align 4
-; STORE-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 39)
-; STORE-KERNEL-NEXT: store i64 0, i64* %p8, align 8
-; STORE-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
-; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 41)
-; STORE-KERNEL-NEXT: store i128 0, i128* %p16, align 16
+; STORE-KERNEL: call void @llvm.asan.check.memaccess(ptr %p1, i32 33)
+; STORE-KERNEL-NEXT: store i8 0, ptr %p1, align 1
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p2, i32 35)
+; STORE-KERNEL-NEXT: store i16 0, ptr %p2, align 2
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p4, i32 37)
+; STORE-KERNEL-NEXT: store i32 0, ptr %p4, align 4
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p8, i32 39)
+; STORE-KERNEL-NEXT: store i64 0, ptr %p8, align 8
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(ptr %p16, i32 41)
+; STORE-KERNEL-NEXT: store i128 0, ptr %p16, align 16
; STORE-KERNEL-NEXT: ret void
ret void
}
define i32 @load() sanitize_address {
%buf = alloca [10 x i8], align 1
; NOSAFETY: call i64 @__asan_stack_malloc
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
- %1 = load i8, i8* %arrayidx, align 1
+ %1 = load i8, ptr %buf, align 1
; NOSAFETY: call void @__asan_load1
ret i32 0
}
define i32 @store() sanitize_address {
%buf = alloca [10 x i8], align 1
; NOSAFETY: call i64 @__asan_stack_malloc
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
- store i8 0, i8* %arrayidx
+ store i8 0, ptr %buf
; NOSAFETY: call void @__asan_store1
ret i32 0
}
define i32 @unsafe_alloca(i32 %i) sanitize_address {
%buf.sroa.0 = alloca [10 x i8], align 4
; CHECK: call i64 @__asan_stack_malloc
- %ptr = getelementptr [10 x i8], [10 x i8]* %buf.sroa.0, i32 %i, i32 0
- store volatile i8 0, i8* %ptr, align 4
+ %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 %i, i32 0
+ store volatile i8 0, ptr %ptr, align 4
; CHECK: call void @__asan_store1
- %ptr2 = getelementptr [10 x i8], [10 x i8]* %buf.sroa.0, i32 0, i32 0
- store volatile i8 0, i8* %ptr2, align 4
+ store volatile i8 0, ptr %buf.sroa.0, align 4
; NOSAFETY: call void @__asan_store1
ret i32 0
}
define void @atomicrmw() sanitize_address {
%buf = alloca [10 x i8], align 1
; NOSAFETY: call i64 @__asan_stack_malloc
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
- %1 = atomicrmw add i8* %arrayidx, i8 1 seq_cst
+ %1 = atomicrmw add ptr %buf, i8 1 seq_cst
; NOSAFETY: call void @__asan_store1
ret void
}
define void @cmpxchg(i8 %compare_to, i8 %new_value) sanitize_address {
%buf = alloca [10 x i8], align 1
; NOSAFETY: call i64 @__asan_stack_malloc
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
- %1 = cmpxchg i8* %arrayidx, i8 %compare_to, i8 %new_value seq_cst seq_cst
+ %1 = cmpxchg ptr %buf, i8 %compare_to, i8 %new_value seq_cst seq_cst
; NOSAFETY: call void @__asan_store1
ret void
}
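; Editor's note: atomicrmw and cmpxchg both (potentially) write their operand,
; so ASan classifies them as stores; hence __asan_store1 for these 1-byte
; accesses rather than __asan_load1.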
; Accessing bytes 4 and 6, not ok to widen to i32 if sanitize_address is set.
-define i32 @test_widening_bad(i8* %P) nounwind ssp noredzone sanitize_address {
+define i32 @test_widening_bad(ptr %P) nounwind ssp noredzone sanitize_address {
entry:
- %tmp = load i8, i8* getelementptr inbounds (%struct_of_7_bytes_4_aligned, %struct_of_7_bytes_4_aligned* @f, i64 0, i32 1), align 4
+ %tmp = load i8, ptr getelementptr inbounds (%struct_of_7_bytes_4_aligned, ptr @f, i64 0, i32 1), align 4
%conv = zext i8 %tmp to i32
- %tmp1 = load i8, i8* getelementptr inbounds (%struct_of_7_bytes_4_aligned, %struct_of_7_bytes_4_aligned* @f, i64 0, i32 3), align 1
+ %tmp1 = load i8, ptr getelementptr inbounds (%struct_of_7_bytes_4_aligned, ptr @f, i64 0, i32 3), align 1
%conv2 = zext i8 %tmp1 to i32
%add = add nsw i32 %conv, %conv2
 ret i32 %add
}
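; Editor's note: widening the two byte loads into a single i32 would read
; frame bytes [4, 8), but %struct_of_7_bytes_4_aligned ends at byte 7, so the
; widened load would touch redzone and produce a false ASan report.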
;; Accessing bytes 4 and 5. Ok to widen to i16.
-define i32 @test_widening_ok(i8* %P) nounwind ssp noredzone sanitize_address {
+define i32 @test_widening_ok(ptr %P) nounwind ssp noredzone sanitize_address {
entry:
- %tmp = load i8, i8* getelementptr inbounds (%struct_of_7_bytes_4_aligned, %struct_of_7_bytes_4_aligned* @f, i64 0, i32 1), align 4
+ %tmp = load i8, ptr getelementptr inbounds (%struct_of_7_bytes_4_aligned, ptr @f, i64 0, i32 1), align 4
%conv = zext i8 %tmp to i32
- %tmp1 = load i8, i8* getelementptr inbounds (%struct_of_7_bytes_4_aligned, %struct_of_7_bytes_4_aligned* @f, i64 0, i32 2), align 1
+ %tmp1 = load i8, ptr getelementptr inbounds (%struct_of_7_bytes_4_aligned, ptr @f, i64 0, i32 2), align 1
%conv2 = zext i8 %tmp1 to i32
%add = add nsw i32 %conv, %conv2
 ret i32 %add
}
; Checks that we do not instrument loads and stores coming from a custom address space.
; These result in invalid (false positive) reports.
; int foo(int argc, const char * argv[]) {
; void *__attribute__((address_space(256))) *gs_base = (((void * __attribute__((address_space(256))) *)0));
; void *somevalue = gs_base[-1];
; return somevalue;
; }
-define i32 @foo(i32 %argc, i8** %argv) sanitize_address {
+define i32 @foo(i32 %argc, ptr %argv) sanitize_address {
entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
- %argv.addr = alloca i8**, align 8
- %gs_base = alloca i8* addrspace(256)*, align 8
- %somevalue = alloca i8*, align 8
- store i32 0, i32* %retval, align 4
- store i32 %argc, i32* %argc.addr, align 4
- store i8** %argv, i8*** %argv.addr, align 8
- store i8* addrspace(256)* null, i8* addrspace(256)** %gs_base, align 8
- %0 = load i8* addrspace(256)*, i8* addrspace(256)** %gs_base, align 8
- %arrayidx = getelementptr inbounds i8*, i8* addrspace(256)* %0, i64 -1
- %1 = load i8*, i8* addrspace(256)* %arrayidx, align 8
- store i8* %1, i8** %somevalue, align 8
- %2 = load i8*, i8** %somevalue, align 8
- %3 = ptrtoint i8* %2 to i32
+ %argv.addr = alloca ptr, align 8
+ %gs_base = alloca ptr addrspace(256), align 8
+ %somevalue = alloca ptr, align 8
+ store i32 0, ptr %retval, align 4
+ store i32 %argc, ptr %argc.addr, align 4
+ store ptr %argv, ptr %argv.addr, align 8
+ store ptr addrspace(256) null, ptr %gs_base, align 8
+ %0 = load ptr addrspace(256), ptr %gs_base, align 8
+ %arrayidx = getelementptr inbounds ptr, ptr addrspace(256) %0, i64 -1
+ %1 = load ptr, ptr addrspace(256) %arrayidx, align 8
+ store ptr %1, ptr %somevalue, align 8
+ %2 = load ptr, ptr %somevalue, align 8
+ %3 = ptrtoint ptr %2 to i32
ret i32 %3
}
; CHECK-NOT: call void @__asan_report_load8
target triple = "x86_64-pc-windows-msvc"
; CHECK: @llvm.global_ctors = {{.*}}@asan.module_ctor
-define i32 @test_load(i32* %a) sanitize_address {
+define i32 @test_load(ptr %a) sanitize_address {
; First instrumentation in the function must be to load the dynamic shadow
; address into a local variable.
; CHECK-LABEL: @test_load
; CHECK: entry:
-; CHECK-NEXT: %[[SHADOW:[^ ]*]] = load i64, i64* @__asan_shadow_memory_dynamic_address
+; CHECK-NEXT: %[[SHADOW:[^ ]*]] = load i64, ptr @__asan_shadow_memory_dynamic_address
; Shadow address is loaded and added into the whole offset computation.
; CHECK: add i64 %{{.*}}, %[[SHADOW]]
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
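; Editor's note: with a dynamic shadow base the mapping becomes
;   Shadow(Addr) = (Addr >> 3) + ShadowBase
; where ShadowBase is the value loaded from
; @__asan_shadow_memory_dynamic_address; the constant-offset form used on
; other targets is not available here. (The checks above only pin the load
; and the add; the scale-3 shift is the usual default.)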
-define i32 @__asan_options(i32* %a) sanitize_address {
+define i32 @__asan_options(ptr %a) sanitize_address {
; ASan runtime functions are not instrumented. An ASan function may be called
; by __asan_init before the shadow memory is initialised, which would lead to
; incorrect behavior of the instrumented code.
; CHECK-LABEL: @__asan_options
; CHECK: entry:
-; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT: %tmp1 = load i32, ptr %a, align 4
; CHECK-NEXT: ret i32 %tmp1
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @asan.module_ctor to i8*)]
-; CHECK: @llvm.global_ctors = {{.*}}{ i32 1, void ()* @asan.module_ctor, i8* bitcast (void ()* @asan.module_ctor to i8*) }
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @asan.module_ctor]
+; CHECK: @llvm.global_ctors = {{.*}}{ i32 1, ptr @asan.module_ctor, ptr @asan.module_ctor }
-define i32 @test_load(i32* %a) sanitize_address {
+define i32 @test_load(ptr %a) sanitize_address {
; CHECK-LABEL: @test_load
; CHECK-NOT: load
-; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
+; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint ptr %a to i64
; CHECK-S3: lshr i64 %[[LOAD_ADDR]], 3
; CHECK-S5: lshr i64 %[[LOAD_ADDR]], 5
; CHECK: {{or|add}}
; CHECK: %[[LOAD_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, i8* %[[LOAD_SHADOW_PTR]]
+; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8, ptr %[[LOAD_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}!prof ![[PROF:[0-9]+]]
;
; CHECK: unreachable
;
; The actual load.
-; CHECK: %tmp1 = load i32, i32* %a
+; CHECK: %tmp1 = load i32, ptr %a
; CHECK: ret i32 %tmp1
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
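; Editor's note: the two prefixes pin the shadow scale in
;   Shadow(Addr) = (Addr >> Scale) + Offset
; Scale = 3 for CHECK-S3 and 5 for CHECK-S5; the {{or|add}} above is the
; Offset combination, which the pass may emit as either opcode.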
-define void @test_store(i32* %a) sanitize_address {
+define void @test_store(ptr %a) sanitize_address {
; CHECK-LABEL: @test_store
; CHECK-NOT: store
-; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
+; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint ptr %a to i64
; CHECK-S3: lshr i64 %[[STORE_ADDR]], 3
; CHECK-S5: lshr i64 %[[STORE_ADDR]], 5
; CHECK: {{or|add}}
; CHECK: %[[STORE_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, i8* %[[STORE_SHADOW_PTR]]
+; CHECK: %[[STORE_SHADOW:[^ ]*]] = load i8, ptr %[[STORE_SHADOW_PTR]]
; CHECK: icmp ne i8
; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
;
; CHECK: unreachable
;
; The actual store.
-; CHECK: store i32 42, i32* %a
+; CHECK: store i32 42, ptr %a
; CHECK: ret void
;
entry:
- store i32 42, i32* %a, align 4
+ store i32 42, ptr %a, align 4
ret void
}
; Check that asan leaves just one alloca.
-declare void @alloca_test_use([10 x i8]*)
+declare void @alloca_test_use(ptr)
define void @alloca_test() sanitize_address {
entry:
%x = alloca [10 x i8], align 1
%y = alloca [10 x i8], align 1
%z = alloca [10 x i8], align 1
- call void @alloca_test_use([10 x i8]* %x)
- call void @alloca_test_use([10 x i8]* %y)
- call void @alloca_test_use([10 x i8]* %z)
+ call void @alloca_test_use(ptr %x)
+ call void @alloca_test_use(ptr %y)
+ call void @alloca_test_use(ptr %z)
ret void
}
; CHECK-NOT: = alloca
; CHECK: ret void
-define void @LongDoubleTest(x86_fp80* nocapture %a) nounwind uwtable sanitize_address {
+define void @LongDoubleTest(ptr nocapture %a) nounwind uwtable sanitize_address {
entry:
- store x86_fp80 0xK3FFF8000000000000000, x86_fp80* %a, align 16
+ store x86_fp80 0xK3FFF8000000000000000, ptr %a, align 16
ret void
}
; CHECK: ret void
-define void @i40test(i40* %a, i40* %b) nounwind uwtable sanitize_address {
+define void @i40test(ptr %a, ptr %b) nounwind uwtable sanitize_address {
entry:
- %t = load i40, i40* %a
- store i40 %t, i40* %b, align 8
+ %t = load i40, ptr %a
+ store i40 %t, ptr %b, align 8
ret void
}
; CHECK: __asan_report_store_n{{.*}}, i64 5)
; CHECK: ret void
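; Editor's note: an i40 access spans 40/8 = 5 bytes, a size with no fixed
; __asan_*1/2/4/8/16 callback, so the pass falls back to the *_n form with an
; explicit size operand -- the ", i64 5)" matched above.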
-define void @i64test_align1(i64* %b) nounwind uwtable sanitize_address {
+define void @i64test_align1(ptr %b) nounwind uwtable sanitize_address {
entry:
- store i64 0, i64* %b, align 1
+ store i64 0, ptr %b, align 1
ret void
}
; CHECK: ret void
-define void @i80test(i80* %a, i80* %b) nounwind uwtable sanitize_address {
+define void @i80test(ptr %a, ptr %b) nounwind uwtable sanitize_address {
entry:
- %t = load i80, i80* %a
- store i80 %t, i80* %b, align 8
+ %t = load i80, ptr %a
+ store i80 %t, ptr %b, align 8
ret void
}
; CHECK: ret void
; asan should not instrument functions with available_externally linkage.
-define available_externally i32 @f_available_externally(i32* %a) sanitize_address {
+define available_externally i32 @f_available_externally(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a
+ %tmp1 = load i32, ptr %a
ret i32 %tmp1
}
; CHECK-LABEL: @f_available_externally
; CHECK-LABEL: @test_swifterror
; CHECK-NOT: __asan_report_load
; CHECK: ret void
-define void @test_swifterror(i8** swifterror) sanitize_address {
- %swifterror_ptr_value = load i8*, i8** %0
+define void @test_swifterror(ptr swifterror) sanitize_address {
+ %swifterror_ptr_value = load ptr, ptr %0
ret void
}
; CHECK-LABEL: @test_swifterror_2
; CHECK-NOT: __asan_report_store
; CHECK: ret void
-define void @test_swifterror_2(i8** swifterror) sanitize_address {
- store i8* null, i8** %0
+define void @test_swifterror_2(ptr swifterror) sanitize_address {
+ store ptr null, ptr %0
ret void
}
; CHECK-NOT: __asan_report_store
; CHECK: ret void
define void @test_swifterror_3() sanitize_address {
- %swifterror_addr = alloca swifterror i8*
- store i8* null, i8** %swifterror_addr
- call void @test_swifterror_2(i8** swifterror %swifterror_addr)
+ %swifterror_addr = alloca swifterror ptr
+ store ptr null, ptr %swifterror_addr
+ call void @test_swifterror_2(ptr swifterror %swifterror_addr)
ret void
}
target triple = "x86_64-unknown-linux-gnu"
%struct.bar = type { %struct.foo }
-%struct.foo = type { i8*, i8*, i8* }
+%struct.foo = type { ptr, ptr, ptr }
; CHECK-LABEL: @func2
; CHECK-NEXT: tail call void @func1(
; CHECK-NEXT: ret void
-define dso_local void @func2(%struct.foo* %foo) sanitize_address {
- tail call void @func1(%struct.foo* byref(%struct.foo) align 8 %foo) #2
+define dso_local void @func2(ptr %foo) sanitize_address {
+ tail call void @func1(ptr byref(%struct.foo) align 8 %foo) #2
ret void
}
-declare dso_local void @func1(%struct.foo* byref(%struct.foo) align 8)
+declare dso_local void @func1(ptr byref(%struct.foo) align 8)
target triple = "x86_64-unknown-linux-gnu"
%struct.bar = type { %struct.foo }
-%struct.foo = type { i8*, i8*, i8* }
-define dso_local void @func2(%struct.foo* %foo) sanitize_address {
+%struct.foo = type { ptr, ptr, ptr }
+define dso_local void @func2(ptr %foo) sanitize_address {
; CHECK-LABEL: @func2
- tail call void @func1(%struct.foo* byval(%struct.foo) align 8 %foo) #2
+ tail call void @func1(ptr byval(%struct.foo) align 8 %foo) #2
; CHECK: call void @__asan_report_load
ret void
; CHECK: ret void
}
-declare dso_local void @func1(%struct.foo* byval(%struct.foo) align 8)
+declare dso_local void @func1(ptr byval(%struct.foo) align 8)
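; Editor's note: unlike the byref case above, byval makes the call site itself
; copy the whole %struct.foo, so ASan must check that implicit read -- hence
; the __asan_report_load expected here, and none for byref.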
!0 = !{i32 1, !"wchar_size", i32 4}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define dso_local i32 @main(i32 %argc, i8** %argv) #0 !dbg !15 {
+define dso_local i32 @main(i32 %argc, ptr %argv) #0 !dbg !15 {
entry:
; No suffix like !dbg !123
; CHECK: %asan_local_stack_base = alloca i64, align 8{{$}}
; CHECK: %3 = call i64 @__asan_stack_malloc_0(i64 64){{$}}
%argc.addr = alloca i32, align 4
- %argv.addr = alloca i8**, align 8
- store i32 %argc, i32* %argc.addr, align 4
- call void @llvm.dbg.declare(metadata i32* %argc.addr, metadata !21, metadata !DIExpression()), !dbg !22
- store i8** %argv, i8*** %argv.addr, align 8
- call void @llvm.dbg.declare(metadata i8*** %argv.addr, metadata !23, metadata !DIExpression()), !dbg !24
- call void @f(i32* %argc.addr), !dbg !25
+ %argv.addr = alloca ptr, align 8
+ store i32 %argc, ptr %argc.addr, align 4
+ call void @llvm.dbg.declare(metadata ptr %argc.addr, metadata !21, metadata !DIExpression()), !dbg !22
+ store ptr %argv, ptr %argv.addr, align 8
+ call void @llvm.dbg.declare(metadata ptr %argv.addr, metadata !23, metadata !DIExpression()), !dbg !24
+ call void @f(ptr %argc.addr), !dbg !25
ret i32 0, !dbg !26
}
-define dso_local void @f(i32* %arg) #0 !dbg !7 {
+define dso_local void @f(ptr %arg) #0 !dbg !7 {
entry:
- %arg.addr = alloca i32*, align 8
- store i32* %arg, i32** %arg.addr, align 8
- call void @llvm.dbg.declare(metadata i32** %arg.addr, metadata !12, metadata !DIExpression()), !dbg !13
+ %arg.addr = alloca ptr, align 8
+ store ptr %arg, ptr %arg.addr, align 8
+ call void @llvm.dbg.declare(metadata ptr %arg.addr, metadata !12, metadata !DIExpression()), !dbg !13
ret void, !dbg !14
}
entry:
%p.addr = alloca i32, align 4
%r = alloca i32, align 4
- store volatile i32 %p, i32* %p.addr, align 4
- call void @llvm.dbg.declare(metadata i32* %p.addr, metadata !17, metadata !DIExpression()), !dbg !18
- call void @llvm.dbg.declare(metadata i32* %r, metadata !19, metadata !DIExpression()), !dbg !21
- %0 = load i32, i32* %p.addr, align 4, !dbg !21
+ store volatile i32 %p, ptr %p.addr, align 4
+ call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !17, metadata !DIExpression()), !dbg !18
+ call void @llvm.dbg.declare(metadata ptr %r, metadata !19, metadata !DIExpression()), !dbg !21
+ %0 = load i32, ptr %p.addr, align 4, !dbg !21
%add = add nsw i32 %0, 1, !dbg !21
- store volatile i32 %add, i32* %r, align 4, !dbg !21
- %1 = load i32, i32* %r, align 4, !dbg !22
+ store volatile i32 %add, ptr %r, align 4, !dbg !21
+ %1 = load i32, ptr %r, align 4, !dbg !22
ret i32 %1, !dbg !22
}
; Note: these dbg.declares used to contain `ptrtoint` operands. The instruction
; selector would then decline to put the variable in the MachineFunction side
; table. Check that the dbg.declares have `alloca` operands.
-; CHECK: call void @llvm.dbg.declare(metadata i8* [[MyAlloca]], metadata ![[ARG_ID:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 32))
-; CHECK: call void @llvm.dbg.declare(metadata i8* [[MyAlloca]], metadata ![[VAR_ID:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 48))
+; CHECK: call void @llvm.dbg.declare(metadata ptr [[MyAlloca]], metadata ![[ARG_ID:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 32))
+; CHECK: call void @llvm.dbg.declare(metadata ptr [[MyAlloca]], metadata ![[VAR_ID:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 48))
declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
; Won't be instrumented because of asan-skip-promotable-allocas.
%non_instrumented3 = alloca i32, align 4
- %ptr = ptrtoint i32* %instrumented to i32
+ %ptr = ptrtoint ptr %instrumented to i32
br label %bb1
bb1:
; CHECK: entry:
; CHECK: %non_instrumented1 = alloca i32, align 4
; CHECK: %non_instrumented2 = alloca i32, align 4
-; CHECK: load i32, i32* @__asan_option_detect_stack_use_after_return
+; CHECK: load i32, ptr @__asan_option_detect_stack_use_after_return
; CHECK: bb0:
; CHECK: %non_instrumented3 = alloca i32, align 4
define i32 @foo() sanitize_address {
entry:
%non_instrumented1 = alloca i32, align 4
- %t = load i32, i32* %non_instrumented1, align 4
+ %t = load i32, ptr %non_instrumented1, align 4
%instrumented = alloca i32, align 4
- %ptr = ptrtoint i32* %instrumented to i32
+ %ptr = ptrtoint ptr %instrumented to i32
ret i32 %t
}
; CHECK: entry:
; CHECK: %non_instrumented1 = alloca i32, align 4
-; CHECK: load i32, i32* %non_instrumented1
-; CHECK: load i32, i32* @__asan_option_detect_stack_use_after_return
+; CHECK: load i32, ptr %non_instrumented1
+; CHECK: load i32, ptr @__asan_option_detect_stack_use_after_return
ret void
}
-@__call_foo = global void ()* @_ZL3foov, section ".preinit_array", align 8
-@__call_foo_2 = global void ()* @_ZL3foov, section ".init_array", align 8
-@__call_foo_3 = global void ()* @_ZL3foov, section ".fini_array", align 8
+@__call_foo = global ptr @_ZL3foov, section ".preinit_array", align 8
+@__call_foo_2 = global ptr @_ZL3foov, section ".init_array", align 8
+@__call_foo_3 = global ptr @_ZL3foov, section ".fini_array", align 8
; CHECK-NOT: asan_gen{{.*}}__call_foo
define i32 @main() #0 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
ret i32 0
}
define void @_Z3barv() uwtable sanitize_address {
entry:
%a = alloca i32, align 4
- call void @_Z3fooPi(i32* %a)
+ call void @_Z3fooPi(ptr %a)
ret void
}
-declare void @_Z3fooPi(i32*)
+declare void @_Z3fooPi(ptr)
; We create one global string constant for the stack frame above.
; It should have unnamed_addr and align 1.
; Make sure we don't create any other global constants.
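; Editor's note: the constant in question is ASan's frame description string
; (@___asan_gen_*). A plausible shape, assuming the usual
; "<NumVars> <Offset> <Size> <NameLen> <Name>" encoding (illustrative, not
; taken from this test's output):
;   @___asan_gen_ = private unnamed_addr constant [11 x i8] c"1 32 4 1 a\00", align 1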
@data1 = dso_local global i32 1, align 4
@data2 = dso_local global i32 2, align 4
-@__link_set_test_set_sym_data1 = internal constant i8* bitcast (i32* @data1 to i8*), section "link_set_test_set", align 8
-@__link_set_test_set_sym_data2 = internal constant i8* bitcast (i32* @data2 to i8*), section "link_set_test_set", align 8
-; CHECK: @__link_set_test_set_sym_data1 = internal constant i8*{{.*}}, section "link_set_test_set"
-; CHECK-NEXT: @__link_set_test_set_sym_data2 = internal constant i8*{{.*}}, section "link_set_test_set"
+@__link_set_test_set_sym_data1 = internal constant ptr @data1, section "link_set_test_set", align 8
+@__link_set_test_set_sym_data2 = internal constant ptr @data2, section "link_set_test_set", align 8
+; CHECK: @__link_set_test_set_sym_data1 = internal constant ptr{{.*}}, section "link_set_test_set"
+; CHECK-NEXT: @__link_set_test_set_sym_data2 = internal constant ptr{{.*}}, section "link_set_test_set"
define i32 @test_promotable_allocas() sanitize_address {
entry:
; CHECK: %0 = alloca i32, align 4
-; CHECK: store i32 0, i32* %0, align 4
-; CHECK: %1 = load i32, i32* %0, align 4
+; CHECK: store i32 0, ptr %0, align 4
+; CHECK: %1 = load i32, ptr %0, align 4
; CHECK: ret i32 %1
; CHECK-NOT: __asan_stack_malloc_0
; CHECK-NOT: call void @__asan_report_store4
%0 = alloca i32, align 4
- store i32 0, i32* %0, align 4
- %1 = load i32, i32* %0, align 4
+ store i32 0, ptr %0, align 4
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define void @__asan_default_options(i32* %a) sanitize_address {
+define void @__asan_default_options(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %a, align 4
+ store i32 %tmp2, ptr %a, align 4
ret void
}
; no action should be taken for these globals
$global_noinst = comdat largest
@aliasee = private unnamed_addr constant [2 x i8] [i8 1, i8 2], comdat($global_noinst)
-@global_noinst = unnamed_addr alias [2 x i8], [2 x i8]* @aliasee
+@global_noinst = unnamed_addr alias [2 x i8], ptr @aliasee
; CHECK-NOT: {{asan_gen.*global_noinst}}
-; CHECK-DAG: @global_noinst = unnamed_addr alias [2 x i8], [2 x i8]* @aliasee
+; CHECK-DAG: @global_noinst = unnamed_addr alias [2 x i8], ptr @aliasee
@global_inst = private constant [2 x i8] [i8 1, i8 2]
; CHECK-DAG: {{asan_gen.*global_inst}}
; CHECK: @asan.module_ctor
; macOS does use dynamic shadow placement on arm64
; RUN: opt -passes=asan -mtriple=arm64-apple-macosx --data-layout="e-m:o-i64:64-i128:128-n32:64-S128" -S < %s | FileCheck %s --check-prefixes=CHECK,CHECK-DYNAMIC -DPTR_SIZE=64
-define i32 @test_load(i32* %a) sanitize_address {
+define i32 @test_load(ptr %a) sanitize_address {
; First instrumentation in the function must be to load the dynamic shadow
; address into a local variable.
; CHECK-LABEL: @test_load
; CHECK: entry:
-; CHECK-DYNAMIC-NEXT: %[[SHADOW:[^ ]*]] = load i[[PTR_SIZE]], i[[PTR_SIZE]]* @__asan_shadow_memory_dynamic_address
+; CHECK-DYNAMIC-NEXT: %[[SHADOW:[^ ]*]] = load i[[PTR_SIZE]], ptr @__asan_shadow_memory_dynamic_address
; CHECK-NONDYNAMIC-NOT: __asan_shadow_memory_dynamic_address
; Shadow address is loaded and added into the whole offset computation.
; CHECK-DYNAMIC: add i[[PTR_SIZE]] %{{.*}}, %[[SHADOW]]
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define void @load1(i8* %p) sanitize_address {
+define void @load1(ptr %p) sanitize_address {
entry:
- %t = load i8, i8* %p, align 1
+ %t = load i8, ptr %p, align 1
ret void
; CHECK-LABEL: define void @load1
; CHECK: __asan_exp_load1{{.*}} i32 42
; CHECK: ret void
}
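; Editor's note: the trailing "i32 42" on each __asan_exp_* check is the
; experiment tag threaded through every callback; presumably these tests run
; with -asan-force-experiment=42 (the RUN lines are not shown in this excerpt).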
-define void @load2(i16* %p) sanitize_address {
+define void @load2(ptr %p) sanitize_address {
entry:
- %t = load i16, i16* %p, align 2
+ %t = load i16, ptr %p, align 2
ret void
; CHECK-LABEL: define void @load2
; CHECK: __asan_exp_load2{{.*}} i32 42
; CHECK: ret void
}
-define void @load4(i32* %p) sanitize_address {
+define void @load4(ptr %p) sanitize_address {
entry:
- %t = load i32, i32* %p, align 4
+ %t = load i32, ptr %p, align 4
ret void
; CHECK-LABEL: define void @load4
; CHECK: __asan_exp_load4{{.*}} i32 42
; CHECK: ret void
}
-define void @load8(i64* %p) sanitize_address {
+define void @load8(ptr %p) sanitize_address {
entry:
- %t = load i64, i64* %p, align 8
+ %t = load i64, ptr %p, align 8
ret void
; CHECK-LABEL: define void @load8
; CHECK: __asan_exp_load8{{.*}} i32 42
; CHECK: ret void
}
-define void @load16(i128* %p) sanitize_address {
+define void @load16(ptr %p) sanitize_address {
entry:
- %t = load i128, i128* %p, align 16
+ %t = load i128, ptr %p, align 16
ret void
; CHECK-LABEL: define void @load16
; CHECK: __asan_exp_load16{{.*}} i32 42
; CHECK: ret void
}
-define void @loadN(i48* %p) sanitize_address {
+define void @loadN(ptr %p) sanitize_address {
entry:
- %t = load i48, i48* %p, align 1
+ %t = load i48, ptr %p, align 1
ret void
; CHECK-LABEL: define void @loadN
; CHECK: __asan_exp_loadN{{.*}} i32 42
; CHECK: ret void
}
-define void @store1(i8* %p) sanitize_address {
+define void @store1(ptr %p) sanitize_address {
entry:
- store i8 1, i8* %p, align 1
+ store i8 1, ptr %p, align 1
ret void
; CHECK-LABEL: define void @store1
; CHECK: __asan_exp_store1{{.*}} i32 42
; CHECK: ret void
}
-define void @store2(i16* %p) sanitize_address {
+define void @store2(ptr %p) sanitize_address {
entry:
- store i16 1, i16* %p, align 2
+ store i16 1, ptr %p, align 2
ret void
; CHECK-LABEL: define void @store2
; CHECK: __asan_exp_store2{{.*}} i32 42
; CHECK: ret void
}
-define void @store4(i32* %p) sanitize_address {
+define void @store4(ptr %p) sanitize_address {
entry:
- store i32 1, i32* %p, align 4
+ store i32 1, ptr %p, align 4
ret void
; CHECK-LABEL: define void @store4
; CHECK: __asan_exp_store4{{.*}} i32 42
; CHECK: ret void
}
-define void @store8(i64* %p) sanitize_address {
+define void @store8(ptr %p) sanitize_address {
entry:
- store i64 1, i64* %p, align 8
+ store i64 1, ptr %p, align 8
ret void
; CHECK-LABEL: define void @store8
; CHECK: __asan_exp_store8{{.*}} i32 42
; CHECK: ret void
}
-define void @store16(i128* %p) sanitize_address {
+define void @store16(ptr %p) sanitize_address {
entry:
- store i128 1, i128* %p, align 16
+ store i128 1, ptr %p, align 16
ret void
; CHECK-LABEL: define void @store16
; CHECK: __asan_exp_store16{{.*}} i32 42
; CHECK: ret void
}
-define void @storeN(i48* %p) sanitize_address {
+define void @storeN(ptr %p) sanitize_address {
entry:
- store i48 1, i48* %p, align 1
+ store i48 1, ptr %p, align 1
ret void
; CHECK-LABEL: define void @storeN
; CHECK: __asan_exp_storeN{{.*}} i32 42
; CHECK: ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define void @load1(i8* %p) sanitize_address {
+define void @load1(ptr %p) sanitize_address {
entry:
- %t = load i8, i8* %p, align 1
+ %t = load i8, ptr %p, align 1
ret void
; CHECK-LABEL: define void @load1
; CHECK: __asan_report_exp_load1{{.*}} i32 42
; CHECK: ret void
}
-define void @load2(i16* %p) sanitize_address {
+define void @load2(ptr %p) sanitize_address {
entry:
- %t = load i16, i16* %p, align 2
+ %t = load i16, ptr %p, align 2
ret void
; CHECK-LABEL: define void @load2
; CHECK: __asan_report_exp_load2{{.*}} i32 42
; CHECK: ret void
}
-define void @load4(i32* %p) sanitize_address {
+define void @load4(ptr %p) sanitize_address {
entry:
- %t = load i32, i32* %p, align 4
+ %t = load i32, ptr %p, align 4
ret void
; CHECK-LABEL: define void @load4
; CHECK: __asan_report_exp_load4{{.*}} i32 42
; CHECK: ret void
}
-define void @load8(i64* %p) sanitize_address {
+define void @load8(ptr %p) sanitize_address {
entry:
- %t = load i64, i64* %p, align 8
+ %t = load i64, ptr %p, align 8
ret void
; CHECK-LABEL: define void @load8
; CHECK: __asan_report_exp_load8{{.*}} i32 42
; CHECK: ret void
}
-define void @load16(i128* %p) sanitize_address {
+define void @load16(ptr %p) sanitize_address {
entry:
- %t = load i128, i128* %p, align 16
+ %t = load i128, ptr %p, align 16
ret void
; CHECK-LABEL: define void @load16
; CHECK: __asan_report_exp_load16{{.*}} i32 42
; CHECK: ret void
}
-define void @loadN(i48* %p) sanitize_address {
+define void @loadN(ptr %p) sanitize_address {
entry:
- %t = load i48, i48* %p, align 1
+ %t = load i48, ptr %p, align 1
ret void
; CHECK-LABEL: define void @loadN
; CHECK: __asan_report_exp_load_n{{.*}} i32 42
; CHECK: ret void
}
-define void @store1(i8* %p) sanitize_address {
+define void @store1(ptr %p) sanitize_address {
entry:
- store i8 1, i8* %p, align 1
+ store i8 1, ptr %p, align 1
ret void
; CHECK-LABEL: define void @store1
; CHECK: __asan_report_exp_store1{{.*}} i32 42
; CHECK: ret void
}
-define void @store2(i16* %p) sanitize_address {
+define void @store2(ptr %p) sanitize_address {
entry:
- store i16 1, i16* %p, align 2
+ store i16 1, ptr %p, align 2
ret void
; CHECK-LABEL: define void @store2
; CHECK: __asan_report_exp_store2{{.*}} i32 42
; CHECK: ret void
}
-define void @store4(i32* %p) sanitize_address {
+define void @store4(ptr %p) sanitize_address {
entry:
- store i32 1, i32* %p, align 4
+ store i32 1, ptr %p, align 4
ret void
; CHECK-LABEL: define void @store4
; CHECK: __asan_report_exp_store4{{.*}} i32 42
; CHECK: ret void
}
-define void @store8(i64* %p) sanitize_address {
+define void @store8(ptr %p) sanitize_address {
entry:
- store i64 1, i64* %p, align 8
+ store i64 1, ptr %p, align 8
ret void
; CHECK-LABEL: define void @store8
; CHECK: __asan_report_exp_store8{{.*}} i32 42
; CHECK: ret void
}
-define void @store16(i128* %p) sanitize_address {
+define void @store16(ptr %p) sanitize_address {
entry:
- store i128 1, i128* %p, align 16
+ store i128 1, ptr %p, align 16
ret void
; CHECK-LABEL: define void @store16
; CHECK: __asan_report_exp_store16{{.*}} i32 42
; CHECK: ret void
}
-define void @storeN(i48* %p) sanitize_address {
+define void @storeN(ptr %p) sanitize_address {
entry:
- store i48 1, i48* %p, align 1
+ store i48 1, ptr %p, align 1
ret void
; CHECK-LABEL: define void @storeN
; CHECK: __asan_report_exp_store_n{{.*}} i32 42
; CHECK: ret void
}
target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @Foo(i8*)
+declare void @Foo(ptr)
define void @Empty() uwtable sanitize_address {
; CHECK-LABEL: @Empty(
; NEVER-LABEL: @Simple(
; NEVER-NEXT: entry:
; NEVER-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
-; NEVER-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[MYALLOCA]] to i64
+; NEVER-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; NEVER-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 32
-; NEVER-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to i8*
-; NEVER-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to i64*
-; NEVER-NEXT: store i64 1102416563, i64* [[TMP3]], align 8
+; NEVER-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; NEVER-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to ptr
+; NEVER-NEXT: store i64 1102416563, ptr [[TMP3]], align 8
; NEVER-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 8
-; NEVER-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i64*
-; NEVER-NEXT: store i64 ptrtoint ([11 x i8]* @___asan_gen_ to i64), i64* [[TMP5]], align 8
+; NEVER-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; NEVER-NEXT: store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP5]], align 8
; NEVER-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 16
-; NEVER-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i64*
-; NEVER-NEXT: store i64 ptrtoint (void ()* @Simple to i64), i64* [[TMP7]], align 8
+; NEVER-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; NEVER-NEXT: store i64 ptrtoint (ptr @Simple to i64), ptr [[TMP7]], align 8
; NEVER-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP0]], 3
; NEVER-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 2147450880
; NEVER-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 0
-; NEVER-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to i64*
-; NEVER-NEXT: store i64 -868083113472691727, i64* [[TMP11]], align 1
-; NEVER-NEXT: call void @Foo(i8* [[TMP2]])
-; NEVER-NEXT: store i64 1172321806, i64* [[TMP3]], align 8
+; NEVER-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; NEVER-NEXT: store i64 -868083113472691727, ptr [[TMP11]], align 1
+; NEVER-NEXT: call void @Foo(ptr [[TMP2]])
+; NEVER-NEXT: store i64 1172321806, ptr [[TMP3]], align 8
; NEVER-NEXT: [[TMP12:%.*]] = add i64 [[TMP9]], 0
-; NEVER-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to i64*
-; NEVER-NEXT: store i64 0, i64* [[TMP13]], align 1
+; NEVER-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; NEVER-NEXT: store i64 0, ptr [[TMP13]], align 1
; NEVER-NEXT: ret void
;
; RUNTIME-LABEL: @Simple(
; RUNTIME-NEXT: entry:
; RUNTIME-NEXT: [[ASAN_LOCAL_STACK_BASE:%.*]] = alloca i64, align 8
-; RUNTIME-NEXT: [[TMP0:%.*]] = load i32, i32* @__asan_option_detect_stack_use_after_return, align 4
+; RUNTIME-NEXT: [[TMP0:%.*]] = load i32, ptr @__asan_option_detect_stack_use_after_return, align 4
; RUNTIME-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
; RUNTIME-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP4:%.*]]
; RUNTIME: 2:
; RUNTIME-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP9:%.*]]
; RUNTIME: 7:
; RUNTIME-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
-; RUNTIME-NEXT: [[TMP8:%.*]] = ptrtoint i8* [[MYALLOCA]] to i64
+; RUNTIME-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; RUNTIME-NEXT: br label [[TMP9]]
; RUNTIME: 9:
; RUNTIME-NEXT: [[TMP10:%.*]] = phi i64 [ [[TMP5]], [[TMP4]] ], [ [[TMP8]], [[TMP7]] ]
-; RUNTIME-NEXT: store i64 [[TMP10]], i64* [[ASAN_LOCAL_STACK_BASE]], align 8
+; RUNTIME-NEXT: store i64 [[TMP10]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
; RUNTIME-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 32
-; RUNTIME-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to i8*
-; RUNTIME-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP10]] to i64*
-; RUNTIME-NEXT: store i64 1102416563, i64* [[TMP13]], align 8
+; RUNTIME-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; RUNTIME-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; RUNTIME-NEXT: store i64 1102416563, ptr [[TMP13]], align 8
; RUNTIME-NEXT: [[TMP14:%.*]] = add i64 [[TMP10]], 8
-; RUNTIME-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to i64*
-; RUNTIME-NEXT: store i64 ptrtoint ([11 x i8]* @___asan_gen_ to i64), i64* [[TMP15]], align 8
+; RUNTIME-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; RUNTIME-NEXT: store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP15]], align 8
; RUNTIME-NEXT: [[TMP16:%.*]] = add i64 [[TMP10]], 16
-; RUNTIME-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to i64*
-; RUNTIME-NEXT: store i64 ptrtoint (void ()* @Simple to i64), i64* [[TMP17]], align 8
+; RUNTIME-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; RUNTIME-NEXT: store i64 ptrtoint (ptr @Simple to i64), ptr [[TMP17]], align 8
; RUNTIME-NEXT: [[TMP18:%.*]] = lshr i64 [[TMP10]], 3
; RUNTIME-NEXT: [[TMP19:%.*]] = add i64 [[TMP18]], 2147450880
; RUNTIME-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 0
-; RUNTIME-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to i64*
-; RUNTIME-NEXT: store i64 -868083113472691727, i64* [[TMP21]], align 1
-; RUNTIME-NEXT: call void @Foo(i8* [[TMP12]])
-; RUNTIME-NEXT: store i64 1172321806, i64* [[TMP13]], align 8
+; RUNTIME-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; RUNTIME-NEXT: store i64 -868083113472691727, ptr [[TMP21]], align 1
+; RUNTIME-NEXT: call void @Foo(ptr [[TMP12]])
+; RUNTIME-NEXT: store i64 1172321806, ptr [[TMP13]], align 8
; RUNTIME-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
; RUNTIME-NEXT: br i1 [[TMP22]], label [[TMP23:%.*]], label [[TMP30:%.*]]
; RUNTIME: 23:
; RUNTIME-NEXT: [[TMP24:%.*]] = add i64 [[TMP19]], 0
-; RUNTIME-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to i64*
-; RUNTIME-NEXT: store i64 -723401728380766731, i64* [[TMP25]], align 1
+; RUNTIME-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; RUNTIME-NEXT: store i64 -723401728380766731, ptr [[TMP25]], align 1
; RUNTIME-NEXT: [[TMP26:%.*]] = add i64 [[TMP5]], 56
-; RUNTIME-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to i64*
-; RUNTIME-NEXT: [[TMP28:%.*]] = load i64, i64* [[TMP27]], align 8
-; RUNTIME-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to i8*
-; RUNTIME-NEXT: store i8 0, i8* [[TMP29]], align 1
+; RUNTIME-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
+; RUNTIME-NEXT: [[TMP28:%.*]] = load i64, ptr [[TMP27]], align 8
+; RUNTIME-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
+; RUNTIME-NEXT: store i8 0, ptr [[TMP29]], align 1
; RUNTIME-NEXT: br label [[TMP33:%.*]]
; RUNTIME: 30:
; RUNTIME-NEXT: [[TMP31:%.*]] = add i64 [[TMP19]], 0
-; RUNTIME-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to i64*
-; RUNTIME-NEXT: store i64 0, i64* [[TMP32]], align 1
+; RUNTIME-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; RUNTIME-NEXT: store i64 0, ptr [[TMP32]], align 1
; RUNTIME-NEXT: br label [[TMP33]]
; RUNTIME: 33:
; RUNTIME-NEXT: ret void
; ALWAYS-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP4:%.*]]
; ALWAYS: 2:
; ALWAYS-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
-; ALWAYS-NEXT: [[TMP3:%.*]] = ptrtoint i8* [[MYALLOCA]] to i64
+; ALWAYS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; ALWAYS-NEXT: br label [[TMP4]]
; ALWAYS: 4:
; ALWAYS-NEXT: [[TMP5:%.*]] = phi i64 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP3]], [[TMP2]] ]
-; ALWAYS-NEXT: store i64 [[TMP5]], i64* [[ASAN_LOCAL_STACK_BASE]], align 8
+; ALWAYS-NEXT: store i64 [[TMP5]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
; ALWAYS-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 32
-; ALWAYS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i8*
-; ALWAYS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP5]] to i64*
-; ALWAYS-NEXT: store i64 1102416563, i64* [[TMP8]], align 8
+; ALWAYS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; ALWAYS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; ALWAYS-NEXT: store i64 1102416563, ptr [[TMP8]], align 8
; ALWAYS-NEXT: [[TMP9:%.*]] = add i64 [[TMP5]], 8
-; ALWAYS-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to i64*
-; ALWAYS-NEXT: store i64 ptrtoint ([11 x i8]* @___asan_gen_ to i64), i64* [[TMP10]], align 8
+; ALWAYS-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; ALWAYS-NEXT: store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP10]], align 8
; ALWAYS-NEXT: [[TMP11:%.*]] = add i64 [[TMP5]], 16
-; ALWAYS-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to i64*
-; ALWAYS-NEXT: store i64 ptrtoint (void ()* @Simple to i64), i64* [[TMP12]], align 8
+; ALWAYS-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; ALWAYS-NEXT: store i64 ptrtoint (ptr @Simple to i64), ptr [[TMP12]], align 8
; ALWAYS-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP5]], 3
; ALWAYS-NEXT: [[TMP14:%.*]] = add i64 [[TMP13]], 2147450880
; ALWAYS-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], 0
-; ALWAYS-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to i64*
-; ALWAYS-NEXT: store i64 -868083113472691727, i64* [[TMP16]], align 1
-; ALWAYS-NEXT: call void @Foo(i8* [[TMP7]])
-; ALWAYS-NEXT: store i64 1172321806, i64* [[TMP8]], align 8
+; ALWAYS-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; ALWAYS-NEXT: store i64 -868083113472691727, ptr [[TMP16]], align 1
+; ALWAYS-NEXT: call void @Foo(ptr [[TMP7]])
+; ALWAYS-NEXT: store i64 1172321806, ptr [[TMP8]], align 8
; ALWAYS-NEXT: [[TMP17:%.*]] = icmp ne i64 [[TMP0]], 0
; ALWAYS-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP25:%.*]]
; ALWAYS: 18:
; ALWAYS-NEXT: [[TMP19:%.*]] = add i64 [[TMP14]], 0
-; ALWAYS-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to i64*
-; ALWAYS-NEXT: store i64 -723401728380766731, i64* [[TMP20]], align 1
+; ALWAYS-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; ALWAYS-NEXT: store i64 -723401728380766731, ptr [[TMP20]], align 1
; ALWAYS-NEXT: [[TMP21:%.*]] = add i64 [[TMP0]], 56
-; ALWAYS-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to i64*
-; ALWAYS-NEXT: [[TMP23:%.*]] = load i64, i64* [[TMP22]], align 8
-; ALWAYS-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to i8*
-; ALWAYS-NEXT: store i8 0, i8* [[TMP24]], align 1
+; ALWAYS-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; ALWAYS-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP22]], align 8
+; ALWAYS-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+; ALWAYS-NEXT: store i8 0, ptr [[TMP24]], align 1
; ALWAYS-NEXT: br label [[TMP28:%.*]]
; ALWAYS: 25:
; ALWAYS-NEXT: [[TMP26:%.*]] = add i64 [[TMP14]], 0
-; ALWAYS-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to i64*
-; ALWAYS-NEXT: store i64 0, i64* [[TMP27]], align 1
+; ALWAYS-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
+; ALWAYS-NEXT: store i64 0, ptr [[TMP27]], align 1
; ALWAYS-NEXT: br label [[TMP28]]
; ALWAYS: 28:
; ALWAYS-NEXT: ret void
;
entry:
%x = alloca i8, align 16
- call void @Foo(i8* %x)
+ call void @Foo(ptr %x)
ret void
}
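; Editor's note decoding the constants above (standard ASan values):
;   1102416563          = 0x41B58AB3          current stack frame magic
;   1172321806          = 0x45E0360E          retired stack frame magic
;   -868083113472691727 = 0xF3F3F300F1F1F1F1  shadow bytes: f1 f1 f1 f1 (left
;                         redzone), 00 (the granule holding %x), f3 f3 f3
;                         (right redzone)
;   -723401728380766731 = 0xF5F5F5F5F5F5F5F5  use-after-return poisoning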
; CHECK-LABEL: @Huge(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 100288, align 32
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[MYALLOCA]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 32
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to [100000 x i8]*
-; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to i64*
-; CHECK-NEXT: store i64 1102416563, i64* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to ptr
+; CHECK-NEXT: store i64 1102416563, ptr [[TMP3]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i64*
-; CHECK-NEXT: store i64 ptrtoint ([16 x i8]* @___asan_gen_.1 to i64), i64* [[TMP5]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT: store i64 ptrtoint (ptr @___asan_gen_.1 to i64), ptr [[TMP5]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 16
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i64*
-; CHECK-NEXT: store i64 ptrtoint (void ()* @Huge to i64), i64* [[TMP7]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT: store i64 ptrtoint (ptr @Huge to i64), ptr [[TMP7]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 2147450880
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to i32*
-; CHECK-NEXT: store i32 -235802127, i32* [[TMP11]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: store i32 -235802127, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP9]], 12504
-; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to i64*
-; CHECK-NEXT: store i64 -868082074056920077, i64* [[TMP13]], align 1
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: store i64 -868082074056920077, ptr [[TMP13]], align 1
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP9]], 12512
-; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to i64*
-; CHECK-NEXT: store i64 -868082074056920077, i64* [[TMP15]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: store i64 -868082074056920077, ptr [[TMP15]], align 1
; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP9]], 12520
-; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to i64*
-; CHECK-NEXT: store i64 -868082074056920077, i64* [[TMP17]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: store i64 -868082074056920077, ptr [[TMP17]], align 1
; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP9]], 12528
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to i64*
-; CHECK-NEXT: store i64 -868082074056920077, i64* [[TMP19]], align 1
-; CHECK-NEXT: [[XX:%.*]] = getelementptr inbounds [100000 x i8], [100000 x i8]* [[TMP2]], i64 0, i64 0
-; CHECK-NEXT: call void @Foo(i8* [[XX]])
-; CHECK-NEXT: store i64 1172321806, i64* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: store i64 -868082074056920077, ptr [[TMP19]], align 1
+; CHECK-NEXT: call void @Foo(ptr [[TMP2]])
+; CHECK-NEXT: store i64 1172321806, ptr [[TMP3]], align 8
; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to i32*
-; CHECK-NEXT: store i32 0, i32* [[TMP21]], align 1
+; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT: store i32 0, ptr [[TMP21]], align 1
; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[TMP9]], 12504
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to i64*
-; CHECK-NEXT: store i64 0, i64* [[TMP23]], align 1
+; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT: store i64 0, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP9]], 12512
-; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to i64*
-; CHECK-NEXT: store i64 0, i64* [[TMP25]], align 1
+; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT: store i64 0, ptr [[TMP25]], align 1
; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[TMP9]], 12520
-; CHECK-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to i64*
-; CHECK-NEXT: store i64 0, i64* [[TMP27]], align 1
+; CHECK-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
+; CHECK-NEXT: store i64 0, ptr [[TMP27]], align 1
; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[TMP9]], 12528
-; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to i64*
-; CHECK-NEXT: store i64 0, i64* [[TMP29]], align 1
+; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
+; CHECK-NEXT: store i64 0, ptr [[TMP29]], align 1
; CHECK-NEXT: ret void
;
entry:
%x = alloca [100000 x i8], align 16
- %xx = getelementptr inbounds [100000 x i8], [100000 x i8]* %x, i64 0, i64 0
- call void @Foo(i8* %xx)
+ call void @Foo(ptr %x)
ret void
}
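; The constants in the @Huge checks above decode as follows, assuming the
; default shadow scale of 3 (one shadow byte per 8 application bytes):
;   100288 = 32 (left redzone) + 100000 (the array) + 256 (right redzone);
;   1102416563 = 0x41B58AB3 marks the frame live on entry, and
;   1172321806 = 0x45E0360E marks it retired on return;
;   base+8 holds a pointer to the frame description string and base+16 the
;   address of @Huge itself;
;   -235802127 = 0xF1F1F1F1 poisons the left redzone, while
;   -868082074056920077 = 0xF3F3F3F3F3F3F3F3 poisons the right redzone
;   starting at shadow offset 12504 = (32 + 100000) / 8.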
target triple = "x86_64-unknown-linux-gnu"
-define i32 @test_load(i32* %a) sanitize_address {
+define i32 @test_load(ptr %a) sanitize_address {
; The first instrumentation in the function must load the dynamic shadow
; address into a local variable.
; CHECK-LABEL: @test_load
; CHECK: entry:
-; CHECK-FDS-NEXT: %[[SHADOW:[^ ]*]] = load i64, i64* @__asan_shadow_memory_dynamic_address
+; CHECK-FDS-NEXT: %[[SHADOW:[^ ]*]] = load i64, ptr @__asan_shadow_memory_dynamic_address
; CHECK-NDS-NOT: __asan_shadow_memory_dynamic_address
; Shadow address is loaded and added into the whole offset computation.
; CHECK-FDS: add i64 %{{.*}}, %[[SHADOW]]
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
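; For reference, the mapping exercised here: with the default scale of 3, the
; shadow byte for an application address A lives at (A >> 3) + Offset. With a
; constant mapping, Offset is hard-coded (2147450880 = 0x7fff8000 on x86-64
; Linux); with a dynamic shadow, it is instead loaded from
; @__asan_shadow_memory_dynamic_address on entry and added into the address
; computation, which is what the CHECK-FDS lines verify.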
; RUN: -data-layout="E-m:e-i64:64-n32:64-S128" | \
; RUN: FileCheck --check-prefix=CHECK-MIPS64 %s
-define i32 @read_4_bytes(i32* %a) sanitize_address {
+define i32 @read_4_bytes(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
define void @b(i32 %c) {
entry:
%conv = sext i32 %c to i64
- %0 = inttoptr i64 %conv to i32 addrspace(42)*
- %cmp = icmp ugt i32 addrspace(42)* %0, getelementptr inbounds ([1 x i32], [1 x i32] addrspace(42)* @a, i64 0, i64 0)
+ %0 = inttoptr i64 %conv to ptr addrspace(42)
+ %cmp = icmp ugt ptr addrspace(42) %0, @a
br i1 %cmp, label %if.then, label %if.end
if.then:
declare i32 @e(...)
!llvm.asan.globals = !{!0}
-!0 = !{[1 x i32] addrspace(42)* @a, null, !"a", i1 false, i1 false}
+!0 = !{ptr addrspace(42) @a, null, !"a", i1 false, i1 false}
; CHECK: @b = {{.*}} %struct
; CHECK: @llvm.compiler.used =
-; CHECK-SAME: i8* bitcast ({ %struct, [16 x i8] }* @a to i8*)
-; CHECK-SAME: i8* bitcast ({ %struct, [16 x i8] }* @b to i8*)
+; CHECK-SAME: ptr @a
+; CHECK-SAME: ptr @b
-define i32 @main(i32, i8** nocapture readnone) {
+define i32 @main(i32, ptr nocapture readnone) {
%3 = alloca %struct, align 8
%4 = alloca %struct, align 8
- %5 = bitcast %struct* %3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %5, i8* bitcast (%struct* @a to i8*), i64 16, i32 8, i1 false)
- %6 = bitcast %struct* %4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %6, i8* bitcast (%struct* @b to i8*), i64 16, i32 8, i1 false)
- call void asm sideeffect "", "r,r,~{dirflag},~{fpsr},~{flags}"(%struct* nonnull %3, %struct* nonnull %4)
+ call void @llvm.memcpy.p0.p0.i64(ptr nonnull %3, ptr @a, i64 16, i32 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr nonnull %4, ptr @b, i64 16, i32 8, i1 false)
+ call void asm sideeffect "", "r,r,~{dirflag},~{fpsr},~{flags}"(ptr nonnull %3, ptr nonnull %4)
ret i32 0
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32, i1)
@blocked_global = global i32 0, align 4
@_ZZ4funcvE10static_var = internal global i32 0, align 4
@.str = private unnamed_addr constant [14 x i8] c"Hello, world!\00", align 1
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_asan_globals.cpp, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__sub_I_asan_globals.cpp, ptr null }]
; Check that globals were instrumented:
; CHECK: @global = global { i32, [28 x i8] } zeroinitializer, align 32
; CHECK: [[VARNAME:@___asan_gen_.[0-9]+]] = private unnamed_addr constant [7 x i8] c"global\00", align 1
; Check that location descriptors and global names were passed into __asan_register_globals:
-; CHECK: i64 ptrtoint ([7 x i8]* [[VARNAME]] to i64)
+; CHECK: i64 ptrtoint (ptr [[VARNAME]] to i64)
; Check alignment of metadata_array.
; CHECK-S5-SAME: {{align 32$}}
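; The pattern above shows the global redzone scheme: the 4-byte i32 is padded
; with 28 bytes of trailing redzone so the whole object becomes a 32-byte,
; 32-byte-aligned { i32, [28 x i8] }, and its name string [[VARNAME]] is
; passed (as a ptrtoint'd i64) to __asan_register_globals.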
; Function Attrs: nounwind sanitize_address
define internal void @__cxx_global_var_init() #0 section ".text.startup" {
entry:
- %0 = load i32, i32* @global, align 4
- store i32 %0, i32* @dyn_init_global, align 4
+ %0 = load i32, ptr @global, align 4
+ store i32 %0, ptr @dyn_init_global, align 4
ret void
}
; Function Attrs: nounwind sanitize_address
define void @_Z4funcv() #1 {
entry:
- %literal = alloca i8*, align 8
- store i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str, i32 0, i32 0), i8** %literal, align 8
+ %literal = alloca ptr, align 8
+ store ptr @.str, ptr %literal, align 8
ret void
}
@g = global [1 x i32] zeroinitializer, align 4
!llvm.asan.globals = !{!0, !1}
-!0 = !{[1 x i32]* @g, null, !"name", i1 false, i1 false}
-!1 = !{i8* bitcast ([1 x i32]* @g to i8*), null, !"name", i1 false, i1 false}
+!0 = !{ptr @g, null, !"name", i1 false, i1 false}
+!1 = !{ptr @g, null, !"name", i1 false, i1 false}
; CHECK: @__asan_global_dead_global = private global { {{.*}} }, section ".ASAN$GL", comdat($dead_global), align 64, !associated
; CHECK: @__asan_global_private_str = private global { {{.*}} }, section ".ASAN$GL", comdat($private_str), align 64, !associated
-; CHECK: @llvm.compiler.used {{.*}} @__asan_global_dead_global {{.*}} @__asan_global_private_str {{.*}} section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [6 x ptr] [ptr @dead_global, ptr @mystr, ptr @private_str, ptr @__asan_global_dead_global, ptr @__asan_global_mystr, ptr @__asan_global_private_str], section "llvm.metadata"
@dead_global = local_unnamed_addr global i32 42, align 4
@mystr = linkonce_odr unnamed_addr constant [5 x i8] c"main\00", comdat, align 1
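; The descriptors above are emitted one per instrumented global into the
; ".ASAN$GL" section (with !associated metadata and a matching comdat), which
; the Windows runtime walks to register globals; listing both the originals
; and the @__asan_global_* descriptors in @llvm.compiler.used keeps the
; linker from stripping them before that happens.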
; Function Attrs: nounwind uwtable
define i32 @main() local_unnamed_addr #0 {
entry:
- %call = tail call i32 @puts(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @mystr, i64 0, i64 0))
- %call2 = tail call i32 @puts(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @private_str, i64 0, i64 0))
+ %call = tail call i32 @puts(ptr @mystr)
+ %call2 = tail call i32 @puts(ptr @private_str)
ret i32 0
}
; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly) local_unnamed_addr #1
+declare i32 @puts(ptr nocapture readonly) local_unnamed_addr #1
attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
; CHECK-LABEL: define {{.*}} @_Z4swapP1SS0_b(
; First come the argument allocas.
-; CHECK: [[argA:%.*]] = alloca %struct.S*,
-; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK: [[argA:%.*]] = alloca ptr,
+; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
; Next, the stores into the argument allocas.
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
-; CHECK-NEXT: store i8 [[frombool]], i8* [[argDoit]]
+; CHECK-NEXT: store i8 [[frombool]], ptr [[argDoit]]
-define void @_Z4swapP1SS0_b(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+define void @_Z4swapP1SS0_b(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
- %a.addr = alloca %struct.S*, align 8
- %b.addr = alloca %struct.S*, align 8
+ %a.addr = alloca ptr, align 8
+ %b.addr = alloca ptr, align 8
%doit.addr = alloca i8, align 1
%tmp = alloca %struct.S, align 4
- store %struct.S* %a, %struct.S** %a.addr, align 8
- store %struct.S* %b, %struct.S** %b.addr, align 8
+ store ptr %a, ptr %a.addr, align 8
+ store ptr %b, ptr %b.addr, align 8
%frombool = zext i1 %doit to i8
- store i8 %frombool, i8* %doit.addr, align 1
- %0 = load i8, i8* %doit.addr, align 1
+ store i8 %frombool, ptr %doit.addr, align 1
+ %0 = load i8, ptr %doit.addr, align 1
%tobool = trunc i8 %0 to i1
br i1 %tobool, label %if.end, label %if.then
br label %return
if.end: ; preds = %entry
- %1 = load %struct.S*, %struct.S** %a.addr, align 8
- %2 = bitcast %struct.S* %tmp to i8*
- %3 = bitcast %struct.S* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
- %4 = load %struct.S*, %struct.S** %b.addr, align 8
- %5 = load %struct.S*, %struct.S** %a.addr, align 8
- %6 = bitcast %struct.S* %5 to i8*
- %7 = bitcast %struct.S* %4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 %7, i64 8, i1 false)
- %8 = load %struct.S*, %struct.S** %b.addr, align 8
- %9 = bitcast %struct.S* %8 to i8*
- %10 = bitcast %struct.S* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
+ %1 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %1, i64 8, i1 false)
+ %2 = load ptr, ptr %b.addr, align 8
+ %3 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %3, ptr align 4 %2, i64 8, i1 false)
+ %4 = load ptr, ptr %b.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %4, ptr align 4 %tmp, i64 8, i1 false)
br label %return
return: ; preds = %if.end, %if.then
; Synthetic test case, meant to check that we do not reorder instructions past
; a load when attempting to hoist argument init insts.
; CHECK-LABEL: define {{.*}} @func_with_load_in_arginit_sequence
-; CHECK: [[argA:%.*]] = alloca %struct.S*,
-; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK: [[argA:%.*]] = alloca ptr,
+; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
; CHECK-NEXT: [[stack_base:%.*]] = alloca i64
-define void @func_with_load_in_arginit_sequence(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+define void @func_with_load_in_arginit_sequence(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
- %a.addr = alloca %struct.S*, align 8
- %b.addr = alloca %struct.S*, align 8
+ %a.addr = alloca ptr, align 8
+ %b.addr = alloca ptr, align 8
%doit.addr = alloca i8, align 1
%tmp = alloca %struct.S, align 4
- store %struct.S* %a, %struct.S** %a.addr, align 8
- store %struct.S* %b, %struct.S** %b.addr, align 8
+ store ptr %a, ptr %a.addr, align 8
+ store ptr %b, ptr %b.addr, align 8
; This load prevents the next argument init sequence from being moved.
- %0 = load i8, i8* %doit.addr, align 1
+ %0 = load i8, ptr %doit.addr, align 1
%frombool = zext i1 %doit to i8
- store i8 %frombool, i8* %doit.addr, align 1
+ store i8 %frombool, ptr %doit.addr, align 1
%tobool = trunc i8 %0 to i1
br i1 %tobool, label %if.end, label %if.then
br label %return
if.end: ; preds = %entry
- %1 = load %struct.S*, %struct.S** %a.addr, align 8
- %2 = bitcast %struct.S* %tmp to i8*
- %3 = bitcast %struct.S* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
- %4 = load %struct.S*, %struct.S** %b.addr, align 8
- %5 = load %struct.S*, %struct.S** %a.addr, align 8
- %6 = bitcast %struct.S* %5 to i8*
- %7 = bitcast %struct.S* %4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 %7, i64 8, i1 false)
- %8 = load %struct.S*, %struct.S** %b.addr, align 8
- %9 = bitcast %struct.S* %8 to i8*
- %10 = bitcast %struct.S* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
+ %1 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %1, i64 8, i1 false)
+ %2 = load ptr, ptr %b.addr, align 8
+ %3 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %3, ptr align 4 %2, i64 8, i1 false)
+ %4 = load ptr, ptr %b.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %4, ptr align 4 %tmp, i64 8, i1 false)
br label %return
return: ; preds = %if.end, %if.then
; Synthetic test case, meant to check that we can handle functions with more
; than one interesting alloca.
; CHECK-LABEL: define {{.*}} @func_with_multiple_interesting_allocas
-; CHECK: [[argA:%.*]] = alloca %struct.S*,
-; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK: [[argA:%.*]] = alloca ptr,
+; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
-; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
+; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
-; CHECK-NEXT: store i8 [[frombool]], i8* [[argDoit]]
-define void @func_with_multiple_interesting_allocas(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+; CHECK-NEXT: store i8 [[frombool]], ptr [[argDoit]]
+define void @func_with_multiple_interesting_allocas(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
- %a.addr = alloca %struct.S*, align 8
- %b.addr = alloca %struct.S*, align 8
+ %a.addr = alloca ptr, align 8
+ %b.addr = alloca ptr, align 8
%doit.addr = alloca i8, align 1
%tmp = alloca %struct.S, align 4
%tmp2 = alloca %struct.S, align 4
- store %struct.S* %a, %struct.S** %a.addr, align 8
- store %struct.S* %b, %struct.S** %b.addr, align 8
+ store ptr %a, ptr %a.addr, align 8
+ store ptr %b, ptr %b.addr, align 8
%frombool = zext i1 %doit to i8
- store i8 %frombool, i8* %doit.addr, align 1
- %0 = load i8, i8* %doit.addr, align 1
+ store i8 %frombool, ptr %doit.addr, align 1
+ %0 = load i8, ptr %doit.addr, align 1
%tobool = trunc i8 %0 to i1
br i1 %tobool, label %if.end, label %if.then
br label %return
if.end: ; preds = %entry
- %1 = load %struct.S*, %struct.S** %a.addr, align 8
- %2 = bitcast %struct.S* %tmp to i8*
- %3 = bitcast %struct.S* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
- %4 = load %struct.S*, %struct.S** %b.addr, align 8
- %5 = bitcast %struct.S* %tmp2 to i8*
- %6 = bitcast %struct.S* %4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 %6, i64 8, i1 false)
- %7 = load %struct.S*, %struct.S** %b.addr, align 8
- %8 = load %struct.S*, %struct.S** %a.addr, align 8
- %9 = bitcast %struct.S* %8 to i8*
- %10 = bitcast %struct.S* %7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
- %11 = load %struct.S*, %struct.S** %b.addr, align 8
- %12 = bitcast %struct.S* %11 to i8*
- %13 = bitcast %struct.S* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %12, i8* align 4 %13, i64 8, i1 false)
- %14 = load %struct.S*, %struct.S** %a.addr, align 8
- %15 = bitcast %struct.S* %14 to i8*
- %16 = bitcast %struct.S* %tmp2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %15, i8* align 4 %16, i64 8, i1 false)
+ %1 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %1, i64 8, i1 false)
+ %2 = load ptr, ptr %b.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp2, ptr align 4 %2, i64 8, i1 false)
+ %3 = load ptr, ptr %b.addr, align 8
+ %4 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %4, ptr align 4 %3, i64 8, i1 false)
+ %5 = load ptr, ptr %b.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %5, ptr align 4 %tmp, i64 8, i1 false)
+ %6 = load ptr, ptr %a.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %6, ptr align 4 %tmp2, i64 8, i1 false)
br label %return
return: ; preds = %if.end, %if.then
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
; CHECK-ALLOCA: __asan_allocas_unpoison
; CHECK-ALLOCA: ret void
%0 = alloca i32, align 4
- %1 = alloca i8*
- store volatile i32 %len, i32* %0, align 4
- %2 = load i32, i32* %0, align 4
+ %1 = alloca ptr
+ store volatile i32 %len, ptr %0, align 4
+ %2 = load i32, ptr %0, align 4
%3 = zext i32 %2 to i64
%4 = alloca i8, i64 %3, align 32
- store volatile i8 0, i8* %4
+ store volatile i8 0, ptr %4
ret void
}
; CHECK-ALLOCA: ret void
entry:
%t = alloca inalloca i32
- store i32 42, i32* %t
- call void @pass_inalloca(i32* inalloca(i32) %t)
+ store i32 42, ptr %t
+ call void @pass_inalloca(ptr inalloca(i32) %t)
ret void
}
-declare void @pass_inalloca(i32* inalloca(i32))
+declare void @pass_inalloca(ptr inalloca(i32))
declare i32 @__gxx_personality_v0(...)
-define i64 @Invoke1() nounwind uwtable ssp sanitize_address personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @Invoke1() nounwind uwtable ssp sanitize_address personality ptr @__gxx_personality_v0 {
entry:
invoke void @NoReturnFunc()
to label %invoke.cont unwind label %lpad
ret i64 0
lpad:
- %0 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
+ %0 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
ret i64 1
}
; CHECK-LABEL: @Invoke1
@data1 = dso_local global i32 1, align 4
@data2 = dso_local global i32 2, align 4
-@__invalid$c$name_sym_data1 = internal constant i8* bitcast (i32* @data1 to i8*), section "invalid$c$name", align 8
-@__invalid$c$name_sym_data2 = internal constant i8* bitcast (i32* @data2 to i8*), section "invalid$c$name", align 8
+@__invalid$c$name_sym_data1 = internal constant ptr @data1, section "invalid$c$name", align 8
+@__invalid$c$name_sym_data2 = internal constant ptr @data2, section "invalid$c$name", align 8
; CHECK: @"__invalid$c$name_sym_data1" = internal constant{{.*}}, section "invalid$c$name"
; CHECK-NEXT: @"__invalid$c$name_sym_data2" = internal constant{{.*}}, section "invalid$c$name"
; CHECK: @"__asan_global___invalid$c$name_sym_data1"
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-;@sink = global i32* null, align 4
+;@sink = global ptr null, align 4
; Ignore direct inbounds stack access.
define void @foo() uwtable sanitize_address {
entry:
%a = alloca i32, align 4
- store i32 42, i32* %a, align 4
+ store i32 42, ptr %a, align 4
ret void
; CHECK-LABEL: define void @foo
; CHECK-NOT: __asan_report
define void @baz(i64 %i) sanitize_address {
entry:
%a = alloca [10 x i32], align 4
- %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 %i
- store i32 42, i32* %e, align 4
+ %e = getelementptr inbounds [10 x i32], ptr %a, i32 0, i64 %i
+ store i32 42, ptr %e, align 4
ret void
; CHECK-LABEL: define void @baz
; CHECK: __asan_report
define void @bar() sanitize_address {
entry:
%a = alloca [10 x i32], align 4
- %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 12
- store i32 42, i32* %e, align 4
+ %e = getelementptr inbounds [10 x i32], ptr %a, i32 0, i64 12
+ store i32 42, ptr %e, align 4
ret void
; CHECK-LABEL: define void @bar
; CHECK: __asan_report
; module ctor/dtor
; CHECK: @___asan_gen_ = private constant [8 x i8] c"<stdin>\00", align 1
-; CHECK: @llvm.used = appending global [2 x i8*] [i8* bitcast (void ()* @asan.module_ctor to i8*), i8* bitcast (void ()* @asan.module_dtor to i8*)], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* bitcast (void ()* @asan.module_ctor to i8*) }]
-; CHECK: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_dtor, i8* bitcast (void ()* @asan.module_dtor to i8*) }]
+; CHECK: @llvm.used = appending global [2 x ptr] [ptr @asan.module_ctor, ptr @asan.module_dtor], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_ctor, ptr @asan.module_ctor }]
+; CHECK: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_dtor, ptr @asan.module_dtor }]
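; Note the third field of each @llvm.global_ctors/@llvm.global_dtors entry:
; pointing it at the ctor/dtor itself makes the entry droppable together with
; that function under comdat deduplication, @llvm.used pins both functions,
; and priority 1 runs them ahead of default-priority (65535) user ctors.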
; Test that we don't instrument accesses to global arrays with static
; initializers when they are indexed with in-bounds constants, but do
; instrument all other cases.
; GlobSt is declared here and has a static initializer -- ok to optimize.
define i32 @AccessGlobSt_0_2() sanitize_address {
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @GlobSt, i64 0, i64 2), align 8
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @GlobSt, i64 0, i64 2), align 8
ret i32 %0
; CHECK-LABEL: define i32 @AccessGlobSt_0_2
; CHECK-NOT: __asan_report
; GlobSt is accessed out of bounds -- can't optimize
define i32 @AccessGlobSt_0_12() sanitize_address {
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @GlobSt, i64 0, i64 12), align 8
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @GlobSt, i64 0, i64 12), align 8
ret i32 %0
; CHECK-LABEL: define i32 @AccessGlobSt_0_12
; CHECK: __asan_report
; GlobSt is accessed with a GEP that has a non-zero first index -- can't optimize.
define i32 @AccessGlobSt_1_2() sanitize_address {
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @GlobSt, i64 1, i64 2), align 8
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @GlobSt, i64 1, i64 2), align 8
ret i32 %0
; CHECK-LABEL: define i32 @AccessGlobSt_1_2
; CHECK: __asan_report
; GlobDy is declared with a dynamic initializer -- can't optimize.
define i32 @AccessGlobDy_0_2() sanitize_address {
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @GlobDy, i64 0, i64 2), align 8
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @GlobDy, i64 0, i64 2), align 8
ret i32 %0
; CHECK-LABEL: define i32 @AccessGlobDy_0_2
; CHECK: __asan_report
; GlobEx is an external global -- can't optimize.
define i32 @AccessGlobEx_0_2() sanitize_address {
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @GlobEx, i64 0, i64 2), align 8
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @GlobEx, i64 0, i64 2), align 8
ret i32 %0
; CHECK-LABEL: define i32 @AccessGlobEx_0_2
; CHECK: __asan_report
define internal void @__cxx_global_var_init() section ".text.startup" {
entry:
%call = call i32 @initializer()
- store i32 %call, i32* @xxx, align 4
+ store i32 %call, ptr @xxx, align 4
ret void
}
-@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @__late_ctor, i8* null }, { i32, void ()*, i8* } { i32 0, void ()* @__early_ctor, i8* null }]
+@llvm.global_ctors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @__late_ctor, ptr null }, { i32, ptr, ptr } { i32 0, ptr @__early_ctor, ptr null }]
define internal void @__late_ctor() sanitize_address section ".text.startup" {
entry:
; Check that xxx is instrumented.
define void @touch_xxx() sanitize_address {
- store i32 0, i32 *@xxx, align 4
+ store i32 0, ptr @xxx, align 4
ret void
; CHECK-LABEL: touch_xxx
; CHECK: call void @__asan_report_store4
; Check that XXX is instrumented.
define void @touch_XXX() sanitize_address {
- store i32 0, i32 *@XXX, align 4
+ store i32 0, ptr @XXX, align 4
ret void
; CHECK: define void @touch_XXX
; CHECK: call void @__asan_report_store4
; Check that yyy is NOT instrumented (as it does not have a dynamic initializer).
define void @touch_yyy() sanitize_address {
- store i32 0, i32 *@yyy, align 4
+ store i32 0, ptr @yyy, align 4
ret void
; CHECK: define void @touch_yyy
; CHECK-NOT: call void @__asan_report_store4
; Check that YYY is NOT instrumented (as it does not have a dynamic initializer).
define void @touch_YYY() sanitize_address {
- store i32 0, i32 *@YYY, align 4
+ store i32 0, ptr @YYY, align 4
ret void
; CHECK: define void @touch_YYY
; CHECK-NOT: call void @__asan_report_store4
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define void @IncrementMe(i32* %a) sanitize_address {
+define void @IncrementMe(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
%tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %a, align 4
+ store i32 %tmp2, ptr %a, align 4
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define void @test_load(i32* %a, i64* %b, i512* %c, i80* %d) sanitize_address {
+define void @test_load(ptr %a, ptr %b, ptr %c, ptr %d) sanitize_address {
entry:
; CHECK-CALL: call void @__asan_load4
; CHECK-CALL: call void @__asan_load8
; CHECK-CUSTOM-PREFIX: call void @__foo_load8
; CHECK-CUSTOM-PREFIX: call void @__foo_loadN
; CHECK-INLINE-NOT: call void @__asan_load
- %tmp1 = load i32, i32* %a, align 4
- %tmp2 = load i64, i64* %b, align 8
- %tmp3 = load i512, i512* %c, align 32
- %tmp4 = load i80, i80* %d, align 8
+ %tmp1 = load i32, ptr %a, align 4
+ %tmp2 = load i64, ptr %b, align 8
+ %tmp3 = load i512, ptr %c, align 32
+ %tmp4 = load i80, ptr %d, align 8
ret void
}
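; The four loads above cover both callback shapes: accesses of 1, 2, 4, 8, or
; 16 bytes use the sized entry points (__asan_load4, __asan_load8, ...), while
; irregular widths such as i512 (64 bytes) and i80 (10 bytes) fall back to the
; generic __asan_loadN(addr, size) callback, as the custom-prefix checks show;
; in inline mode no callback is emitted at all.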
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @foo(i32* %p) sanitize_address {
+define i32 @foo(ptr %p) sanitize_address {
; CHECK: __asan_report_load4_noabort
; CHECK-NOT: unreachable
- %1 = load i32, i32* %p, align 4
+ %1 = load i32, ptr %p, align 4
ret i32 %1
}
$_ZTS3ABC = comdat any
$_ZTI3ABC = comdat any
-@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
+@_ZTVN10__cxxabiv117__class_type_infoE = external global ptr
@_ZTS3ABC = linkonce_odr constant [5 x i8] c"3ABC\00", comdat
-@_ZTI3ABC = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @_ZTS3ABC, i32 0, i32 0) }, comdat
+@_ZTI3ABC = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS3ABC }, comdat
-define void @Throw() sanitize_address personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @Throw() sanitize_address personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: define void @Throw()
entry:
%x = alloca %struct.ABC, align 4
- %0 = bitcast %struct.ABC* %x to i8*
; Poison memory in prologue: F1F1F1F1F8F3F3F3
- ; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868082052615769615, ptr %{{[0-9]+}}
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
- ; CHECK: store i8 4, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ ; CHECK: store i8 4, ptr %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.start
- %exception = call i8* @__cxa_allocate_exception(i64 4)
- invoke void @__cxa_throw(i8* %exception, i8* bitcast ({ i8*, i8* }* @_ZTI3ABC to i8*), i8* bitcast (void (%struct.ABC*)* @_ZN3ABCD2Ev to i8*)) noreturn
+ %exception = call ptr @__cxa_allocate_exception(i64 4)
+ invoke void @__cxa_throw(ptr %exception, ptr @_ZTI3ABC, ptr @_ZN3ABCD2Ev) noreturn
to label %unreachable unwind label %lpad
; CHECK: call void @__asan_handle_no_return
; CHECK-NEXT: @__cxa_throw
lpad:
- %1 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
- call void @_ZN3ABCD2Ev(%struct.ABC* nonnull %x)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ call void @_ZN3ABCD2Ev(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.end
- resume { i8*, i32 } %1
- ; CHECK: store i64 0, i64* %{{[0-9]+}}
+ resume { ptr, i32 } %0
+ ; CHECK: store i64 0, ptr %{{[0-9]+}}
; CHECK-NEXT: resume
unreachable:
unreachable
}
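; A key to the shadow bytes checked in these lifetime tests (ASan's standard
; stack poison values): F1 = left redzone, F2 = mid redzone, F3 = right
; redzone, F8 = out of scope (use-after-scope), 00 = fully addressable, and
; 01..07 = only the first N bytes of the 8-byte granule addressable. The
; comments list bytes in memory order, so on little-endian x86-64 the i64
; constant is byte-reversed: F1F1F1F1F8F3F3F3 is stored as
; 0xF3F3F3F8F1F1F1F1 = -868082052615769615. Thus "store i8 4" unpoisons the
; 4-byte object at lifetime.start and "store i8 -8" (0xF8) re-poisons it at
; lifetime.end, and __asan_handle_no_return unpoisons the whole stack before
; the throw because the frame's epilogue stores never run on that path.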
-%rtti.TypeDescriptor9 = type { i8**, i8*, [10 x i8] }
+%rtti.TypeDescriptor9 = type { ptr, ptr, [10 x i8] }
%eh.CatchableType = type { i32, i32, i32, i32, i32, i32, i32 }
%eh.CatchableTypeArray.1 = type { i32, [1 x i32] }
%eh.ThrowInfo = type { i32, i32, i32, i32 }
$"_CTA1?AUABC@@" = comdat any
$"_TI1?AUABC@@" = comdat any
-@"\01??_7type_info@@6B@" = external constant i8*
-@"\01??_R0?AUABC@@@8" = linkonce_odr global %rtti.TypeDescriptor9 { i8** @"\01??_7type_info@@6B@", i8* null, [10 x i8] c".?AUABC@@\00" }, comdat
+@"\01??_7type_info@@6B@" = external constant ptr
+@"\01??_R0?AUABC@@@8" = linkonce_odr global %rtti.TypeDescriptor9 { ptr @"\01??_7type_info@@6B@", ptr null, [10 x i8] c".?AUABC@@\00" }, comdat
@__ImageBase = external constant i8
-@"_CT??_R0?AUABC@@@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor9* @"\01??_R0?AUABC@@@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
-@"_CTA1?AUABC@@" = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableType* @"_CT??_R0?AUABC@@@84" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
-@"_TI1?AUABC@@" = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (void (%struct.ABC*)* @"\01??1ABC@@QEAA@XZ" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableTypeArray.1* @"_CTA1?AUABC@@" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, section ".xdata", comdat
+@"_CT??_R0?AUABC@@@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??_R0?AUABC@@@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
+@"_CTA1?AUABC@@" = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"_CT??_R0?AUABC@@@84" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
+@"_TI1?AUABC@@" = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"\01??1ABC@@QEAA@XZ" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"_CTA1?AUABC@@" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, section ".xdata", comdat
-define void @ThrowWin() sanitize_address personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define void @ThrowWin() sanitize_address personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: define void @ThrowWin()
entry:
%x = alloca %struct.ABC, align 4
%tmp = alloca %struct.ABC, align 4
- %0 = bitcast %struct.ABC* %x to i8*
 ; Poison memory in prologue: F1F1F1F1F8F204F3
- ; CHECK: store i64 -935355671561244175, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -935355671561244175, ptr %{{[0-9]+}}
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
- ; CHECK: store i8 4, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ ; CHECK: store i8 4, ptr %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.start
- %1 = bitcast %struct.ABC* %tmp to i8*
- invoke void @_CxxThrowException(i8* %1, %eh.ThrowInfo* nonnull @"_TI1?AUABC@@") noreturn
+ invoke void @_CxxThrowException(ptr %tmp, ptr nonnull @"_TI1?AUABC@@") noreturn
to label %unreachable unwind label %ehcleanup
; CHECK: call void @__asan_handle_no_return
; CHECK-NEXT: @_CxxThrowException
ehcleanup:
- %2 = cleanuppad within none []
- call void @"\01??1ABC@@QEAA@XZ"(%struct.ABC* nonnull %x) [ "funclet"(token %2) ]
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ %0 = cleanuppad within none []
+ call void @"\01??1ABC@@QEAA@XZ"(ptr nonnull %x) [ "funclet"(token %0) ]
+ call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: @llvm.lifetime.end
- cleanupret from %2 unwind to caller
- ; CHECK: store i64 0, i64* %{{[0-9]+}}
+ cleanupret from %0 unwind to caller
+ ; CHECK: store i64 0, ptr %{{[0-9]+}}
; CHECK-NEXT: cleanupret
unreachable:
declare i32 @__gxx_personality_v0(...)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
-declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
-declare void @_ZN3ABCD2Ev(%struct.ABC* %this) unnamed_addr
-declare void @"\01??1ABC@@QEAA@XZ"(%struct.ABC* %this)
-declare void @_CxxThrowException(i8*, %eh.ThrowInfo*)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i64) local_unnamed_addr
+declare void @_ZN3ABCD2Ev(ptr %this) unnamed_addr
+declare void @"\01??1ABC@@QEAA@XZ"(ptr %this)
+declare void @_CxxThrowException(ptr, ptr)
declare i32 @__CxxFrameHandler3(...)
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
define i32 @basic_test() sanitize_address {
; CHECK-LABEL: define i32 @basic_test()
%c = alloca i8, align 1
 ; Memory is poisoned in prologue: F1F1F1F104F2F8F3
- ; CHECK-UAS: store i64 -866676825215864335, i64* %{{[0-9]+}}
+ ; CHECK-UAS: store i64 -866676825215864335, ptr %{{[0-9]+}}
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %c)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %c)
; Memory is unpoisoned at llvm.lifetime.start: 01
- ; CHECK-UAS: store i8 1, i8* %{{[0-9]+}}
+ ; CHECK-UAS: store i8 1, ptr %{{[0-9]+}}
- store volatile i32 0, i32* %retval
- store volatile i8 0, i8* %c, align 1
+ store volatile i32 0, ptr %retval
+ store volatile i8 0, ptr %c, align 1
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %c)
+ call void @llvm.lifetime.end.p0(i64 1, ptr %c)
; Memory is poisoned at llvm.lifetime.end: F8
- ; CHECK-UAS: store i8 -8, i8* %{{[0-9]+}}
+ ; CHECK-UAS: store i8 -8, ptr %{{[0-9]+}}
; Unpoison memory at function exit in UAS mode.
- ; CHECK-UAS: store i64 0, i64* %{{[0-9]+}}
+ ; CHECK-UAS: store i64 0, ptr %{{[0-9]+}}
; CHECK-UAS: ret i32 0
ret i32 0
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
define void @lifetime_no_size() sanitize_address {
; CHECK-LABEL: define void @lifetime_no_size()
entry:
%i = alloca i32, align 4
- %i.ptr = bitcast i32* %i to i8*
; Poison memory in prologue: F1F1F1F104F3F3F3
- ; CHECK: store i64 -868083100587789839, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868083100587789839, ptr %{{[0-9]+}}
- call void @llvm.lifetime.start.p0i8(i64 -1, i8* %i.ptr)
+ call void @llvm.lifetime.start.p0(i64 -1, ptr %i)
; Check that lifetime intrinsics with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.start
- store volatile i8 0, i8* %i.ptr
+ store volatile i8 0, ptr %i
; CHECK: store volatile
- call void @llvm.lifetime.end.p0i8(i64 -1, i8* %i.ptr)
+ call void @llvm.lifetime.end.p0(i64 -1, ptr %i)
; Check that lifetime intrinsics with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.end
; Unpoison stack frame on exit.
- ; CHECK: store i64 0, i64* %{{[0-9]+}}
+ ; CHECK: store i64 0, ptr %{{[0-9]+}}
; CHECK: ret void
ret void
}
; Regular variable lifetime intrinsics.
%i = alloca i32, align 4
- %i.ptr = bitcast i32* %i to i8*
; Poison memory in prologue: F1F1F1F1F8F3F3F3
- ; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868082052615769615, ptr %{{[0-9]+}}
; Memory is unpoisoned at llvm.lifetime.start
- call void @llvm.lifetime.start.p0i8(i64 3, i8* %i.ptr)
- ; CHECK: store i8 4, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.start.p0(i64 3, ptr %i)
+ ; CHECK: store i8 4, ptr %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
- store volatile i8 0, i8* %i.ptr
+ store volatile i8 0, ptr %i
; CHECK: store volatile
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %i.ptr)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: call void @llvm.lifetime.end
; Memory is poisoned at every call to llvm.lifetime.end
- call void @llvm.lifetime.end.p0i8(i64 2, i8* %i.ptr)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.end.p0(i64 2, ptr %i)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: call void @llvm.lifetime.end
; Lifetime intrinsics for an array.
%arr = alloca [10 x i32], align 16
- %arr.ptr = bitcast [10 x i32]* %arr to i8*
- call void @llvm.lifetime.start.p0i8(i64 40, i8* %arr.ptr)
+ call void @llvm.lifetime.start.p0(i64 40, ptr %arr)
; CHECK-DEFAULT: call void @__asan_unpoison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; CHECK-NO-DYNAMIC-NOT: call void @__asan_unpoison_stack_memory(i64 %{{[^ ]+}}, i64 40)
- store volatile i8 0, i8* %arr.ptr
+ store volatile i8 0, ptr %arr
; CHECK: store volatile
- call void @llvm.lifetime.end.p0i8(i64 40, i8* %arr.ptr)
+ call void @llvm.lifetime.end.p0(i64 40, ptr %arr)
; CHECK-DEFAULT: call void @__asan_poison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; CHECK-NO-DYNAMIC-NOT: call void @__asan_poison_stack_memory(i64 %{{[^ ]+}}, i64 40)
; One more lifetime start/end for the same variable %i.
- call void @llvm.lifetime.start.p0i8(i64 2, i8* %i.ptr)
- ; CHECK: store i8 4, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.start.p0(i64 2, ptr %i)
+ ; CHECK: store i8 4, ptr %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
- store volatile i8 0, i8* %i.ptr
+ store volatile i8 0, ptr %i
; CHECK: store volatile
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %i.ptr)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.end
; Memory is unpoisoned at function exit (only once).
- ; CHECK: store i64 0, i64* %{{[0-9]+}}
+ ; CHECK: store i64 0, ptr %{{[0-9]+}}
; CHECK-NEXT: ret void
ret void
}
entry:
%i = alloca i64, align 4
- %i.ptr = bitcast i64* %i to i8*
; Poison memory in prologue: F1F1F1F1F8F3F3F3
- ; CHECK: store i64 -868082052615769615, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868082052615769615, ptr %{{[0-9]+}}
- call void @llvm.lifetime.start.p0i8(i64 8, i8* %i.ptr)
- ; CHECK: store i8 0, i8* %{{[0-9]+}}
+ call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ ; CHECK: store i8 0, ptr %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.start
- store volatile i8 0, i8* %i.ptr
+ store volatile i8 0, ptr %i
; CHECK: store volatile
br i1 %x, label %bb0, label %bb1
bb0:
- %i.ptr2 = bitcast i64* %i to i8*
br label %bb1
bb1:
- %i.phi = phi i8* [ %i.ptr, %entry ], [ %i.ptr2, %bb0 ]
- call void @llvm.lifetime.end.p0i8(i64 8, i8* %i.phi)
- ; CHECK: store i8 -8, i8* %{{[0-9]+}}
+ %i.phi = phi ptr [ %i, %entry ], [ %i, %bb0 ]
+ call void @llvm.lifetime.end.p0(i64 8, ptr %i.phi)
+ ; CHECK: store i8 -8, ptr %{{[0-9]+}}
; CHECK-NEXT: llvm.lifetime.end
ret void
- ; CHECK: store i64 0, i64* %{{[0-9]+}}
+ ; CHECK: store i64 0, ptr %{{[0-9]+}}
; CHECK-NEXT: ret void
}
; CHECK-LABEL: define void @getelementptr_args
entry:
%x = alloca [1024 x i8], align 16
- %d = alloca i8*, align 8
+ %d = alloca ptr, align 8
; F1F1F1F1
- ; CHECK: store i32 -235802127, i32* %{{[0-9]+}}
+ ; CHECK: store i32 -235802127, ptr %{{[0-9]+}}
; F3F3F3F3F3F3F3F3
- ; CHECK: store i64 -868082074056920077, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868082074056920077, ptr %{{[0-9]+}}
; F3F3F3F3F3F3F3F3
- ; CHECK: store i64 -868082074056920077, i64* %{{[0-9]+}}
+ ; CHECK: store i64 -868082074056920077, ptr %{{[0-9]+}}
- %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* %x, i64 0, i64 0
- call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 1024, ptr %x)
; CHECK: call void @__asan_set_shadow_00(i64 %{{[0-9]+}}, i64 128)
; CHECK-NEXT: call void @llvm.lifetime.start
- store i8* %0, i8** %d, align 8
- ; CHECK: store i8
+ store ptr %x, ptr %d, align 8
+ ; CHECK: store ptr
- call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
+ call void @llvm.lifetime.end.p0(i64 1024, ptr %x)
; CHECK: call void @__asan_set_shadow_f8(i64 %{{[0-9]+}}, i64 128)
; CHECK-NEXT: call void @llvm.lifetime.end
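; Here the object is large enough that scalar shadow stores would be
; unwieldy: 1024 object bytes map to 1024 / 8 = 128 shadow bytes, so the pass
; calls @__asan_set_shadow_00 to unpoison and @__asan_set_shadow_f8 to
; re-poison the whole 128-byte shadow range instead of emitting a long run of
; i64 stores.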
entry:
%a.addr = alloca i64, align 8
%b = alloca [0 x i8], align 1
- store i64 %a, i64* %a.addr, align 8
+ store i64 %a, ptr %a.addr, align 8
- %0 = bitcast [0 x i8]* %b to i8*
- call void @llvm.lifetime.start.p0i8(i64 0, i8* %0) #2
- ; CHECK: %{{[0-9]+}} = bitcast
- ; CHECK-NEXT: call void @llvm.lifetime.start
+ call void @llvm.lifetime.start.p0(i64 0, ptr %b) #2
+ ; CHECK: call void @llvm.lifetime.start
- %1 = bitcast [0 x i8]* %b to i8*
- call void @llvm.lifetime.end.p0i8(i64 0, i8* %1) #2
- ; CHECK-NEXT: %{{[0-9]+}} = bitcast
- ; CHECK-NEXT: call void @llvm.lifetime.end
+ call void @llvm.lifetime.end.p0(i64 0, ptr %b) #2
+ ; CHECK: call void @llvm.lifetime.end
ret void
; CHECK-NEXT: ret void
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc18.0.0"
-declare i32 @llvm.eh.typeid.for(i8*) #2
-declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.eh.recoverfp(i8*, i8*)
-declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare i32 @llvm.eh.typeid.for(ptr) #2
+declare ptr @llvm.frameaddress(i32)
+declare ptr @llvm.eh.recoverfp(ptr, ptr)
+declare ptr @llvm.localrecover(ptr, ptr, i32)
declare void @llvm.localescape(...) #1
declare i32 @_except_handler3(...)
-declare void @may_throw(i32* %r)
+declare void @may_throw(ptr %r)
-define i32 @main() sanitize_address personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) {
+define i32 @main() sanitize_address personality ptr @_except_handler3 {
entry:
%r = alloca i32, align 4
%__exception_code = alloca i32, align 4
- call void (...) @llvm.localescape(i32* nonnull %__exception_code)
- %0 = bitcast i32* %r to i8*
- store i32 0, i32* %r, align 4
- invoke void @may_throw(i32* nonnull %r) #4
+ call void (...) @llvm.localescape(ptr nonnull %__exception_code)
+ store i32 0, ptr %r, align 4
+ invoke void @may_throw(ptr nonnull %r) #4
to label %__try.cont unwind label %lpad
lpad: ; preds = %entry
- %1 = landingpad { i8*, i32 }
- catch i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)
- %2 = extractvalue { i8*, i32 } %1, 1
- %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)) #1
- %matches = icmp eq i32 %2, %3
+ %0 = landingpad { ptr, i32 }
+ catch ptr @"\01?filt$0@0@main@@"
+ %1 = extractvalue { ptr, i32 } %0, 1
+ %2 = call i32 @llvm.eh.typeid.for(ptr @"\01?filt$0@0@main@@") #1
+ %matches = icmp eq i32 %1, %2
br i1 %matches, label %__except, label %eh.resume
__except: ; preds = %lpad
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %__try.cont
__try.cont: ; preds = %entry, %__except
- %4 = load i32, i32* %r, align 4
- ret i32 %4
+ %3 = load i32, ptr %r, align 4
+ ret i32 %3
eh.resume: ; preds = %lpad
- resume { i8*, i32 } %1
+ resume { ptr, i32 } %0
}
; Check that the alloca remains static and the localescape call remains in the
; entry block.
; CHECK-NOT: br {{.*}}label
; CHECK: %__exception_code = alloca i32, align 4
; CHECK-NOT: br {{.*}}label
-; CHECK: call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+; CHECK: call void (...) @llvm.localescape(ptr nonnull %__exception_code)
; Function Attrs: nounwind
define internal i32 @"\01?filt$0@0@main@@"() #1 {
entry:
- %0 = tail call i8* @llvm.frameaddress(i32 1)
- %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
- %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %1, i32 0)
- %__exception_code = bitcast i8* %2 to i32*
- %3 = getelementptr inbounds i8, i8* %0, i32 -20
- %4 = bitcast i8* %3 to { i32*, i8* }**
- %5 = load { i32*, i8* }*, { i32*, i8* }** %4, align 4
- %6 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %5, i32 0, i32 0
- %7 = load i32*, i32** %6, align 4
- %8 = load i32, i32* %7, align 4
- store i32 %8, i32* %__exception_code, align 4
+ %0 = tail call ptr @llvm.frameaddress(i32 1)
+ %1 = tail call ptr @llvm.eh.recoverfp(ptr @main, ptr %0)
+ %2 = tail call ptr @llvm.localrecover(ptr @main, ptr %1, i32 0)
+ %3 = getelementptr inbounds i8, ptr %0, i32 -20
+ %4 = load ptr, ptr %3, align 4
+ %5 = getelementptr inbounds { ptr, ptr }, ptr %4, i32 0, i32 0
+ %6 = load ptr, ptr %5, align 4
+ %7 = load i32, ptr %6, align 4
+ store i32 %7, ptr %2, align 4
ret i32 1
}
; CHECK-LABEL: define internal i32 @"\01?filt$0@0@main@@"()
-; CHECK: tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* {{.*}}, i32 0)
+; CHECK: tail call ptr @llvm.localrecover(ptr @main, ptr {{.*}}, i32 0)
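; For context: @llvm.localescape in @main records the frame offset of
; %__exception_code so that this filter, running in its own frame, can
; recover its address via @llvm.frameaddress, @llvm.eh.recoverfp and
; @llvm.localrecover. That only works if the alloca stays at a fixed static
; offset, which is why ASan must not move it into a dynamic stack frame.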
-define void @ScaleFilterCols_SSSE3(i8* %dst_ptr, i8* %src_ptr, i32 %dst_width, i32 %x, i32 %dx) sanitize_address {
+define void @ScaleFilterCols_SSSE3(ptr %dst_ptr, ptr %src_ptr, i32 %dst_width, i32 %x, i32 %dx) sanitize_address {
entry:
%dst_width.addr = alloca i32, align 4
- store i32 %dst_width, i32* %dst_width.addr, align 4
- %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
+ store i32 %dst_width, ptr %dst_width.addr, align 4
+ %0 = call { ptr, ptr, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) nonnull %dst_width.addr, i32 %x, i32 %dx, ptr %dst_ptr, ptr %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
-define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+define void @memintr_test(ptr %a, ptr %b) nounwind uwtable sanitize_address {
entry:
- tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i1 false)
- tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
+ tail call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 100, i1 false)
+ tail call void @llvm.memmove.p0.p0.i64(ptr %a, ptr %b, i64 100, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %b, i64 100, i1 false)
ret void
}
; CHECK-LABEL: memintr_test
; CHECK-NOPREFIX: @memcpy
; CHECK: ret void
-define void @memintr_inline_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+define void @memintr_inline_test(ptr %a, ptr %b) nounwind uwtable sanitize_address {
entry:
- tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 100, i1 false)
- tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
+ tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 100, i1 false)
+ tail call void @llvm.memcpy.inline.p0.p0.i64(ptr %a, ptr %b, i64 100, i1 false)
ret void
}
; CHECK-LABEL: memintr_inline_test
; CHECK-NOPREFIX: @memcpy
; CHECK: ret void
-define void @memintr_test_nosanitize(i8* %a, i8* %b) nounwind uwtable {
+define void @memintr_test_nosanitize(ptr %a, ptr %b) nounwind uwtable {
entry:
- tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i1 false)
- tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i1 false)
+ tail call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 100, i1 false)
+ tail call void @llvm.memmove.p0.p0.i64(ptr %a, ptr %b, i64 100, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %b, i64 100, i1 false)
ret void
}
; CHECK-LABEL: memintr_test_nosanitize
; CHECK: @llvm.memcpy
; CHECK: ret void
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
-define void @memintr_element_atomic_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+define void @memintr_element_atomic_test(ptr %a, ptr %b) nounwind uwtable sanitize_address {
; This is a canary test to make sure that these are not lowered into calls
; that lack the element-atomic property. Eventually, asan will have to be
; enhanced to lower these properly.
; CHECK-LABEL: memintr_element_atomic_test
- ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 100, i32 1)
- ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
- ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ ; CHECK-NEXT: tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 100, i32 1)
+ ; CHECK-NEXT: tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i32 1)
+ ; CHECK-NEXT: tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i32 1)
; CHECK-NEXT: ret void
- tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 100, i32 1)
- tail call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
- tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 100, i32 1)
+ tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %a, i8 0, i64 100, i32 1)
+ tail call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i32 1)
+ tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i32 1)
ret void
}
define i32 @test_load() sanitize_address {
entry:
- %tmp = load i32, i32* @g, align 4
+ %tmp = load i32, ptr @g, align 4
ret i32 %tmp
}
;
; RUN: opt < %s -passes=asan -S | FileCheck %s
-define internal i32 @foo(i32* %p) sanitize_address {
- %rv = load i32, i32* %p
+define internal i32 @foo(ptr %p) sanitize_address {
+ %rv = load i32, ptr %p
ret i32 %rv
}
-declare void @alloca_test_use([10 x i8]*)
-define i32 @call_foo(i32* %a) sanitize_address {
+declare void @alloca_test_use(ptr)
+define i32 @call_foo(ptr %a) sanitize_address {
%x = alloca [10 x i8], align 1
- call void @alloca_test_use([10 x i8]* %x)
- %r = musttail call i32 @foo(i32* %a)
+ call void @alloca_test_use(ptr %x)
+ %r = musttail call i32 @foo(ptr %a)
ret i32 %r
}
-; CHECK-LABEL: define i32 @call_foo(i32* %a)
-; CHECK: %r = musttail call i32 @foo(i32* %a)
+; CHECK-LABEL: define i32 @call_foo(ptr %a)
+; CHECK: %r = musttail call i32 @foo(ptr %a)
; CHECK-NEXT: ret i32 %r
-define i32 @call_foo_cast(i32* %a) sanitize_address {
+define i32 @call_foo_cast(ptr %a) sanitize_address {
%x = alloca [10 x i8], align 1
- call void @alloca_test_use([10 x i8]* %x)
- %r = musttail call i32 @foo(i32* %a)
+ call void @alloca_test_use(ptr %x)
+ %r = musttail call i32 @foo(ptr %a)
%t = bitcast i32 %r to i32
ret i32 %t
}
-; CHECK-LABEL: define i32 @call_foo_cast(i32* %a)
-; CHECK: %r = musttail call i32 @foo(i32* %a)
+; CHECK-LABEL: define i32 @call_foo_cast(ptr %a)
+; CHECK: %r = musttail call i32 @foo(ptr %a)
; CHECK-NEXT: %t = bitcast i32 %r to i32
; CHECK-NEXT: ret i32 %t
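; A musttail call must be immediately followed by ret (allowing only a
; bitcast in between), so ASan cannot insert its usual return-path stack
; unpoisoning between the call and the return; the checks verify both the
; call/ret and call/bitcast/ret sequences survive instrumentation intact.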
; RUN: opt < %s -passes=asan -S -mtriple=x86_64-scei-ps4 | FileCheck %s
; RUN: opt < %s -passes=asan -S -mtriple=x86_64-sie-ps5 | FileCheck %s
-define i32 @read_4_bytes(i32* %a) sanitize_address {
+define i32 @read_4_bytes(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
; RUN: opt < %s -passes=asan -asan-mapping-offset 0xc0ffee -asan-mapping-scale 0 -S | FileCheck --check-prefix=CHECK-BOTH %s
target triple = "x86_64-unknown-linux-gnu"
-define i32 @read_offset(i32* %a) sanitize_address {
+define i32 @read_offset(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
; CHECK-OFFSET-LABEL: @read_offset
; CHECK-OFFSET-NEXT: add{{.*}}3735928559
; CHECK-OFFSET: ret
-define i32 @read_scale(i32* %a) sanitize_address {
+define i32 @read_scale(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
; CHECK-SCALE-LABEL: @read_scale
; CHECK-SCALE-NEXT: add{{.*}}
; CHECK-SCALE: ret
-define i32 @read_both(i32* %a) sanitize_address {
+define i32 @read_both(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
; CHECK-BOTH-LABEL: @read_both
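; Note on the decimal constants: 3735928559 is 0xdeadbeef, the
; -asan-mapping-offset presumably passed by an elided CHECK-OFFSET RUN line,
; while the CHECK-BOTH run above passes 0xc0ffee (12648430); the offset always
; surfaces as the immediate of the shadow-base add.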
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-declare void @Foo(i8*)
+declare void @Foo(ptr)
define void @Bar() uwtable sanitize_address {
entry:
%x = alloca [650 x i8], align 16
- %xx = getelementptr inbounds [650 x i8], [650 x i8]* %x, i64 0, i64 0
+ %xx = getelementptr inbounds [650 x i8], ptr %x, i64 0, i64 0
%y = alloca [13 x i8], align 1
- %yy = getelementptr inbounds [13 x i8], [13 x i8]* %y, i64 0, i64 0
+ %yy = getelementptr inbounds [13 x i8], ptr %y, i64 0, i64 0
%z = alloca [40 x i8], align 1
- %zz = getelementptr inbounds [40 x i8], [40 x i8]* %z, i64 0, i64 0
+ %zz = getelementptr inbounds [40 x i8], ptr %z, i64 0, i64 0
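; Shadow-byte legend for the magic constants checked below: 0x00 = fully
; addressable, 0x01-0x07 = first N bytes addressable, 0xF1 = stack left
; redzone, 0xF2 = stack mid redzone, 0xF3 = stack right redzone,
; 0xF8 = stack use after scope. The i16/i32/i64 store constants are these byte
; patterns reinterpreted as signed integers in the target's byte order; e.g.
; on this big-endian target F1F1F1F1 is 0xF1F1F1F1 = -235802127 as an i32, and
; 02F2F2F2F2F2F2F2 is 0x02F2F2F2F2F2F2F2 = 212499257711850226 as an i64. The
; x86_64 copy of this test later in the file differs only in byte order.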
; CHECK: [[SHADOW_BASE:%[0-9]+]] = add i64 %{{[0-9]+}}, 17592186044416
; F1F1F1F1
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-NEXT: store [[TYPE]] -235802127, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i32 -235802127, ptr [[PTR]], align 1
; 02F2F2F2F2F2F2F2
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] 212499257711850226, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 212499257711850226, ptr [[PTR]], align 1
; F2F2F2F2F2F2F2F2
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 93
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F20005F2F2000000
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 101
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] -1008799775530680320, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 -1008799775530680320, ptr [[PTR]], align 1
; F3F3F3F3
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 111
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-NEXT: store [[TYPE]] -202116109, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i32 -202116109, ptr [[PTR]], align 1
; F3
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 115
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-NEXT: store [[TYPE]] -13, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i8 -13, ptr [[PTR]], align 1
; F1F1F1F1
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -235802127, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -235802127, ptr [[PTR]], align 1
; F8F8F8...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; F2F2F2F2F2F2F2F2
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 86
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F2F2F2F2F2F2F2F2
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 94
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F8F8F2F2F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -506387832706107144, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -506387832706107144, ptr [[PTR]], align 1
; F8F3F3F3
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -118230029, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -118230029, ptr [[PTR]], align 1
; F3F3
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 114
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -3085, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 -3085, ptr [[PTR]], align 1
; CHECK-LABEL: %xx = getelementptr inbounds
; CHECK-NEXT: %yy = getelementptr inbounds
; CHECK-NEXT: %zz = getelementptr inbounds
- call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
+ call void @llvm.lifetime.start.p0(i64 650, ptr %xx)
; 0000...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_00(i64 [[OFFSET]], i64 81)
; 02
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 2, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 2, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 650, ptr %xx)
- call void @Foo(i8* %xx)
- ; CHECK-NEXT: call void @Foo(i8* %xx)
+ call void @Foo(ptr %xx)
+ ; CHECK-NEXT: call void @Foo(ptr %xx)
- call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
+ call void @llvm.lifetime.end.p0(i64 650, ptr %xx)
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_f8(i64 [[OFFSET]], i64 82)
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 650, ptr %xx)
- call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
+ call void @llvm.lifetime.start.p0(i64 13, ptr %yy)
; 0005
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 5, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 5, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 13, ptr %yy)
- call void @Foo(i8* %yy)
- ; CHECK-NEXT: call void @Foo(i8* %yy)
+ call void @Foo(ptr %yy)
+ ; CHECK-NEXT: call void @Foo(ptr %yy)
- call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
+ call void @llvm.lifetime.end.p0(i64 13, ptr %yy)
; F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -1800, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 -1800, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 13, ptr %yy)
- call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
+ call void @llvm.lifetime.start.p0(i64 40, ptr %zz)
; 00000000
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 0, ptr [[PTR]], align 1
; 00
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 0, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr %zz)
- call void @Foo(i8* %zz)
- ; CHECK-NEXT: call void @Foo(i8* %zz)
+ call void @Foo(ptr %zz)
+ ; CHECK-NEXT: call void @Foo(ptr %zz)
- call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
+ call void @llvm.lifetime.end.p0(i64 40, ptr %zz)
; F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -117901064, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -117901064, ptr [[PTR]], align 1
; F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -8, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 -8, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr %zz)
; CHECK: {{^[0-9]+}}:
; 00000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i32 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 93
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 101
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 00000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 111
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i32 0, ptr [[PTR]], align 1
; 00
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 115
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i8 0, ptr [[PTR]], align 1
; 0000...
; EXIT-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
; CHECK: ret void
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; CHECK-ON: declare void @__asan_set_shadow_00(i64, i64)
; CHECK-ON: declare void @__asan_set_shadow_f1(i64, i64)
target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @Foo(i8*)
+declare void @Foo(ptr)
define void @Bar() uwtable sanitize_address {
entry:
%x = alloca [650 x i8], align 16
- %xx = getelementptr inbounds [650 x i8], [650 x i8]* %x, i64 0, i64 0
+ %xx = getelementptr inbounds [650 x i8], ptr %x, i64 0, i64 0
%y = alloca [13 x i8], align 1
- %yy = getelementptr inbounds [13 x i8], [13 x i8]* %y, i64 0, i64 0
+ %yy = getelementptr inbounds [13 x i8], ptr %y, i64 0, i64 0
%z = alloca [40 x i8], align 1
- %zz = getelementptr inbounds [40 x i8], [40 x i8]* %z, i64 0, i64 0
+ %zz = getelementptr inbounds [40 x i8], ptr %z, i64 0, i64 0
; CHECK: [[SHADOW_BASE:%[0-9]+]] = add i64 %{{[0-9]+}}, 2147450880
; F1F1F1F1
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-NEXT: store [[TYPE]] -235802127, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i32 -235802127, ptr [[PTR]], align 1
; 02F2F2F2F2F2F2F2
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] -940422246894996990, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 -940422246894996990, ptr [[PTR]], align 1
; F2F2F2F2F2F2F2F2
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 93
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F20005F2F2000000
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 101
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-NEXT: store [[TYPE]] 1043442499826, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i64 1043442499826, ptr [[PTR]], align 1
; F3F3F3F3
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 111
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-NEXT: store [[TYPE]] -202116109, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i32 -202116109, ptr [[PTR]], align 1
; F3
; ENTRY-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 115
- ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-NEXT: store [[TYPE]] -13, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-NEXT: store i8 -13, ptr [[PTR]], align 1
; F1F1F1F1
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -235802127, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -235802127, ptr [[PTR]], align 1
; F8F8F8...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; F2F2F2F2F2F2F2F2
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 86
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F2F2F2F2F2F2F2F2
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 94
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -940422246894996750, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -940422246894996750, ptr [[PTR]], align 1
; F8F8F2F2F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -506381209967593224, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i64 -506381209967593224, ptr [[PTR]], align 1
; F8F3F3F3
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -202116104, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -202116104, ptr [[PTR]], align 1
; F3F3
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 114
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -3085, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 -3085, ptr [[PTR]], align 1
; CHECK-LABEL: %xx = getelementptr inbounds
; CHECK-NEXT: %yy = getelementptr inbounds
; CHECK-NEXT: %zz = getelementptr inbounds
- call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
+ call void @llvm.lifetime.start.p0(i64 650, ptr %xx)
; 0000...
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_00(i64 [[OFFSET]], i64 81)
; 02
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 2, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 2, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 650, ptr %xx)
- call void @Foo(i8* %xx)
- ; CHECK-NEXT: call void @Foo(i8* %xx)
+ call void @Foo(ptr %xx)
+ ; CHECK-NEXT: call void @Foo(ptr %xx)
- call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
+ call void @llvm.lifetime.end.p0(i64 650, ptr %xx)
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 4
; ENTRY-UAS-NEXT: call void @__asan_set_shadow_f8(i64 [[OFFSET]], i64 82)
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 650, i8* %xx)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 650, ptr %xx)
- call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
+ call void @llvm.lifetime.start.p0(i64 13, ptr %yy)
; 0005
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 1280, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 1280, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 13, ptr %yy)
- call void @Foo(i8* %yy)
- ; CHECK-NEXT: call void @Foo(i8* %yy)
+ call void @Foo(ptr %yy)
+ ; CHECK-NEXT: call void @Foo(ptr %yy)
- call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
+ call void @llvm.lifetime.end.p0(i64 13, ptr %yy)
; F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 102
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i16]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -1800, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i16 -1800, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 13, i8* %yy)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 13, ptr %yy)
- call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
+ call void @llvm.lifetime.start.p0(i64 40, ptr %zz)
; 00000000
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 0, ptr [[PTR]], align 1
; 00
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 0, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr %zz)
- call void @Foo(i8* %zz)
- ; CHECK-NEXT: call void @Foo(i8* %zz)
+ call void @Foo(ptr %zz)
+ ; CHECK-NEXT: call void @Foo(ptr %zz)
- call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
+ call void @llvm.lifetime.end.p0(i64 40, ptr %zz)
; F8F8F8F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 106
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -117901064, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i32 -117901064, ptr [[PTR]], align 1
; F8
; ENTRY-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 110
- ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; ENTRY-UAS-NEXT: store [[TYPE]] -8, [[TYPE]]* [[PTR]], align 1
+ ; ENTRY-UAS-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; ENTRY-UAS-NEXT: store i8 -8, ptr [[PTR]], align 1
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 40, i8* %zz)
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr %zz)
; CHECK: {{^[0-9]+}}:
; 00000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i32 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 85
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 93
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 0000000000000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 101
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i64]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i64 0, ptr [[PTR]], align 1
; 00000000
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 111
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i32]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i32 0, ptr [[PTR]], align 1
; 00
; EXIT-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 115
- ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to [[TYPE:i8]]*
- ; EXIT-NEXT: store [[TYPE]] 0, [[TYPE]]* [[PTR]], align 1
+ ; EXIT-NEXT: [[PTR:%[0-9]+]] = inttoptr i64 [[OFFSET]] to ptr
+ ; EXIT-NEXT: store i8 0, ptr [[PTR]], align 1
; 0000...
; EXIT-UAS-NEXT: [[OFFSET:%[0-9]+]] = add i64 [[SHADOW_BASE]], 0
; CHECK: ret void
}
-declare void @foo(i32*)
+declare void @foo(ptr)
define void @PR41481(i1 %b) sanitize_address {
; CHECK-LABEL: @PR41481
entry:
%p1 = alloca i32
%p2 = alloca i32
- %q1 = bitcast i32* %p1 to i8*
- %q2 = bitcast i32* %p2 to i8*
br label %bb1
; Since we cannot account for all lifetime intrinsics in this function, we
; ignore them and keep both allocas addressable for the whole function.
; ENTRY-UAS: store i64 -935356719533264399
bb1:
- %p = select i1 %b, i32* %p1, i32* %p2
- %q = select i1 %b, i8* %q1, i8* %q2
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %q)
- call void @foo(i32* %p)
+ %p = select i1 %b, ptr %p1, ptr %p2
+ %q = select i1 %b, ptr %p1, ptr %p2
+ call void @llvm.lifetime.start.p0(i64 4, ptr %q)
+ call void @foo(ptr %p)
br i1 %b, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %q1)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %p1)
br label %end
bb3:
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %q2)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %p2)
br label %end
end:
  ret void
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; CHECK-ON: declare void @__asan_set_shadow_00(i64, i64)
; CHECK-ON: declare void @__asan_set_shadow_f1(i64, i64)
%struct.A = type { [8 x i32] }
-declare i32 @bar(%struct.A*)
+declare i32 @bar(ptr)
; Test behavior for a named argument with explicit alignment. The memcpy and
; alloca alignments should match the explicit alignment of 64.
-define void @foo(%struct.A* byval(%struct.A) align 64 %a) sanitize_address {
+define void @foo(ptr byval(%struct.A) align 64 %a) sanitize_address {
entry:
; CHECK-LABEL: foo
; CHECK: call i64 @__asan_stack_malloc
; CHECK: alloca i8, i64 {{.*}} align 64
-; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
-; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
-; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %a
-; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}} align 64 [[aBytePtr]],{{[^,]+}},
-; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to ptr
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyPtr]]{{[^%]+}} align 64 %a,{{[^,]+}},
+; CHECK: call i32 @bar(ptr [[copyPtr]])
; CHECK: ret void
- %call = call i32 @bar(%struct.A* %a)
+ %call = call i32 @bar(ptr %a)
ret void
}
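; Roughly the C shape being tested (hypothetical source, for orientation only):
;   struct A { int a[8]; };
;   int bar(struct A *);
;   void foo(struct A a) { bar(&a); }  // %a arrives byval; ASan copies it into
;                                      // the instrumented fake frame first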
; Test behavior for an unnamed argument without explicit alignment. Here the
; copy gets the ABI minimum alignment of 4 bytes since struct.A contains i32s which have 4-byte
; alignment. However, the alloca alignment will be 32 since that is the value
; passed via the -asan-realign-stack option, which is greater than 4.
-define void @baz(%struct.A* byval(%struct.A)) sanitize_address {
+define void @baz(ptr byval(%struct.A)) sanitize_address {
entry:
; CHECK-LABEL: baz
; CHECK: call i64 @__asan_stack_malloc
; CHECK: alloca i8, i64 {{.*}} align 32
-; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to %struct.A*
-; CHECK: [[copyBytePtr:%[^ \t]+]] = bitcast %struct.A* [[copyPtr]]
-; CHECK: [[aBytePtr:%[^ \t]+]] = bitcast %struct.A* %0
-; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyBytePtr]]{{[^%]+}} align 4 [[aBytePtr]],{{[^,]+}}
-; CHECK: call i32 @bar(%struct.A* [[copyPtr]])
+; CHECK: [[copyPtr:%[^ \t]+]] = inttoptr i64 %{{[^ \t]+}} to ptr
+; CHECK: call void @llvm.memcpy{{[^%]+}}[[copyPtr]]{{[^%]+}} align 4 %0,{{[^,]+}}
+; CHECK: call i32 @bar(ptr [[copyPtr]])
; CHECK: ret void
- %call = call i32 @bar(%struct.A* %0)
+ %call = call i32 @bar(ptr %0)
ret void
}
target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @Foo(i8*)
+declare void @Foo(ptr)
define void @Bar() uwtable sanitize_address {
entry:
; CHECK-PLAIN: ret void
; CHECK-UAR-LABEL: Bar
-; CHECK-UAR-RUNTIME: load i32, i32* @__asan_option_detect_stack_use_after_return
+; CHECK-UAR-RUNTIME: load i32, ptr @__asan_option_detect_stack_use_after_return
; CHECK-UAR-RUNTIME: label
; CHECK-UAR-RUNTIME: call i64 @__asan_stack_malloc_4
; CHECK-UAR-ALWAYS: call i64 @__asan_stack_malloc_always_4
%x = alloca [20 x i8], align 16
%y = alloca [25 x i8], align 1
%z = alloca [500 x i8], align 1
- %xx = getelementptr inbounds [20 x i8], [20 x i8]* %x, i64 0, i64 0
- call void @Foo(i8* %xx)
- %yy = getelementptr inbounds [25 x i8], [25 x i8]* %y, i64 0, i64 0
- call void @Foo(i8* %yy)
- %zz = getelementptr inbounds [500 x i8], [500 x i8]* %z, i64 0, i64 0
- call void @Foo(i8* %zz)
+ call void @Foo(ptr %x)
+ call void @Foo(ptr %y)
+ call void @Foo(ptr %z)
ret void
}
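; The numeric suffix of @__asan_stack_malloc_4 / @__asan_stack_malloc_always_4
; is the fake-stack size-class index derived from the total frame size; when
; use-after-return checking is enabled the runtime hands out a fake frame from
; that class instead of using the real stack alloca.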
; CHECK-LABEL: Func1
; CHECK: entry:
-; CHECK-RUNTIME: load i32, i32* @__asan_option_detect_stack_use_after_return
-; COM: CHECK-NORUNTIME-NOT: load i32, i32* @__asan_option_detect_stack_use_after_return
+; CHECK-RUNTIME: load i32, ptr @__asan_option_detect_stack_use_after_return
+; COM: CHECK-NORUNTIME-NOT: load i32, ptr @__asan_option_detect_stack_use_after_return
; CHECK-RUNTIME: [[UAR_ENABLED_BB:^[0-9]+]]:
; CHECK-RUNTIME: [[FAKE_STACK_RT:%[0-9]+]] = call i64 @__asan_stack_malloc_
; CHECK: [[NO_FAKE_STACK_BB:^[0-9]+]]:
; CHECK: %MyAlloca = alloca i8, i64
-; CHECK: [[ALLOCA:%[0-9]+]] = ptrtoint i8* %MyAlloca
+; CHECK: [[ALLOCA:%[0-9]+]] = ptrtoint ptr %MyAlloca
; CHECK-RUNTIME: phi i64 [ [[FAKE_STACK]], %[[FAKE_STACK_BB]] ], [ [[ALLOCA]], %[[NO_FAKE_STACK_BB]] ]
; CHECK-ALWAYS: phi i64 [ [[FAKE_STACK_RT]], %entry ], [ [[ALLOCA]], %[[NO_FAKE_STACK_BB]] ]
; CHECK: ret void
%XXX = alloca [20 x i8], align 1
- %arr.ptr = bitcast [20 x i8]* %XXX to i8*
- store volatile i8 0, i8* %arr.ptr
+ store volatile i8 0, ptr %XXX
ret void
}
; CHECK: ret void
%XXX = alloca [20 x i8], align 1
- %arr.ptr = bitcast [20 x i8]* %XXX to i8*
- store volatile i8 0, i8* %arr.ptr
+ store volatile i8 0, ptr %XXX
call void asm sideeffect "mov %%rbx, %%rcx", "~{dirflag},~{fpsr},~{flags}"() nounwind
ret void
}
; CHECK: ret void
entry:
%a = alloca i32, align 4
- %call = call i32 @_setjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @_ZL3buf, i32 0, i32 0)) nounwind returns_twice
+ %call = call i32 @_setjmp(ptr @_ZL3buf) nounwind returns_twice
%cmp = icmp eq i32 0, %call
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @longjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @_ZL3buf, i32 0, i32 0), i32 1) noreturn nounwind
+ call void @longjmp(ptr @_ZL3buf, i32 1) noreturn nounwind
unreachable
if.end: ; preds = %entry
- call void @_Z10escape_ptrPi(i32* %a)
+ call void @_Z10escape_ptrPi(ptr %a)
ret void
}
-declare i32 @_setjmp(%struct.__jmp_buf_tag*) nounwind returns_twice
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) noreturn nounwind
-declare void @_Z10escape_ptrPi(i32*)
+declare i32 @_setjmp(ptr) nounwind returns_twice
+declare void @longjmp(ptr, i32) noreturn nounwind
+declare void @_Z10escape_ptrPi(ptr)
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare void @Use(i8*)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @Use(ptr)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
; CHECK: private unnamed_addr constant{{.*}}3 32 10 3 XXX 64 20 3 YYY 128 30 3 ZZZ\0
; CHECK: private unnamed_addr constant{{.*}}3 32 5 3 AAA 64 55 3 BBB 160 555 3 CCC\0
%XXX = alloca [10 x i8], align 1
%YYY = alloca [20 x i8], align 1
%ZZZ = alloca [30 x i8], align 1
- %arr1.ptr = bitcast [10 x i8]* %XXX to i8*
- store volatile i8 0, i8* %arr1.ptr
- %arr2.ptr = bitcast [20 x i8]* %YYY to i8*
- store volatile i8 0, i8* %arr2.ptr
- %arr3.ptr = bitcast [30 x i8]* %ZZZ to i8*
- store volatile i8 0, i8* %arr3.ptr
+ store volatile i8 0, ptr %XXX
+ store volatile i8 0, ptr %YYY
+ store volatile i8 0, ptr %ZZZ
ret void
}
%AAA = alloca [5 x i8], align 1
%BBB = alloca [55 x i8], align 1
%CCC = alloca [555 x i8], align 1
- %arr1.ptr = bitcast [5 x i8]* %AAA to i8*
- store volatile i8 0, i8* %arr1.ptr
- %arr2.ptr = bitcast [55 x i8]* %BBB to i8*
- store volatile i8 0, i8* %arr2.ptr
- %arr3.ptr = bitcast [555 x i8]* %CCC to i8*
- store volatile i8 0, i8* %arr3.ptr
+ store volatile i8 0, ptr %AAA
+ store volatile i8 0, ptr %BBB
+ store volatile i8 0, ptr %CCC
ret void
}
%AAA = alloca [128 x i8], align 16
%BBB = alloca [128 x i8], align 64
%CCC = alloca [128 x i8], align 256
- %arr1.ptr = bitcast [128 x i8]* %AAA to i8*
- store volatile i8 0, i8* %arr1.ptr
- %arr2.ptr = bitcast [128 x i8]* %BBB to i8*
- store volatile i8 0, i8* %arr2.ptr
- %arr3.ptr = bitcast [128 x i8]* %CCC to i8*
- store volatile i8 0, i8* %arr3.ptr
+ store volatile i8 0, ptr %AAA
+ store volatile i8 0, ptr %BBB
+ store volatile i8 0, ptr %CCC
ret void
}
define void @Func5() sanitize_address #0 !dbg !11 {
%AAA = alloca i32, align 4 ; File is not the same as !11
%BBB = alloca i32, align 4 ; File is the same as !11
- %BBB.ptr = bitcast i32* %BBB to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %BBB.ptr), !dbg !12
- store volatile i32 5, i32* %BBB, align 4
- %AAA.ptr = bitcast i32* %AAA to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %AAA.ptr), !dbg !14
- store volatile i32 3, i32* %AAA, align 4
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %AAA.ptr), !dbg !17
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %BBB.ptr), !dbg !18
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %BBB), !dbg !12
+ store volatile i32 5, ptr %BBB, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %AAA), !dbg !14
+ store volatile i32 3, ptr %AAA, align 4
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %AAA), !dbg !17
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %BBB), !dbg !18
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-declare i8* @memchr(i8* %a, i32 %b, i64 %c)
-declare i32 @memcmp(i8* %a, i8* %b, i64 %c)
-declare i32 @strcmp(i8* %a, i8* %b)
-declare i8* @strcpy(i8* %a, i8* %b)
-declare i8* @stpcpy(i8* %a, i8* %b)
-declare i64 @strlen(i8* %a)
-declare i64 @strnlen(i8* %a, i64 %b)
+declare ptr @memchr(ptr %a, i32 %b, i64 %c)
+declare i32 @memcmp(ptr %a, ptr %b, i64 %c)
+declare i32 @strcmp(ptr %a, ptr %b)
+declare ptr @strcpy(ptr %a, ptr %b)
+declare ptr @stpcpy(ptr %a, ptr %b)
+declare i64 @strlen(ptr %a)
+declare i64 @strnlen(ptr %a, i64 %b)
; CHECK: call{{.*}}@memchr{{.*}} #[[ATTR:[0-9]+]]
; CHECK: call{{.*}}@memcmp{{.*}} #[[ATTR]]
; CHECK: call{{.*}}@strnlen{{.*}} #[[ATTR]]
; CHECK: attributes #[[ATTR]] = { nobuiltin }
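; ASan marks calls to these libc string routines nobuiltin so later
; optimizations cannot fold them into intrinsics or inline expansions, keeping
; them visible to the sanitizer runtime's interceptors.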
-define void @f1(i8* %a, i8* %b) nounwind uwtable sanitize_address {
- tail call i8* @memchr(i8* %a, i32 1, i64 12)
- tail call i32 @memcmp(i8* %a, i8* %b, i64 12)
- tail call i32 @strcmp(i8* %a, i8* %b)
- tail call i8* @strcpy(i8* %a, i8* %b)
- tail call i8* @stpcpy(i8* %a, i8* %b)
- tail call i64 @strlen(i8* %a)
- tail call i64 @strnlen(i8* %a, i64 12)
+define void @f1(ptr %a, ptr %b) nounwind uwtable sanitize_address {
+ tail call ptr @memchr(ptr %a, i32 1, i64 12)
+ tail call i32 @memcmp(ptr %a, ptr %b, i64 12)
+ tail call i32 @strcmp(ptr %a, ptr %b)
+ tail call ptr @strcpy(ptr %a, ptr %b)
+ tail call ptr @stpcpy(ptr %a, ptr %b)
+ tail call i64 @strlen(ptr %a)
+ tail call i64 @strnlen(ptr %a, i64 12)
ret void
}
; RUN: opt < %s -passes=asan -asan-mapping-scale=5 -S | FileCheck --check-prefixes=CHECK,CHECK-S5 %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @read_4_bytes(i32* %a) sanitize_address {
+define i32 @read_4_bytes(ptr %a) sanitize_address {
entry:
- %tmp1 = load i32, i32* %a, align 4
+ %tmp1 = load i32, ptr %a, align 4
ret i32 %tmp1
}
; CHECK-LABEL: @read_4_bytes
; CHECK-S5-NEXT: add{{.*}}2147352576
; CHECK: ret
-define void @example_atomicrmw(i64* %ptr) nounwind uwtable sanitize_address {
+define void @example_atomicrmw(ptr %ptr) nounwind uwtable sanitize_address {
entry:
- %0 = atomicrmw add i64* %ptr, i64 1 seq_cst
+ %0 = atomicrmw add ptr %ptr, i64 1 seq_cst
ret void
}
; CHECK: atomicrmw
; CHECK: ret
-define void @example_cmpxchg(i64* %ptr, i64 %compare_to, i64 %new_value) nounwind uwtable sanitize_address {
+define void @example_cmpxchg(ptr %ptr, i64 %compare_to, i64 %new_value) nounwind uwtable sanitize_address {
entry:
- %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst seq_cst
+ %0 = cmpxchg ptr %ptr, i64 %compare_to, i64 %new_value seq_cst seq_cst
ret void
}
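; Both atomic operations access memory through %ptr, so ASan instruments them
; like an ordinary 8-byte write; the CHECKs only assert that the atomic
; instructions themselves survive instrumentation unchanged.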
; Check that the address sanitizer pass can be reused
; RUN: opt < %s -S -run-twice -passes=asan
-define void @foo(i64* %b) nounwind uwtable sanitize_address {
+define void @foo(ptr %b) nounwind uwtable sanitize_address {
entry:
- store i64 0, i64* %b, align 1
+ store i64 0, ptr %b, align 1
ret void
}
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-%struct.A = type { i32 (...)** }
-declare void @__ubsan_handle_dynamic_type_cache_miss(i8*, i64, i64) uwtable
-declare void @__ubsan_handle_pointer_overflow(i8*, i64, i64) uwtable
+%struct.A = type { ptr }
+declare void @__ubsan_handle_dynamic_type_cache_miss(ptr, i64, i64) uwtable
+declare void @__ubsan_handle_pointer_overflow(ptr, i64, i64) uwtable
@__ubsan_vptr_type_cache = external global [128 x i64]
@.src = private unnamed_addr constant [19 x i8] c"tmp/ubsan/vptr.cpp\00", align 1
@0 = private unnamed_addr constant { i16, i16, [4 x i8] } { i16 -1, i16 0, [4 x i8] c"'A'\00" }
-@_ZTI1A = external constant i8*
-@1 = private unnamed_addr global { { [19 x i8]*, i32, i32 }, { i16, i16, [4 x i8] }*, i8*, i8 } { { [19 x i8]*, i32, i32 } { [19 x i8]* @.src, i32 2, i32 18 }, { i16, i16, [4 x i8] }* @0, i8* bitcast (i8** @_ZTI1A to i8*), i8 4 }
-@2 = private unnamed_addr global { { [19 x i8]*, i32, i32 } } { { [19 x i8]*, i32, i32 } { [19 x i8]* @.src, i32 24, i32 25 } }
+@_ZTI1A = external constant ptr
+@1 = private unnamed_addr global { { ptr, i32, i32 }, ptr, ptr, i8 } { { ptr, i32, i32 } { ptr @.src, i32 2, i32 18 }, ptr @0, ptr @_ZTI1A, i8 4 }
+@2 = private unnamed_addr global { { ptr, i32, i32 } } { { ptr, i32, i32 } { ptr @.src, i32 24, i32 25 } }
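; The hash computation below is UBSan's vptr type-cache probe; every
; instruction in it carries !nosanitize, so ASan must skip it
; (CHECK-NOT: __asan_report_load8) while still instrumenting the two real
; vtable loads that precede it.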
-define void @_Z3BarP1A(%struct.A* %a) uwtable sanitize_address {
+define void @_Z3BarP1A(ptr %a) uwtable sanitize_address {
; CHECK-LABEL: define void @_Z3BarP1A
entry:
- %0 = bitcast %struct.A* %a to void (%struct.A*)***
- %vtable = load void (%struct.A*)**, void (%struct.A*)*** %0, align 8
+ %vtable = load ptr, ptr %a, align 8
; CHECK: __asan_report_load8
- %1 = load void (%struct.A*)*, void (%struct.A*)** %vtable, align 8
+ %0 = load ptr, ptr %vtable, align 8
; CHECK: __asan_report_load8
- %2 = ptrtoint void (%struct.A*)** %vtable to i64
- %3 = xor i64 %2, -303164226014115343, !nosanitize !0
- %4 = mul i64 %3, -7070675565921424023, !nosanitize !0
- %5 = lshr i64 %4, 47, !nosanitize !0
- %6 = xor i64 %4, %2, !nosanitize !0
- %7 = xor i64 %6, %5, !nosanitize !0
- %8 = mul i64 %7, -7070675565921424023, !nosanitize !0
- %9 = lshr i64 %8, 47, !nosanitize !0
- %10 = xor i64 %9, %8, !nosanitize !0
- %11 = mul i64 %10, -7070675565921424023, !nosanitize !0
- %12 = and i64 %11, 127, !nosanitize !0
- %13 = getelementptr inbounds [128 x i64], [128 x i64]* @__ubsan_vptr_type_cache, i64 0, i64 %12, !nosanitize !0
+ %1 = ptrtoint ptr %vtable to i64
+ %2 = xor i64 %1, -303164226014115343, !nosanitize !0
+ %3 = mul i64 %2, -7070675565921424023, !nosanitize !0
+ %4 = lshr i64 %3, 47, !nosanitize !0
+ %5 = xor i64 %3, %1, !nosanitize !0
+ %6 = xor i64 %5, %4, !nosanitize !0
+ %7 = mul i64 %6, -7070675565921424023, !nosanitize !0
+ %8 = lshr i64 %7, 47, !nosanitize !0
+ %9 = xor i64 %8, %7, !nosanitize !0
+ %10 = mul i64 %9, -7070675565921424023, !nosanitize !0
+ %11 = and i64 %10, 127, !nosanitize !0
+ %12 = getelementptr inbounds [128 x i64], ptr @__ubsan_vptr_type_cache, i64 0, i64 %11, !nosanitize !0
; CHECK-NOT: __asan_report_load8
- %14 = load i64, i64* %13, align 8, !nosanitize !0
- %15 = icmp eq i64 %14, %11, !nosanitize !0
- br i1 %15, label %cont, label %handler.dynamic_type_cache_miss, !nosanitize !0
+ %13 = load i64, ptr %12, align 8, !nosanitize !0
+ %14 = icmp eq i64 %13, %10, !nosanitize !0
+ br i1 %14, label %cont, label %handler.dynamic_type_cache_miss, !nosanitize !0
handler.dynamic_type_cache_miss: ; preds = %entry
- %16 = ptrtoint %struct.A* %a to i64, !nosanitize !0
- tail call void @__ubsan_handle_dynamic_type_cache_miss(i8* bitcast ({ { [19 x i8]*, i32, i32 }, { i16, i16, [4 x i8] }*, i8*, i8 }* @1 to i8*), i64 %16, i64 %11) #2, !nosanitize !0
+ %15 = ptrtoint ptr %a to i64, !nosanitize !0
+ tail call void @__ubsan_handle_dynamic_type_cache_miss(ptr @1, i64 %15, i64 %10) #2, !nosanitize !0
br label %cont, !nosanitize !0
cont: ; preds = %handler.dynamic_type_cache_miss, %entry
- tail call void %1(%struct.A* %a)
+ tail call void %0(ptr %a)
; CHECK: ret void
ret void
}
; NOCMP-LABEL: define void @_Z3foov
entry:
%bar = alloca [10 x i8], align 1
- %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %bar, i64 0, i64 4
- %0 = ptrtoint [10 x i8]* %bar to i64, !nosanitize !0
+ %arrayidx = getelementptr inbounds [10 x i8], ptr %bar, i64 0, i64 4
+ %0 = ptrtoint ptr %bar to i64, !nosanitize !0
; NOCMP-NOT: call void @__sanitizer_ptr_cmp
- %1 = icmp ult [10 x i8]* %bar, inttoptr (i64 -4 to [10 x i8]*), !nosanitize !0
+ %1 = icmp ult ptr %bar, inttoptr (i64 -4 to ptr), !nosanitize !0
br i1 %1, label %cont, label %handler.pointer_overflow, !nosanitize !0
handler.pointer_overflow: ; preds = %entry
%2 = add i64 %0, 4, !nosanitize !0
- call void @__ubsan_handle_pointer_overflow(i8* bitcast ({ { [19 x i8]*, i32, i32 } }* @2 to i8*), i64 %0, i64 %2), !nosanitize !0
+ call void @__ubsan_handle_pointer_overflow(ptr @2, i64 %0, i64 %2), !nosanitize !0
br label %cont, !nosanitize !0
cont: ; preds = %handler.pointer_overflow, %entry
- store i8 0, i8* %arrayidx, align 1
+ store i8 0, ptr %arrayidx, align 1
; NOCMP: ret void
ret void
}
$__crt_init_callback = comdat any
$__crt_init_end = comdat any
-@__pobjMapEntryFirst = weak_odr dso_local constant i8* null, section "ATL$__a", comdat, align 8
-@__pobjMapEntryMiddle = weak_odr dso_local constant i8* null, section "ATL$__m", comdat, align 8
-@__pobjMapEntryLast = weak_odr dso_local constant i8* null, section "ATL$__z", comdat, align 8
-@__crt_init_begin = weak_odr dso_local constant i8* null, section ".CRT$XCA", comdat, align 8
-@__crt_init_callback = weak_odr dso_local constant i8* null, section ".CRT$XCU", comdat, align 8
-@__crt_init_end = weak_odr dso_local constant i8* null, section ".CRT$XCZ", comdat, align 8
+@__pobjMapEntryFirst = weak_odr dso_local constant ptr null, section "ATL$__a", comdat, align 8
+@__pobjMapEntryMiddle = weak_odr dso_local constant ptr null, section "ATL$__m", comdat, align 8
+@__pobjMapEntryLast = weak_odr dso_local constant ptr null, section "ATL$__z", comdat, align 8
+@__crt_init_begin = weak_odr dso_local constant ptr null, section ".CRT$XCA", comdat, align 8
+@__crt_init_callback = weak_odr dso_local constant ptr null, section ".CRT$XCU", comdat, align 8
+@__crt_init_end = weak_odr dso_local constant ptr null, section ".CRT$XCZ", comdat, align 8
-; CHECK: @__pobjMapEntryFirst = weak_odr dso_local constant i8* null, section "ATL$__a", comdat, align 8
-; CHECK: @__pobjMapEntryMiddle = weak_odr dso_local constant i8* null, section "ATL$__m", comdat, align 8
-; CHECK: @__pobjMapEntryLast = weak_odr dso_local constant i8* null, section "ATL$__z", comdat, align 8
-; CHECK: @__crt_init_begin = weak_odr dso_local constant i8* null, section ".CRT$XCA", comdat, align 8
-; CHECK: @__crt_init_callback = weak_odr dso_local constant i8* null, section ".CRT$XCU", comdat, align 8
-; CHECK: @__crt_init_end = weak_odr dso_local constant i8* null, section ".CRT$XCZ", comdat, align 8
+; CHECK: @__pobjMapEntryFirst = weak_odr dso_local constant ptr null, section "ATL$__a", comdat, align 8
+; CHECK: @__pobjMapEntryMiddle = weak_odr dso_local constant ptr null, section "ATL$__m", comdat, align 8
+; CHECK: @__pobjMapEntryLast = weak_odr dso_local constant ptr null, section "ATL$__z", comdat, align 8
+; CHECK: @__crt_init_begin = weak_odr dso_local constant ptr null, section ".CRT$XCA", comdat, align 8
+; CHECK: @__crt_init_callback = weak_odr dso_local constant ptr null, section ".CRT$XCU", comdat, align 8
+; CHECK: @__crt_init_end = weak_odr dso_local constant ptr null, section ".CRT$XCZ", comdat, align 8
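; Globals placed in linker-ordered sections (ATL$__*, .CRT$XC*) must keep
; their exact size and layout, because the runtime walks them as contiguous
; arrays of pointers; the CHECK lines verify ASan leaves them unpadded, with
; no redzones.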
!llvm.asan.globals = !{!0, !2, !4, !6, !8, !10}
!llvm.module.flags = !{!12, !13}
!llvm.ident = !{!14}
-!0 = !{i8** @__pobjMapEntryFirst, !1, !"__pobjMapEntryFirst", i1 false, i1 false}
+!0 = !{ptr @__pobjMapEntryFirst, !1, !"__pobjMapEntryFirst", i1 false, i1 false}
!1 = !{!"t.c", i32 6, i32 61}
-!2 = !{i8** @__pobjMapEntryMiddle, !3, !"__pobjMapEntryMiddle", i1 false, i1 false}
+!2 = !{ptr @__pobjMapEntryMiddle, !3, !"__pobjMapEntryMiddle", i1 false, i1 false}
!3 = !{!"t.c", i32 7, i32 61}
-!4 = !{i8** @__pobjMapEntryLast, !5, !"__pobjMapEntryLast", i1 false, i1 false}
+!4 = !{ptr @__pobjMapEntryLast, !5, !"__pobjMapEntryLast", i1 false, i1 false}
!5 = !{!"t.c", i32 8, i32 61}
-!6 = !{i8** @__crt_init_begin, !7, !"__crt_init_begin", i1 false, i1 false}
+!6 = !{ptr @__crt_init_begin, !7, !"__crt_init_begin", i1 false, i1 false}
!7 = !{!"t.c", i32 16, i32 62}
-!8 = !{i8** @__crt_init_callback, !9, !"__crt_init_callback", i1 false, i1 false}
+!8 = !{ptr @__crt_init_callback, !9, !"__crt_init_callback", i1 false, i1 false}
!9 = !{!"t.c", i32 17, i32 62}
-!10 = !{i8** @__crt_init_end, !11, !"__crt_init_end", i1 false, i1 false}
+!10 = !{ptr @__crt_init_end, !11, !"__crt_init_end", i1 false, i1 false}
!11 = !{!"t.c", i32 18, i32 62}
!12 = !{i32 1, !"wchar_size", i32 2}
!13 = !{i32 7, !"PIC Level", i32 2}
; CHECK: @"__asan_global_??_C@_04JIHMPGLA@asdf?$AA@" =
; CHECK-SAME: private global { i64, i64, i64, i64, i64, i64, i64, i64 }
-; CHECK-SAME: { i64 ptrtoint ({ [5 x i8], [27 x i8] }* @"??_C@_04JIHMPGLA@asdf?$AA@" to i64),
-; CHECK-SAME: i64 5, i64 32, i64 ptrtoint ([7 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8
-; CHECK-SAME: x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }, section ".ASAN$GL",
+; CHECK-SAME: { i64 ptrtoint (ptr @"??_C@_04JIHMPGLA@asdf?$AA@" to i64),
+; CHECK-SAME: i64 5, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }, section ".ASAN$GL",
; CHECK-SAME: comdat($"??_C@_04JIHMPGLA@asdf?$AA@"), align 64
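; The eight i64 fields of the descriptor above are, in order: global address,
; size (5), size with redzone (32), name, module name, has-dynamic-init,
; source location, and ODR indicator.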
; ModuleID = 't.cpp'
@"??_C@_04JIHMPGLA@asdf?$AA@" = linkonce_odr dso_local unnamed_addr constant [5 x i8] c"asdf\00", comdat, align 1
; Function Attrs: nounwind sanitize_address uwtable
-define dso_local i8* @"?getstr@@YAPEBDXZ"() #0 {
+define dso_local ptr @"?getstr@@YAPEBDXZ"() #0 {
entry:
- ret i8* getelementptr inbounds ([5 x i8], [5 x i8]* @"??_C@_04JIHMPGLA@asdf?$AA@", i32 0, i32 0)
+ ret ptr @"??_C@_04JIHMPGLA@asdf?$AA@"
}
attributes #0 = { nounwind sanitize_address uwtable }
; CHECK-IFUNC: @__asan_shadow = external global [0 x i8]
; CHECK-NOIFUNC: @__asan_shadow_memory_dynamic_address = external global i32
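; In all three modes below the shadow address is (addr >> 3) + base; the modes
; differ only in how the base is produced: a folded ptrtoint of @__asan_shadow,
; an empty asm wrapper that blocks rematerialization of it, or a load from
; @__asan_shadow_memory_dynamic_address.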
-define i32 @test_load(i32* %a) sanitize_address {
+define i32 @test_load(ptr %a) sanitize_address {
; The first instrumentation in the function must load the dynamic shadow
; address into a local variable.
; CHECK-LABEL: @test_load
; CHECK: entry:
-; CHECK-IFUNC-NEXT: %[[A:[^ ]*]] = ptrtoint i32* %a to i32
+; CHECK-IFUNC-NEXT: %[[A:[^ ]*]] = ptrtoint ptr %a to i32
; CHECK-IFUNC-NEXT: %[[B:[^ ]*]] = lshr i32 %[[A]], 3
-; CHECK-IFUNC-NEXT: %[[C:[^ ]*]] = add i32 %[[B]], ptrtoint ([0 x i8]* @__asan_shadow to i32)
+; CHECK-IFUNC-NEXT: %[[C:[^ ]*]] = add i32 %[[B]], ptrtoint (ptr @__asan_shadow to i32)
-; CHECK-IFUNC-NOREMAT-NEXT: %[[S:[^ ]*]] = call i32 asm "", "=r,0"([0 x i8]* @__asan_shadow)
-; CHECK-IFUNC-NOREMAT-NEXT: %[[A:[^ ]*]] = ptrtoint i32* %a to i32
+; CHECK-IFUNC-NOREMAT-NEXT: %[[S:[^ ]*]] = call i32 asm "", "=r,0"(ptr @__asan_shadow)
+; CHECK-IFUNC-NOREMAT-NEXT: %[[A:[^ ]*]] = ptrtoint ptr %a to i32
; CHECK-IFUNC-NOREMAT-NEXT: %[[B:[^ ]*]] = lshr i32 %[[A]], 3
; CHECK-IFUNC-NOREMAT-NEXT: %[[C:[^ ]*]] = add i32 %[[B]], %[[S]]
-; CHECK-NOIFUNC-NEXT: %[[SHADOW:[^ ]*]] = load i32, i32* @__asan_shadow_memory_dynamic_address
-; CHECK-NOIFUNC-NEXT: %[[A:[^ ]*]] = ptrtoint i32* %a to i32
+; CHECK-NOIFUNC-NEXT: %[[SHADOW:[^ ]*]] = load i32, ptr @__asan_shadow_memory_dynamic_address
+; CHECK-NOIFUNC-NEXT: %[[A:[^ ]*]] = ptrtoint ptr %a to i32
; CHECK-NOIFUNC-NEXT: %[[B:[^ ]*]] = lshr i32 %[[A]], 3
; CHECK-NOIFUNC-NEXT: %[[C:[^ ]*]] = add i32 %[[B]], %[[SHADOW]]
entry:
- %x = load i32, i32* %a, align 4
+ %x = load i32, ptr %a, align 4
ret i32 %x
}