target triple = "x86_64-unknown-linux-gnu"
declare void @f2()
-declare i8* @f3()
+declare ptr @f3()
define void @f1() {
call void @f2()
; Make sure that the backend can handle undefined references.
; Do an indirect call so that the undefined ref shows up in the combined index.
- call void bitcast (i8*()* @f3 to void()*)()
+ call void @f3()
ret void
}
// Create a sample address sanitizer bitcode library.
-// RUN: %clang_cc1 -no-opaque-pointers -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
+// RUN: %clang_cc1 -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
// RUN: -disable-llvm-passes -o %t.asanrtl.bc %S/Inputs/amdgpu-asanrtl.ll
// Check sanitizer runtime library functions survive
// optimizations without being removed or parameters altered.
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -O3 -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -x hip \
// RUN: | FileCheck %s
// REQUIRES: amdgpu-registered-target
// ASAN-DAG: define weak void @__amdgpu_device_library_preserve_asan_functions()
-// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant void ()* @__amdgpu_device_library_preserve_asan_functions
+// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant ptr @__amdgpu_device_library_preserve_asan_functions
// ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
// ASAN-DAG: define weak void @__asan_report_load1(i64 %{{.*}})
// ELF-WARNING: is not an ELF image, so notes cannot be added to it.
// CHECK-IR: target triple = "x86_64-pc-linux-gnu"
-// CHECK-IR-DAG: [[ENTTY:%.+]] = type { i8*, i8*, i{{32|64}}, i32, i32 }
-// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { i8*, i8*, [[ENTTY]]*, [[ENTTY]]* }
-// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, [[IMAGETY]]*, [[ENTTY]]*, [[ENTTY]]* }
+// CHECK-IR-DAG: [[ENTTY:%.+]] = type { ptr, ptr, i{{32|64}}, i32, i32 }
+// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { ptr, ptr, ptr, ptr }
+// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, ptr, ptr, ptr }
// CHECK-IR: [[ENTBEGIN:@.+]] = external hidden constant [[ENTTY]]
// CHECK-IR: [[ENTEND:@.+]] = external hidden constant [[ENTTY]]
// CHECK-IR: [[BIN:@.+]] = internal unnamed_addr constant [[BINTY:\[[0-9]+ x i8\]]] c"Content of device file{{.+}}"
-// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 0, i64 0), i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 1, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }]
+// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { ptr [[BIN]], ptr getelementptr inbounds ([[BINTY]], ptr [[BIN]], i64 1, i64 0), ptr [[ENTBEGIN]], ptr [[ENTEND]] }]
-// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, [[IMAGETY]]* getelementptr inbounds ([1 x [[IMAGETY]]], [1 x [[IMAGETY]]]* [[IMAGES]], i64 0, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }
+// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, ptr [[IMAGES]], ptr [[ENTBEGIN]], ptr [[ENTEND]] }
-// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[REGFN:@.+]], i8* null }]
-// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[UNREGFN:@.+]], i8* null }]
+// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[REGFN:@.+]], ptr null }]
+// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[UNREGFN:@.+]], ptr null }]
// CHECK-IR: define internal void [[REGFN]]()
-// CHECK-IR: call void @__tgt_register_lib([[DESCTY]]* [[DESC]])
+// CHECK-IR: call void @__tgt_register_lib(ptr [[DESC]])
// CHECK-IR: ret void
-// CHECK-IR: declare void @__tgt_register_lib([[DESCTY]]*)
+// CHECK-IR: declare void @__tgt_register_lib(ptr)
// CHECK-IR: define internal void [[UNREGFN]]()
-// CHECK-IR: call void @__tgt_unregister_lib([[DESCTY]]* [[DESC]])
+// CHECK-IR: call void @__tgt_unregister_lib(ptr [[DESC]])
// CHECK-IR: ret void
-// CHECK-IR: declare void @__tgt_unregister_lib([[DESCTY]]*)
+// CHECK-IR: declare void @__tgt_unregister_lib(ptr)
// Check that clang-offload-wrapper adds LLVMOMPOFFLOAD notes
// into the ELF offload images:
// OPENMP-NEXT: @__stop_omp_offloading_entries = external hidden constant %__tgt_offload_entry
// OPENMP-NEXT: @__dummy.omp_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries"
// OPENMP-NEXT: @.omp_offloading.device_image = internal unnamed_addr constant [0 x i8] zeroinitializer
-// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }]
-// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, %__tgt_device_image* getelementptr inbounds ([1 x %__tgt_device_image], [1 x %__tgt_device_image]* @.omp_offloading.device_images, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }
-// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_reg, i8* null }]
-// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_unreg, i8* null }]
+// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr @.omp_offloading.device_image, ptr @.omp_offloading.device_image, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
+// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
+// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
+// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_unreg, ptr null }]
// OPENMP: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
// OPENMP-NEXT: entry:
-// OPENMP-NEXT: call void @__tgt_register_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
+// OPENMP-NEXT: call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }
// OPENMP: define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
// OPENMP-NEXT: entry:
-// OPENMP-NEXT: call void @__tgt_unregister_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
+// OPENMP-NEXT: call void @__tgt_unregister_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }
// RUN: -linker-path /usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=CUDA
// CUDA: @.fatbin_image = internal constant [0 x i8] zeroinitializer, section ".nv_fatbin"
-// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.fatbin_image, i32 0, i32 0), i8* null }, section ".nvFatBinSegment", align 8
+// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, ptr @.fatbin_image, ptr null }, section ".nvFatBinSegment", align 8
// CUDA-NEXT: @__dummy.cuda_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries"
-// CUDA-NEXT: @.cuda.binary_handle = internal global i8** null
+// CUDA-NEXT: @.cuda.binary_handle = internal global ptr null
// CUDA-NEXT: @__start_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
// CUDA-NEXT: @__stop_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
-// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.cuda.fatbin_reg, i8* null }]
+// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.cuda.fatbin_reg, ptr null }]
// CUDA: define internal void @.cuda.fatbin_reg() section ".text.startup" {
// CUDA-NEXT: entry:
-// CUDA-NEXT: %0 = call i8** @__cudaRegisterFatBinary(i8* bitcast (%fatbin_wrapper* @.fatbin_wrapper to i8*))
-// CUDA-NEXT: store i8** %0, i8*** @.cuda.binary_handle, align 8
-// CUDA-NEXT: call void @.cuda.globals_reg(i8** %0)
-// CUDA-NEXT: call void @__cudaRegisterFatBinaryEnd(i8** %0)
-// CUDA-NEXT: %1 = call i32 @atexit(void ()* @.cuda.fatbin_unreg)
+// CUDA-NEXT: %0 = call ptr @__cudaRegisterFatBinary(ptr @.fatbin_wrapper)
+// CUDA-NEXT: store ptr %0, ptr @.cuda.binary_handle, align 8
+// CUDA-NEXT: call void @.cuda.globals_reg(ptr %0)
+// CUDA-NEXT: call void @__cudaRegisterFatBinaryEnd(ptr %0)
+// CUDA-NEXT: %1 = call i32 @atexit(ptr @.cuda.fatbin_unreg)
// CUDA-NEXT: ret void
// CUDA-NEXT: }
// CUDA: define internal void @.cuda.fatbin_unreg() section ".text.startup" {
// CUDA-NEXT: entry:
-// CUDA-NEXT: %0 = load i8**, i8*** @.cuda.binary_handle, align 8
-// CUDA-NEXT: call void @__cudaUnregisterFatBinary(i8** %0)
+// CUDA-NEXT: %0 = load ptr, ptr @.cuda.binary_handle, align 8
+// CUDA-NEXT: call void @__cudaUnregisterFatBinary(ptr %0)
// CUDA-NEXT: ret void
// CUDA-NEXT: }
-// CUDA: define internal void @.cuda.globals_reg(i8** %0) section ".text.startup" {
+// CUDA: define internal void @.cuda.globals_reg(ptr %0) section ".text.startup" {
// CUDA-NEXT: entry:
-// CUDA-NEXT: br i1 icmp ne ([0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries), label %while.entry, label %while.end
+// CUDA-NEXT: br i1 icmp ne (ptr @__start_cuda_offloading_entries, ptr @__stop_cuda_offloading_entries), label %while.entry, label %while.end
// CUDA: while.entry:
-// CUDA-NEXT: %entry1 = phi %__tgt_offload_entry* [ getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, i64 0, i64 0), %entry ], [ %7, %if.end ]
-// CUDA-NEXT: %1 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 0
-// CUDA-NEXT: %addr = load i8*, i8** %1, align 8
-// CUDA-NEXT: %2 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 1
-// CUDA-NEXT: %name = load i8*, i8** %2, align 8
-// CUDA-NEXT: %3 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 2
-// CUDA-NEXT: %size = load i64, i64* %3, align 4
+// CUDA-NEXT: %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %7, %if.end ]
+// CUDA-NEXT: %1 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 0
+// CUDA-NEXT: %addr = load ptr, ptr %1, align 8
+// CUDA-NEXT: %2 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 1
+// CUDA-NEXT: %name = load ptr, ptr %2, align 8
+// CUDA-NEXT: %3 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 2
+// CUDA-NEXT: %size = load i64, ptr %3, align 4
// CUDA-NEXT: %4 = icmp eq i64 %size, 0
// CUDA-NEXT: br i1 %4, label %if.then, label %if.else
// CUDA: if.then:
-// CUDA-NEXT: %5 = call i32 @__cudaRegisterFunction(i8** %0, i8* %addr, i8* %name, i8* %name, i32 -1, i8* null, i8* null, i8* null, i8* null, i32* null)
+// CUDA-NEXT: %5 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
// CUDA-NEXT: br label %if.end
// CUDA: if.else:
-// CUDA-NEXT: %6 = call i32 @__cudaRegisterVar(i8** %0, i8* %addr, i8* %name, i8* %name, i32 0, i64 %size, i32 0, i32 0)
+// CUDA-NEXT: %6 = call i32 @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 0, i64 %size, i32 0, i32 0)
// CUDA-NEXT: br label %if.end
// CUDA: if.end:
-// CUDA-NEXT: %7 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 1
-// CUDA-NEXT: %8 = icmp eq %__tgt_offload_entry* %7, getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries, i64 0, i64 0)
+// CUDA-NEXT: %7 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 1
+// CUDA-NEXT: %8 = icmp eq ptr %7, @__stop_cuda_offloading_entries
// CUDA-NEXT: br i1 %8, label %while.end, label %while.entry
// CUDA: while.end:
================
As of April 2022 both LLVM and Clang have complete support for opaque pointers,
-and opaque pointers are enabled by default in Clang.
+and opaque pointers are enabled by default in LLVM and Clang.
For users of the clang driver interface, it is possible to temporarily restore
the old default using the ``-DCLANG_ENABLE_OPAQUE_POINTERS=OFF`` cmake option.
Usage for LTO can be disabled by passing ``-Wl,-plugin-opt=no-opaque-pointers``
to the clang driver.
+For users of LLVM as a library, opaque pointers can be disabled by calling
+``setOpaquePointers(false)`` on the ``LLVMContext``.
+
+For users of LLVM tools like opt, opaque pointers can be disabled by passing
+``-opaque-pointers=0``.
+
The next steps for the opaque pointer migration are:
* Migrate Clang/LLVM tests to use opaque pointers.
-* Enable opaque pointers by default in LLVM.
* Remove support for typed pointers after the LLVM 15 branch has been created.
static cl::opt<bool>
OpaquePointersCL("opaque-pointers", cl::desc("Use opaque pointers"),
- cl::init(false));
+ cl::init(true));
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
: DiagHandler(std::make_unique<DiagnosticHandler>()),
; BARE: }
@B = external global i32
-; ANNOT: @B = external global i32 ; [#uses=0 type=i32*]
+; ANNOT: @B = external global i32 ; [#uses=0 type=ptr]
define <4 x i1> @foo(<4 x float> %a, <4 x float> %b) nounwind {
entry:
; RUN: not opt -S < %s 2>&1 | FileCheck %s
; CHECK: Intrinsic has incorrect return type!
-; CHECK-NEXT: float (double, <2 x double>)* @llvm.vector.reduce.fadd.f32.f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f32.f64.v2f64
; Negative verifier test: the declared return type (float, from the .f32
; mangling) does not match the f64 accumulator/element types, so the verifier
; is expected to emit "Intrinsic has incorrect return type!" (CHECK above).
define float @fadd_invalid_scalar_res(double %acc, <2 x double> %in) {
%res = call float @llvm.vector.reduce.fadd.f32.f64.v2f64(double %acc, <2 x double> %in)
ret float %res
}
; CHECK: Intrinsic has incorrect argument type!
-; CHECK-NEXT: double (float, <2 x double>)* @llvm.vector.reduce.fadd.f64.f32.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.f32.v2f64
; Negative verifier test: the start/accumulator argument is float (.f32
; mangling) while the reduction operates on f64 elements, so the verifier
; is expected to emit "Intrinsic has incorrect argument type!" (CHECK above).
define double @fadd_invalid_scalar_start(float %acc, <2 x double> %in) {
%res = call double @llvm.vector.reduce.fadd.f64.f32.v2f64(float %acc, <2 x double> %in)
ret double %res
}
; CHECK: Intrinsic has incorrect return type!
-; CHECK-NEXT: <2 x double> (double, <2 x double>)* @llvm.vector.reduce.fadd.v2f64.f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.v2f64.f64.v2f64
; Negative verifier test: the result is declared as a vector (<2 x double>)
; but vector.reduce.fadd must return the scalar element type, so the verifier
; is expected to emit "Intrinsic has incorrect return type!" (CHECK above).
define <2 x double> @fadd_invalid_vector_res(double %acc, <2 x double> %in) {
%res = call <2 x double> @llvm.vector.reduce.fadd.v2f64.f64.v2f64(double %acc, <2 x double> %in)
ret <2 x double> %res
}
; CHECK: Intrinsic has incorrect argument type!
-; CHECK-NEXT: double (<2 x double>, <2 x double>)* @llvm.vector.reduce.fadd.f64.v2f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.v2f64.v2f64
define double @fadd_invalid_vector_start(<2 x double> %in, <2 x double> %acc) {
%res = call double @llvm.vector.reduce.fadd.f64.v2f64.v2f64(<2 x double> %acc, <2 x double> %in)
ret double %res
; RUN: llvm-as < %s | llvm-c-test --module-list-globals | FileCheck %s
@foo = constant [7 x i8] c"foobar\00", align 1
-;CHECK: GlobalDefinition: foo [7 x i8]*
+;CHECK: GlobalDefinition: foo ptr
@bar = common global i32 0, align 4
-;CHECK: GlobalDefinition: bar i32*
+;CHECK: GlobalDefinition: bar ptr
; CHECK: bb.1:
; CHECK: successors:
; CHECK: bb.2:
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `i32* undef`, align 8)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `ptr undef`, align 8)
; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[C]], [[LOAD]]
; CHECK: [[MUL1:%[0-9]+]]:_(s32) = nsw G_MUL [[MUL]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
bb.3:
- %2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `i32* undef`, align 8)
+ %2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `ptr undef`, align 8)
%5:_(s32) = nsw G_MUL %4, %2
%7:_(s32) = nsw G_MUL %5, %6
%9:_(s32) = nsw G_MUL %7, %8
; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
- ; CHECK-NEXT: STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
+ ; CHECK-NEXT: STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
; CHECK-NEXT: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
%3:gpr(s16) = G_PHI %1(s16), %bb.1, %5(s16), %bb.2
%5:fpr(s16) = G_FPTRUNC %8(s32)
- G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
+ G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
G_BR %bb.2
...
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:fpr16 = PHI %7, %bb.2, [[COPY2]], %bb.1
; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
- ; CHECK-NEXT: STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
+ ; CHECK-NEXT: STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
; CHECK-NEXT: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
%3:fpr(s16) = G_PHI %5(s16), %bb.2, %1(s16), %bb.1
%5:fpr(s16) = G_FPTRUNC %8(s32)
- G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
+ G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
G_BR %bb.2
...
# CHECK-LABEL: test
# CHECK: bb.0:
# CHECK-NEXT: liveins: $x0, $x17, $x18
-# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `<16 x i8>* undef`, align 64)
-# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `<16 x i8>* undef`, align 64)
+# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `ptr undef`, align 64)
+# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `ptr undef`, align 64)
# CHECK-NEXT: $q16 = EXTv16i8 renamable $q23, renamable $q23, 8
# CHECK-NEXT: renamable $q20 = EXTv16i8 renamable $q14, renamable $q14, 8
# CHECK-NEXT: STRQui killed renamable $q20, $sp, 4 :: (store (s128))
; CHECK: B %bb.2
; CHECK: bb.1:
; CHECK: successors: %bb.9(0x80000000)
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `i64* undef`)
+ ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `ptr undef`)
; CHECK: B %bb.9
; CHECK: bb.2:
; CHECK: successors: %bb.3(0x40000000), %bb.4(0x40000000)
bb.2:
successors: %bb.8(0x80000000)
- %8:gpr64 = LDRXui %9, 0 :: (load (s64) from `i64* undef`)
+ %8:gpr64 = LDRXui %9, 0 :: (load (s64) from `ptr undef`)
B %bb.8
bb.3:
; CHECK: %ptr2:_(p1) = G_IMPLICIT_DEF
; CHECK: %ptr3:_(p1) = COPY $vgpr2_vgpr3
; CHECK: %ptr4:_(p1) = COPY $vgpr4_vgpr5
- ; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; CHECK: %div:_(s32), %rem:_ = G_SDIVREM %src1, %src2
; CHECK: G_STORE %div(s32), %ptr3(p1) :: (store (s32), addrspace 1)
; CHECK: G_STORE %rem(s32), %ptr4(p1) :: (store (s32), addrspace 1)
; GCN-LABEL: name: merge_flat_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_flat_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GCN-LABEL: name: merge_flat_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-LABEL: name: merge_flat_load_dword_5
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[FLAT_LOAD_DWORD]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
; GCN-LABEL: name: merge_flat_load_dword_6
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
; GCN-LABEL: name: merge_flat_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i64* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_flat_load_dwordx3_with_dwordx1
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i128* undef`, align 8)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_flat_load_dwordx1_with_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX3_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub1_sub2
; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
; GCN-LABEL: name: no_merge_flat_load_dword_agpr_with_vgpr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
- ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
; GCN-LABEL: name: no_merge_flat_load_dword_disjoint
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
- ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
; GCN-LABEL: name: no_merge_flat_load_dword_overlap
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
- ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
; GCN-LABEL: name: no_merge_flat_load_dword_different_cpol
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
- ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
- ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
- ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_128 = IMPLICIT_DEF
FLAT_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 4)
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 8)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 8)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
- ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_96_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 2)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`, align 2)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_128_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-LABEL: name: merge_flat_global_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float* undef` + 4, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_flat_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float addrspace(1)* undef`)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr addrspace(1) undef`)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_flat_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `float* undef`, align 16)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 16)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
; GCN-LABEL: name: merge_global_flat_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef` + 4, align 4, basealign 8)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef` + 4, align 4, basealign 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-LABEL: name: merge_flat_global_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `double* undef`, align 8)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_flat_global_load_dwordx3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `float* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_flat_load_dwordx3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef`, align 4)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: no_merge_flat_global_load_dword_saddr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
- ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `float* undef`)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef` + 4, align 4, addrspace 1)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: no_merge_global_saddr_flat_load_dword
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef` + 4, align 4)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[COPY]], implicit [[COPY1]]
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
- ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
- ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32 addrspace(1)* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr addrspace(1) undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2
- ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2_sub3
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub2, [[DEF2]], %subreg.sub0_sub1
- ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i64* undef`, align 8)
+ ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 8)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub3, [[DEF2]], %subreg.sub0_sub1_sub2
- ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `<3 x i32>* undef`)
+ ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:sreg_64_xexec = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:sreg_64_xexec = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-LABEL: name: merge_global_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef` + 4, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GCN-LABEL: name: merge_global_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-LABEL: name: merge_global_load_dword_5
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[GLOBAL_LOAD_DWORD]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-LABEL: name: merge_global_load_dword_6
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
; GCN-LABEL: name: merge_global_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_load_dwordx3_with_dwordx1
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `i128 addrspace(1)* undef`, align 8, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 8, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_load_dwordx1_with_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub1_sub2
; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
; GCN-LABEL: name: no_merge_global_load_dword_agpr_with_vgpr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-LABEL: name: no_merge_global_load_dword_disjoint
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-LABEL: name: no_merge_global_load_dword_overlap
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-LABEL: name: no_merge_global_load_dword_different_cpol
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-LABEL: name: merge_global_load_dword_saddr_2
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_load_dword_saddr_3
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_SADDR]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_SADDR]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GCN-LABEL: name: merge_global_load_dword_saddr_4
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-LABEL: name: merge_global_load_dword_saddr_6
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
; GCN-LABEL: name: merge_global_load_dwordx2_saddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: no_merge_global_load_dword_and_global_load_dword_saddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD_SADDR]]
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_saddr
; GCN: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
%0:sgpr_128 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_vaddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
; GCN-LABEL: name: merge_global_load_dword_2_out_of_order
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
; GCN-LABEL: name: merge_global_load_dword_3_out_of_order
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `float addrspace(1)* undef`, align 16, addrspace 1)
+ ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 16, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
- ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
- ; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_128 = IMPLICIT_DEF
GLOBAL_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 8, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 8, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
- ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_96_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, align 2, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, align 2, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_128_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
- ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF4:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
- ; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
- ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF6]], %subreg.sub0, [[DEF7]], %subreg.sub1
- ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
- ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+ ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sgpr_128 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'internal_global(SBREL)'
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'external_global(SBREL)'
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_constant
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_constant
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @internal_global'
+# ELF-NOMOVT: value: 'ptr @internal_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @external_global'
+# ELF-NOMOVT: value: 'ptr @external_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'internal_global(SBREL)'
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'external_global(SBREL)'
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_constant
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_constant
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @internal_global'
+# ELF-NOMOVT: value: 'ptr @internal_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @external_global'
+# ELF-NOMOVT: value: 'ptr @external_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global
bb.0:
; CHECK-LABEL: name: aligned_memoperands
; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, basealign 8)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 8)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, basealign 8)
%0:_(p0) = IMPLICIT_DEF
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 2)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 8)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 2)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 2) ; printed as "align"
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 8)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 2)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 8)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 2)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 2) ; printed as "align"
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 8)
...
; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<1>
; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<2048>
; MIPS-DAG: t{{[0-9]+}}: i32 = LUi TargetConstant:i32<128>
-; MIPS: t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
+; MIPS: t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<ptr @f>
; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetConstant:i32<2304>
; MIPS: t{{[0-9]+}}: i32 = ORi t[[A]], TargetConstant:i32<2>
; MM-DAG: t{{[0-9]+}}: i32 = LI16_MM TargetConstant:i32<1>
; MM-DAG: t{{[0-9]+}}: i32 = ADDiu_MM Register:i32 $zero, TargetConstant:i32<2048>
; MM-DAG: t{{[0-9]+}}: i32 = LUi_MM TargetConstant:i32<128>
-; MM: t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
+; MM: t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<ptr @f>
; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetConstant:i32<2304>
; MM: t{{[0-9]+}}: i32 = ORi_MM t[[A]], TargetConstant:i32<2>
; FP library calls can have fast-math-flags.
; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; FMFDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
+; FMFDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
; FMFDEBUG: ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
; FMFDEBUG: f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
; FMFDEBUG: Type-legalized selection DAG: %bb.0 'log2_approx:'
; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; GLOBALDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
+; GLOBALDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
; GLOBALDEBUG: ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
; GLOBALDEBUG: f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
; GLOBALDEBUG: Type-legalized selection DAG: %bb.0 'log2_approx:'
; CHECK: J %bb.4
; CHECK: bb.2:
; CHECK: successors:
- ; CHECK: STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
+ ; CHECK: STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
; CHECK: bb.3:
; CHECK: successors:
; CHECK: bb.4:
bb.2:
successors:
- STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
+ STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
bb.3:
successors:
bb.0:
; CHECK-LABEL: name: foo
; CHECK: renamable $eax = IMPLICIT_DEF
- ; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
- ; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
- ; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
+ ; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
+ ; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
+ ; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $eax = SHRD32rrCL renamable $eax, killed renamable $edx, implicit-def dead $eflags, implicit killed $cl
; CHECK: RET32
%0:gr32 = IMPLICIT_DEF
- %1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
- %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
- %3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
+ %1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
+ %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
+ %3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
%4:gr32 = COPY %1
%5:gr32 = COPY %1
%6:gr32 = COPY %1
}
; CHECK-NOT: @llvm.global_dtors
-; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* bitcast (void ()* @asan.module_ctor to i8*) }]
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_ctor, ptr @asan.module_ctor }]
; CHECK-NOT: @llvm.global_dtors
; CHECK: define internal void @asan.module_ctor() #[[#]] comdat
; CHECK-NOT: @llvm.global_dtors
@c = internal global [2 x i32] zeroinitializer, align 4
@d = unnamed_addr global [2 x i32] zeroinitializer, align 4
-; NOALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @a to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
-; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @b to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @c to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @d to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; NOALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @a to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @b to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @c to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @d to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
-; ALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @0 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
-; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @1 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @2 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @3 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; ALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @0 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @1 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @2 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @3 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS: @0 = private alias {{.*}} @a
; ALIAS-NEXT: @1 = private alias {{.*}} @b
; ALIAS-NEXT: @2 = private alias {{.*}} @c
define i8 @add(i8 %a, i8 %b) {
; CHECK: @add.dfsan
- ; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
- ; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+ ; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; CHECK: %[[#UNION:]] = or i[[#SBITS]] %[[#ALABEL]], %[[#BLABEL]]
; CHECK: %c = add i8 %a, %b
- ; CHECK: store i[[#SBITS]] %[[#UNION]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK: store i[[#SBITS]] %[[#UNION]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; CHECK: ret i8 %c
%c = add i8 %a, %b
ret i8 %c
i1 %a200
) {
; CHECK: @arg_overflow.dfsan
- ; CHECK: [[A199:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
- ; CHECK: store i32 [[A199]], i32* @__dfsan_retval_origin_tls, align 4
+ ; CHECK: [[A199:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+ ; CHECK: store i32 [[A199]], ptr @__dfsan_retval_origin_tls, align 4
%r = add i1 %a199, %a200
ret i1 %r
define i1 @param_overflow(i1 %a) {
; CHECK: @param_overflow.dfsan
- ; CHECK: store i32 %1, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
- ; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 398) to i[[#SBITS]]*), align 2
- ; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 400) to i[[#SBITS]]*), align 2
+ ; CHECK: store i32 %1, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+ ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 398) to ptr), align 2
+ ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 400) to ptr), align 2
; CHECK-NEXT: %r = call i1 @arg_overflow.dfsan
- ; CHECK: %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
- ; CHECK: store i32 %_dfsret_o, i32* @__dfsan_retval_origin_tls, align 4
+ ; CHECK: %_dfsret_o = load i32, ptr @__dfsan_retval_origin_tls, align 4
+ ; CHECK: store i32 %_dfsret_o, ptr @__dfsan_retval_origin_tls, align 4
%r = call i1 @arg_overflow(
i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
define void @param_with_zero_shadow() {
; CHECK: @param_with_zero_shadow.dfsan
- ; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+ ; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_arg_tls, align 2
; CHECK-NEXT: call void @foo.dfsan(i1 true)
call void @foo(i1 1)
define i32 @phiop(i32 %a, i32 %b, i1 %c) {
; CHECK: @phiop.dfsan
; CHECK: entry:
- ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
- ; CHECK: [[BS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+ ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+ ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; CHECK: br i1 %c, label %next, label %done
; CHECK: next:
; CHECK: br i1 %c, label %T, label %F
; CHECK: br label %done
; CHECK: done:
; CHECK: [[PO:%.*]] = phi i32 [ [[BAO_T]], %T ], [ [[BAO_F]], %F ], [ [[AO]], %entry ]
- ; CHECK: store i32 [[PO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; CHECK: store i32 [[PO]], ptr @__dfsan_retval_origin_tls, align 4
entry:
br i1 %c, label %next, label %done
define i8 @select8(i1 %c, i8 %t, i8 %f) {
; TRACK_CONTROL_FLOW: @select8.dfsan
- ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
- ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
- ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+ ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+ ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
- ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8.dfsan
- ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
- ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+ ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
- ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select i1 %c, i8 %t, i8 %f
ret i8 %a
define i8 @select8e(i1 %c, i8 %tf) {
; TRACK_CONTROL_FLOW: @select8e.dfsan
- ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
- ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+ ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
- ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8e.dfsan
- ; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select i1 %c, i8 %tf, i8 %tf
ret i8 %a
define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
; TRACK_CONTROL_FLOW: @select8v.dfsan
- ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
- ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
- ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
- ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+ ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+ ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+ ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
; TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CFTO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[FTO]]
- ; TRACK_CONTROL_FLOW: store i32 [[CFTO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; TRACK_CONTROL_FLOW: store i32 [[CFTO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8v.dfsan
- ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
- ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
- ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
+ ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+ ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+ ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
; NO_TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
; NO_TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
- ; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], i32* @__dfsan_retval_origin_tls, align 4
+ ; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
ret <4 x i8> %a
; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
define {i32, i32} @test({i32, i32} %a, i1 %c) {
- ; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
+ ; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
; CHECK: %[[#AL0:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 0
; CHECK: %[[#AL1:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 1
; CHECK: %[[#PL:]] = phi { i[[#SBITS]], i[[#SBITS]] } [ %[[#AL0]], %T ], [ %[[#AL1]], %F ]
- ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+ ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
entry:
br i1 %c, label %T, label %F
define i8 @select8(i1 %c, i8 %t, i8 %f) {
; TRACK_CF: @select8.dfsan
- ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+ ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
; TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
- ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret i8 %a
; NO_TRACK_CF: @select8.dfsan
- ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+ ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
; NO_TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
- ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret i8 %a
%a = select i1 %c, i8 %t, i8 %f
define i8 @select8e(i1 %c, i8 %tf) {
; TRACK_CF: @select8e.dfsan
- ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
- ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret i8 %a
; NO_TRACK_CF: @select8e.dfsan
- ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
- ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret i8 %a
%a = select i1 %c, i8 %tf, i8 %tf
define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
; TRACK_CF: @select8v.dfsan
- ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+ ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
; TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
- ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret <4 x i8> %a
; NO_TRACK_CF: @select8v.dfsan
- ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
- ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
- ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+ ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+ ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; NO_TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
- ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+ ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret <4 x i8> %a
%a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
define i32 @m() {
; CHECK-LABEL: @m.dfsan
- ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, i[[#SBITS]]* %{{.*}})
+ ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, ptr %{{.*}})
entry:
%call = call zeroext i16 @dfsan_get_label(i64 signext 56)
define i32 @k() {
; CHECK-LABEL: @k.dfsan
- ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
+ ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
entry:
%call = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
define i32 @k3() {
; CHECK-LABEL: @k3.dfsan
- ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
+ ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
entry:
%call = call zeroext i16 @k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89)
declare zeroext i16 @dfsan_get_label(i64 signext)
; CHECK-LABEL: @"dfsw$dfsan_get_label"
-; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, ptr %{{.*}})
declare zeroext i16 @k2(i64 signext, i64 signext)
; CHECK-LABEL: @"dfsw$k2"
-; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
declare zeroext i16 @k4(i64 signext, i64 signext, i64 signext, i64 signext)
; CHECK-LABEL: @"dfsw$k4"
-; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
-; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], i[[#SBITS]]*)
-; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
-; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
+; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], ptr)
+; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], ptr)
+; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr)
; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__start_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__stop_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00," }, align 16
-; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
+; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
; CHECK: @sixteen.hwasan = private global [16 x i8] zeroinitializer, align 16
-; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
+; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 ptrtoint (ptr @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
; CHECK: @huge.hwasan = private global [16777232 x i8] zeroinitializer, align 16
-; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
-; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
+; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
+; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
-; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 6341068275337658368) to i32*)
-; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 6485183463413514240) to [16 x i8]*)
-; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 6629298651489370112) to [16777232 x i8]*)
+; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 6341068275337658368) to ptr)
+; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 6485183463413514240) to ptr)
+; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 6629298651489370112) to ptr)
-; CHECK: [[NOTE]] = !{{{{}} i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note}
-; CHECK: [[FOUR]] = !{{{{}} i32, [12 x i8] }* @four.hwasan}
-; CHECK: [[SIXTEEN]] = !{[16 x i8]* @sixteen.hwasan}
-; CHECK: [[HUGE]] = !{[16777232 x i8]* @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
+; CHECK: [[FOUR]] = !{ptr @four.hwasan}
+; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
+; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
source_filename = "foo"
}
; CHECK-LABEL: define i32 @_Z1fv
; CHECK: order_file_entry
-; CHECK: %[[T1:.+]] = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0
-; CHECK: store i8 1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0)
+; CHECK: %[[T1:.+]] = load i8, ptr @bitmap_0
+; CHECK: store i8 1, ptr @bitmap_0
; CHECK: %[[T2:.+]] = icmp eq i8 %[[T1]], 0
; CHECK: br i1 %[[T2]], label %order_file_set, label
; CHECK: order_file_set
-; CHECK: %[[T3:.+]] = atomicrmw add i32* @_llvm_order_file_buffer_idx, i32 1 seq_cst
+; CHECK: %[[T3:.+]] = atomicrmw add ptr @_llvm_order_file_buffer_idx, i32 1 seq_cst
; CHECK: %[[T5:.+]] = and i32 %[[T3]], 131071
-; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], [131072 x i64]* @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
-; CHECK: store i64 {{.*}}, i64* %[[T4]]
+; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], ptr @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
+; CHECK: store i64 {{.*}}, ptr %[[T4]]
; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".just.my.code", align 1, !dbg !5
; CHECK: define void @l1() !dbg !12 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @l2() !dbg !16 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w1() !dbg !18 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w2() !dbg !19 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w3() !dbg !21 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w4() !dbg !23 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
-; CHECK: define weak void @__CheckForDebuggerJustMyCode(i8* noundef %0) unnamed_addr {
+; CHECK: define weak void @__CheckForDebuggerJustMyCode(ptr noundef %0) unnamed_addr {
; CHECK: ret void
; CHECK: }
; CHECK: $_JustMyCode_Default = comdat any
; CHECK: @"_A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @_JustMyCode_Default to i8*)], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @_JustMyCode_Default], section "llvm.metadata"
; CHECK: define void @w1() #0 !dbg !10 {
-; CHECK: call x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef @"_A8764FDD_x@c")
+; CHECK: call x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef @"_A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
-; CHECK: define void @_JustMyCode_Default(i8* inreg noundef %0) unnamed_addr comdat {
+; CHECK: define void @_JustMyCode_Default(ptr inreg noundef %0) unnamed_addr comdat {
; CHECK: ret void
; CHECK: }
-; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef) unnamed_addr
+; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef) unnamed_addr
; CHECK: !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
; CHECK: !1 = distinct !DIGlobalVariable(name: "_A8764FDD_x@c", scope: !2, file: !3, type: !5, isLocal: true, isDefinition: true)
; CHECK: $__JustMyCode_Default = comdat any
; CHECK: @"__7DF23CF5_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @__JustMyCode_Default to i8*)], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @__JustMyCode_Default], section "llvm.metadata"
; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !5
; CHECK: @"__0C712A50_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !9
; CHECK: @"__A3605329_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !12
; CHECK: define void @l1() !dbg !19 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @l2() !dbg !23 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w1() !dbg !25 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w2() !dbg !26 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w3() !dbg !28 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w4() !dbg !30 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w5() !dbg !32 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w6() !dbg !33 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A3605329_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A3605329_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w7() !dbg !34 {
-; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
+; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
; CHECK: ret void
; CHECK: }
-; CHECK: define void @__JustMyCode_Default(i8* noundef %0) unnamed_addr comdat {
+; CHECK: define void @__JustMyCode_Default(ptr noundef %0) unnamed_addr comdat {
; CHECK: ret void
; CHECK: }
-; CHECK: declare void @__CheckForDebuggerJustMyCode(i8* noundef) unnamed_addr
+; CHECK: declare void @__CheckForDebuggerJustMyCode(ptr noundef) unnamed_addr
; CHECK: !llvm.linker.options = !{!18}
; CHECK-LABEL: @bar
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 24
; CHECK: [[V:%.*]] = zext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 36
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 40
; CHECK: [[V:%.*]] = sext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 48
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 160
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 168
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 176
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 184
; CHECK: [[V:%.*]] = zext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 192
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 204
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 208
; CHECK: [[V:%.*]] = sext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 216
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 224
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: store {{.*}} 72, {{.*}} %va_arg_overflow_size
; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
declare i64 @sum(i64 %n, ...)
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi16(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
define <4 x double> @test_fabs(<4 x double> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_fabs(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> [[A:%.*]])
-; CHECK-NEXT: store <4 x i64> [[TMP0]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT: store <4 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x double> [[TMP2]]
;
entry:
}
; CHECK-LABEL: @InsertValue(
-; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
-; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
+; CHECK-DAG: [[Sx:%.*]] = load i32, ptr @__msan_param_tls
+; CHECK-DAG: [[Sy:%.*]] = load i32, ptr {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
-; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
+; CHECK: store [2 x i32] [[B]], ptr {{.*}}@__msan_retval_tls
; CHECK: ret [2 x i32]
}
; CHECK-LABEL: @InsertValueDouble(
-; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
-; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
+; CHECK-DAG: [[Sx:%.*]] = load i64, ptr @__msan_param_tls
+; CHECK-DAG: [[Sy:%.*]] = load i64, ptr {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
-; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
+; CHECK: store [2 x i64] [[B]], ptr {{.*}}@__msan_retval_tls
; CHECK: ret [2 x double]
}
; CHECK-LABEL: @ExtractValue(
-; CHECK: [[Sa:%.*]] = load [2 x i32], [2 x i32]* {{.*}}@__msan_param_tls to [2 x i32]*)
+; CHECK: [[Sa:%.*]] = load [2 x i32], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [2 x i32] [[Sa]], 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
}
; CHECK-LABEL: @ArrayInStruct(
-; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, { i32, i32, [3 x i32] }* {{.*}}@__msan_param_tls to { i32, i32, [3 x i32] }*)
+; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue { i32, i32, [3 x i32] } [[Ss]], 2, 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
}
; CHECK-LABEL: @ArrayOfStructs(
-; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], [3 x { i32, i32 }]* {{.*}}@__msan_param_tls to [3 x { i32, i32 }]*)
+; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [3 x { i32, i32 }] [[Ss]], 2, 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
}
; CHECK-LABEL: @ArrayOfVectors(
-; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], [3 x <8 x i16>]* {{.*}}@__msan_param_tls to [3 x <8 x i16>]*)
+; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [3 x <8 x i16>] [[Ss]], 1
-; CHECK: store <8 x i16> [[Sx]], <8 x i16>* {{.*}}@__msan_retval_tls
+; CHECK: store <8 x i16> [[Sx]], ptr @__msan_retval_tls
; CHECK: ret <8 x i16>
}
; CHECK-LABEL: @Test_bzhi_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
}
; CHECK-LABEL: @Test_bzhi_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
}
; CHECK-LABEL: @Test_bextr_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
}
; CHECK-LABEL: @Test_bextr_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
}
; CHECK-LABEL: @Test_pdep_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
}
; CHECK-LABEL: @Test_pdep_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
}
; CHECK-LABEL: @Test_pext_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
}
; CHECK-LABEL: @Test_pext_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
}
; CHECK-LABEL: @clmul00
-; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <2 x i64> @clmul10(<2 x i64> %a, <2 x i64> %b) sanitize_memory {
entry:
}
; CHECK-LABEL: @clmul10
-; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> <i32 1, i32 1>
; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <4 x i64> @clmul11_256(<4 x i64> %a, <4 x i64> %b) sanitize_memory {
entry:
}
; CHECK-LABEL: @clmul11_256
-; CHECK: %[[S0:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; CHECK: %[[SHUF1:.*]] = shufflevector <4 x i64> %[[S1]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; CHECK: %[[SRET:.*]] = or <4 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <4 x i64> %[[SRET]], <4 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <4 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <8 x i64> @clmul01_512(<8 x i64> %a, <8 x i64> %b) sanitize_memory {
entry:
}
; CHECK-LABEL: @clmul01_512
-; CHECK: %[[S0:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; CHECK: %[[SHUF1:.*]] = shufflevector <8 x i64> %[[S1]], <8 x i64> poison, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
; CHECK: %[[SRET:.*]] = or <8 x i64> %[[SHUF0]], %[[SHUF1]]
; ORIGIN: %[[FLAT:.*]] = bitcast <8 x i64> %[[SHUF1]] to i512
; ORIGIN: %[[I:.*]] = icmp ne i512 %[[FLAT]], 0
; ORIGIN: %[[O:.*]] = select i1 %[[I]],
-; CHECK: store <8 x i64> %[[SRET]], <8 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <8 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
; ORIGIN: store i32 %[[O]], ptr @__msan_retval_origin_tls
define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %b64, <2 x i64> %b128, <4 x i64> %b256, <8 x i64> %b512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i64*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <2 x i64>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <4 x i64>*), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <8 x i64>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i64
define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %b32, <4 x i32> %b128, <8 x i32> %b256, <16 x i32> %b512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i32(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i32*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <4 x i32>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <16 x i32>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i32
define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %b16, <8 x i16> %b128, <16 x i16> %b256, <32 x i16> %b512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i16(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i16*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <8 x i16>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <16 x i16>*), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <32 x i16>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i16
define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %b8, <16 x i8> %b128, <32 x i8> %b256, <64 x i8> %b512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i8(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i8*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <16 x i8>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <32 x i8>*), align 8
-; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
-; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <64 x i8>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i8
define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i64
define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i32(
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i16(
-; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i16
define void @var_rotate_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i8(
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
-; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
-; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i8
}
; CHECK-LABEL: @Shuffle(
-; CHECK: [[A:%.*]] = load i32, i32* {{.*}}@__msan_param_origin_tls,
-; CHECK: store i32 [[A]], i32* @__msan_retval_origin_tls
+; CHECK: [[A:%.*]] = load i32, ptr @__msan_param_origin_tls
+; CHECK: store i32 [[A]], ptr @__msan_retval_origin_tls
; CHECK: ret <4 x i32>
; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
; CHECK-NOT: declare
; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
; CHECK-NOT: declare
; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($foo), align 1
-; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
-; CHECK-SAME: [i64* bitcast (i32 (i32)* @foo to i64*), i64* inttoptr (i64 1 to i64*)],
+; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
+; CHECK-SAME: [ptr @foo, ptr inttoptr (i64 1 to ptr)],
; CHECK-SAME: section ".SCOVP$M", comdat($foo), align 8
; Tables for 'bar' should be in the 'bar' comdat.
; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($bar), align 1
-; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
-; CHECK-SAME: [i64* bitcast (i32 (i32)* @bar to i64*), i64* inttoptr (i64 1 to i64*)],
+; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
+; CHECK-SAME: [ptr @bar, ptr inttoptr (i64 1 to ptr)],
; CHECK-SAME: section ".SCOVP$M", comdat($bar), align 8
; 'foo' and 'bar' should be in their new comdat groups.
target triple = "x86_64-unknown-linux-gnu"
define void @foo() {
entry:
; The source body is empty; the directives below (a '-'/'+' diff pair
; from the typed-pointer -> opaque-pointer migration) describe the
; 8-bit-counter increment sequence the SanitizerCoverage pass is
; expected to insert: load counter cell, add 1, store back.
-; CHECK: %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
+; CHECK: %0 = load i8, ptr @__sancov_gen_, align 1, !nosanitize
; CHECK: %1 = add i8 %0, 1
-; CHECK: store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
+; CHECK: store i8 %1, ptr @__sancov_gen_, align 1, !nosanitize
ret void
}
-; CHECK: call void @__sanitizer_cov_8bit_counters_init(i8* @__start___sancov_cntrs, i8* @__stop___sancov_cntrs)
+; CHECK: call void @__sanitizer_cov_8bit_counters_init(ptr @__start___sancov_cntrs, ptr @__stop___sancov_cntrs)
; CHECK: @__sancov_gen_ = private global [1 x i1] zeroinitializer, section "__sancov_bools", comdat($foo), align 1{{$}}
; CHECK: @__start___sancov_bools = extern_weak hidden global i1
; CHECK-NEXT: @__stop___sancov_bools = extern_weak hidden global i1
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @sancov.module_ctor_bool_flag to i8*)], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i8*] [i8* bitcast ([1 x i1]* @__sancov_gen_ to i8*)], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @sancov.module_ctor_bool_flag], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @__sancov_gen_], section "llvm.metadata"
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
; Trivial function used to exercise SanitizerCoverage bool-flag
; instrumentation: the directives below expect a guard load, a branch on
; "flag not yet set", and a store of 'true' into @__sancov_gen_.
; The '-'/'+' prefixed lines are leftover diff markers from the
; typed-pointer -> opaque-pointer migration of the expectations.
define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i1, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i1 [[TMP0]], false
; CHECK-NEXT:    br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
; CHECK:       2:
-; CHECK-NEXT:    store i1 true, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
+; CHECK-NEXT:    store i1 true, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT:    br label [[TMP3]]
; CHECK:       3:
; CHECK-NEXT:    ret void
entry:
ret void
}
-; CHECK: call void @__sanitizer_cov_bool_flag_init(i1* @__start___sancov_bools, i1* @__stop___sancov_bools)
+; CHECK: call void @__sanitizer_cov_bool_flag_init(ptr @__start___sancov_bools, ptr @__stop___sancov_bools)
; CHECK: ![[#EMPTY]] = !{}
entry:
; CHECK: __sancov_gen_cov_switch_values = internal global [5 x i64] [i64 3, i64 32, i64 1, i64 101, i64 1001]
; CHECK: [[TMP:%[0-9]*]] = zext i32 %x to i64
-; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @__sancov_gen_cov_switch_values, i32 0, i32 0))
+; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], ptr @__sancov_gen_cov_switch_values)
switch i32 %x, label %sw.epilog [
i32 1, label %sw.bb
i32 1001, label %sw.bb.1
; RUN: llvm-as %s -o %t.2.bc
; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
-; CHECK: @bar = global i32 ()* @foo.2
+; CHECK: @bar = global ptr @foo.2
; CHECK: define internal i32 @foo.2() {
; CHECK-NEXT: ret i32 7
; RUN: llvm-as %p/2008-07-06-AliasFnDecl2.ll -o %t2.bc
; RUN: llvm-link %t1.bc %t2.bc -o %t3.bc
-@b = alias void (), void ()* @a
+@b = alias void (), ptr @a
define void @a() nounwind {
entry:
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
-@sched_clock = alias i64 (), i64 ()* @native_sched_clock
+@sched_clock = alias i64 (), ptr @native_sched_clock
-@foo = alias i32, i32* @realfoo
+@foo = alias i32, ptr @realfoo
@realfoo = global i32 0
define i64 @native_sched_clock() nounwind {
; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
; CHECK: [i32 7, i32 4, i32 8]
-@X = appending global [2 x i32] [ i32 7, i32 4 ] ; <[2 x i32]*> [#uses=2]
-@Y = global i32* getelementptr ([2 x i32], [2 x i32]* @X, i64 0, i64 0) ; <i32**> [#uses=0]
+@X = appending global [2 x i32] [ i32 7, i32 4 ]
+@Y = global ptr @X
define void @foo(i64 %V) {
- %Y = getelementptr [2 x i32], [2 x i32]* @X, i64 0, i64 %V ; <i32*> [#uses=0]
+ %Y = getelementptr [2 x i32], ptr @X, i64 0, i64 %V
ret void
}
-@bar = global i32()* @foo
+@bar = global ptr @foo
define internal i32 @foo() {
ret i32 7
}
declare i32 @foo()
define void @bar() !dbg !4 {
- load i32, i32* @X, !dbg !10
+ load i32, ptr @X, !dbg !10
call i32 @foo(), !dbg !11
ret void, !dbg !12
}
define i32 @main() #0 !dbg !4 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
ret i32 0, !dbg !11
}
; erroneously renamed to A.1 and not linked to the declaration from
; the first module
-@C = alias void (), void ()* @A
+@C = alias void (), ptr @A
define void @D() {
call void @C()
ret void
}
-; CHECK-DAG: @C = alias void (), void ()* @A
+; CHECK-DAG: @C = alias void (), ptr @A
; CHECK-DAG: define void @B()
; CHECK-DAG: call void @A()
; CHECK-DAG: define void @D()
; Verify that linking GlobalAliases preserves the thread_local attribute
; CHECK: @tlsvar1 = thread_local global i32 0, align 4
-; CHECK: @tlsvar2 = hidden thread_local alias i32, i32* @tlsvar1
+; CHECK: @tlsvar2 = hidden thread_local alias i32, ptr @tlsvar1
@tlsvar2 = external thread_local global i32, align 4
$c1 = comdat any
@v1 = weak_odr global i32 42, comdat($c1)
-define weak_odr i32 @f1(i8*) comdat($c1) {
+define weak_odr i32 @f1(ptr) comdat($c1) {
bb10:
br label %bb11
bb11:
ret i32 42
}
-@r11 = global i32* @v1
-@r12 = global i32 (i8*)* @f1
+@r11 = global ptr @v1
+@r12 = global ptr @f1
-@a11 = alias i32, i32* @v1
-@a12 = alias i16, bitcast (i32* @v1 to i16*)
+@a11 = alias i32, ptr @v1
+@a12 = alias i16, ptr @v1
-@a13 = alias i32 (i8*), i32 (i8*)* @f1
-@a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
-@a15 = alias i16, i16* @a14
+@a13 = alias i32 (ptr), ptr @f1
+@a14 = alias i16, ptr @f1
+@a15 = alias i16, ptr @a14
; CHECK2: $c1 = comdat any
; CHECK2: $c2 = comdat any
; CHECK2-DAG: @v1 = weak_odr global i32 42, comdat($c1)
-; CHECK2-DAG: @r11 = global i32* @v1{{$}}
-; CHECK2-DAG: @r12 = global i32 (i8*)* @f1{{$}}
+; CHECK2-DAG: @r11 = global ptr @v1{{$}}
+; CHECK2-DAG: @r12 = global ptr @f1{{$}}
-; CHECK2-DAG: @r21 = global i32* @v1{{$}}
-; CHECK2-DAG: @r22 = global i32 (i8*)* @f1{{$}}
+; CHECK2-DAG: @r21 = global ptr @v1{{$}}
+; CHECK2-DAG: @r22 = global ptr @f1{{$}}
; CHECK2-DAG: @v1.1 = internal global i32 41, comdat($c2)
-; CHECK2-DAG: @a11 = alias i32, i32* @v1{{$}}
-; CHECK2-DAG: @a12 = alias i16, bitcast (i32* @v1 to i16*)
+; CHECK2-DAG: @a11 = alias i32, ptr @v1{{$}}
+; CHECK2-DAG: @a12 = alias i16, ptr @v1
-; CHECK2-DAG: @a13 = alias i32 (i8*), i32 (i8*)* @f1{{$}}
-; CHECK2-DAG: @a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
+; CHECK2-DAG: @a13 = alias i32 (ptr), ptr @f1{{$}}
+; CHECK2-DAG: @a14 = alias i16, ptr @f1
-; CHECK2-DAG: @a21 = alias i32, i32* @v1.1{{$}}
-; CHECK2-DAG: @a22 = alias i16, bitcast (i32* @v1.1 to i16*)
+; CHECK2-DAG: @a21 = alias i32, ptr @v1.1{{$}}
+; CHECK2-DAG: @a22 = alias i16, ptr @v1.1
-; CHECK2-DAG: @a23 = alias i32 (i8*), i32 (i8*)* @f1.2{{$}}
-; CHECK2-DAG: @a24 = alias i16, bitcast (i32 (i8*)* @f1.2 to i16*)
+; CHECK2-DAG: @a23 = alias i32 (ptr), ptr @f1.2{{$}}
+; CHECK2-DAG: @a24 = alias i16, ptr @f1.2
-; CHECK2: define weak_odr protected i32 @f1(i8* %0) comdat($c1) {
+; CHECK2: define weak_odr protected i32 @f1(ptr %0) comdat($c1) {
; CHECK2-NEXT: bb10:
; CHECK2-NEXT: br label %bb11{{$}}
; CHECK2: bb11:
; CHECK2-NEXT: ret i32 42
; CHECK2-NEXT: }
-; CHECK2: define internal i32 @f1.2(i8* %this) comdat($c2) {
+; CHECK2: define internal i32 @f1.2(ptr %this) comdat($c2) {
; CHECK2-NEXT: bb20:
-; CHECK2-NEXT: store i8* %this, i8** null
+; CHECK2-NEXT: store ptr %this, ptr null
; CHECK2-NEXT: br label %bb21
; CHECK2: bb21:
; CHECK2-NEXT: ret i32 41
; This is only present in this file. The linker will keep $c1 from the first
; file and this will be undefined.
@will_be_undefined = global i32 1, comdat($c1)
-@use = global i32* @will_be_undefined
+@use = global ptr @will_be_undefined
@v1 = weak_odr global i32 41, comdat($c2)
-define weak_odr protected i32 @f1(i8* %this) comdat($c2) {
+define weak_odr protected i32 @f1(ptr %this) comdat($c2) {
bb20:
- store i8* %this, i8** null
+ store ptr %this, ptr null
br label %bb21
bb21:
ret i32 41
}
-@r21 = global i32* @v1
-@r22 = global i32(i8*)* @f1
+@r21 = global ptr @v1
+@r22 = global ptr @f1
-@a21 = alias i32, i32* @v1
-@a22 = alias i16, bitcast (i32* @v1 to i16*)
+@a21 = alias i32, ptr @v1
+@a22 = alias i16, ptr @v1
-@a23 = alias i32(i8*), i32(i8*)* @f1
-@a24 = alias i16, bitcast (i32(i8*)* @f1 to i16*)
-@a25 = alias i16, i16* @a24
+@a23 = alias i32(ptr), ptr @f1
+@a24 = alias i16, ptr @f1
+@a25 = alias i16, ptr @a24
;--- 3.ll
; CHECK3: @bar = global i32 0, comdat($a1)
; CHECK3: @baz = private global i32 42, comdat($a1)
-; CHECK3: @a1 = internal alias i32, i32* @baz
+; CHECK3: @a1 = internal alias i32, ptr @baz
$a1 = comdat any
@bar = global i32 0, comdat($a1)
;--- 3-aux.ll
$a1 = comdat any
@baz = private global i32 42, comdat($a1)
-@a1 = internal alias i32, i32* @baz
-define i32* @abc() {
- ret i32* @a1
+@a1 = internal alias i32, ptr @baz
+define ptr @abc() {
+ ret ptr @a1
}
$foo = comdat any
@foo = global i8 0, comdat
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
; CHECK: @foo = global i8 0, comdat
%t = type { i8 }
@foo = global %t zeroinitializer, comdat
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
; CHECK: @foo = global %t zeroinitializer, comdat
; EXPORTSTATIC-DAG: @staticvar.llvm.{{.*}} = hidden global
; Eventually @staticconstvar can be exported as a copy and not promoted
; EXPORTSTATIC-DAG: @staticconstvar.llvm.0 = hidden unnamed_addr constant
-; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global void ()* null
+; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global ptr null
; EXPORTSTATIC-DAG: define hidden i32 @staticfunc.llvm.
; EXPORTSTATIC-DAG: define hidden void @staticfunc2.llvm.
; IMPORTSTATIC-DAG: @staticconstvar.llvm.{{.*}} = external hidden unnamed_addr constant
; IMPORTSTATIC-DAG: define available_externally i32 @referencestatics
; IMPORTSTATIC-DAG: %call = call i32 @staticfunc.llvm.
-; IMPORTSTATIC-DAG: %0 = load i32, i32* @staticvar.llvm.
+; IMPORTSTATIC-DAG: %0 = load i32, ptr @staticvar.llvm.
; IMPORTSTATIC-DAG: declare hidden i32 @staticfunc.llvm.
; Ensure that imported global (external) function and variable references
; Ensure that imported static function pointer correctly promoted and renamed.
; RUN: llvm-link %t2.bc -summary-index=%t3.thinlto.bc -import=callfuncptr:%t.bc -S | FileCheck %s --check-prefix=IMPORTFUNCPTR
-; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global void ()*
+; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global ptr
; IMPORTFUNCPTR-DAG: define available_externally void @callfuncptr
-; IMPORTFUNCPTR-DAG: %0 = load void ()*, void ()** @P.llvm.
+; IMPORTFUNCPTR-DAG: %0 = load ptr, ptr @P.llvm.
; Ensure that imported weak function reference/definition handled properly.
; Imported weak_any definition should be skipped with warning, and imported
@staticvar = internal global i32 1, align 4
@staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
@commonvar = common global i32 0, align 4
-@P = internal global void ()* null, align 8
+@P = internal global ptr null, align 8
-@weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
-@analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
-@linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
+@weakalias = weak alias void (...), ptr @globalfunc1
+@analias = alias void (...), ptr @globalfunc2
+@linkoncealias = alias void (...), ptr @linkoncefunc
define void @globalfunc1() #0 {
entry:
define i32 @referencestatics(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
%call = call i32 @staticfunc()
- %0 = load i32, i32* @staticvar, align 4
+ %0 = load i32, ptr @staticvar, align 4
%add = add nsw i32 %call, %0
- %1 = load i32, i32* %i.addr, align 4
+ %1 = load i32, ptr %i.addr, align 4
%idxprom = sext i32 %1 to i64
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
- %2 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
+ %2 = load i32, ptr %arrayidx, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
define i32 @referenceglobals(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
call void @globalfunc1()
- %0 = load i32, i32* @globalvar, align 4
+ %0 = load i32, ptr @globalvar, align 4
ret i32 %0
}
define i32 @referencecommon(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* @commonvar, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr @commonvar, align 4
ret i32 %0
}
define void @setfuncptr() #0 {
entry:
- store void ()* @staticfunc2, void ()** @P, align 8
+ store ptr @staticfunc2, ptr @P, align 8
ret void
}
define void @callfuncptr() #0 {
entry:
- %0 = load void ()*, void ()** @P, align 8
+ %0 = load ptr, ptr @P, align 8
call void %0()
ret void
}
;; Check that ifuncs are linked in properly.
-; CHECK-DAG: @foo = ifunc void (), void ()* ()* @foo_resolve
-; CHECK-DAG: define internal void ()* @foo_resolve() {
+; CHECK-DAG: @foo = ifunc void (), ptr @foo_resolve
+; CHECK-DAG: define internal ptr @foo_resolve() {
-; CHECK-DAG: @bar = ifunc void (), void ()* ()* @bar_resolve
-; CHECK-DAG: define internal void ()* @bar_resolve() {
+; CHECK-DAG: @bar = ifunc void (), ptr @bar_resolve
+; CHECK-DAG: define internal ptr @bar_resolve() {
;--- a.ll
declare void @bar()
;--- b.ll
-@foo = ifunc void (), void ()* ()* @foo_resolve
-@bar = ifunc void (), void ()* ()* @bar_resolve
+@foo = ifunc void (), ptr @foo_resolve
+@bar = ifunc void (), ptr @bar_resolve
-define internal void ()* @foo_resolve() {
- ret void ()* null
+define internal ptr @foo_resolve() {
+ ret ptr null
}
-define internal void ()* @bar_resolve() {
- ret void ()* null
+define internal ptr @bar_resolve() {
+ ret ptr null
}
@is_really_as1_gv_other_type = external global i32
; CHECK-LABEL: @foo(
-; CHECK: %load0 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* @is_really_as1_gv to i32*), align 4
-; CHECK: %load1 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* bitcast (float addrspace(1)* @is_really_as1_gv_other_type to i32 addrspace(1)*) to i32*), align 4
+; CHECK: %load0 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv to ptr), align 4
+; CHECK: %load1 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv_other_type to ptr), align 4
define void @foo() {
- %load0 = load volatile i32, i32* @is_really_as1_gv, align 4
- %load1 = load volatile i32, i32* @is_really_as1_gv_other_type, align 4
+ %load0 = load volatile i32, ptr @is_really_as1_gv, align 4
+ %load1 = load volatile i32, ptr @is_really_as1_gv_other_type, align 4
ret void
}
@foo = external dso_local local_unnamed_addr constant i32, align 4
define dso_local i32 @_Z3barv() local_unnamed_addr {
entry:
- %0 = load i32, i32* @foo, align 4
+ %0 = load i32, ptr @foo, align 4
ret i32 %0
}
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @main({ i64, { i64, i8* }* } %unnamed) #0 {
+define i32 @main({ i64, ptr } %unnamed) #0 {
%1 = call i32 @_simplefunction() #1
ret i32 %1
}
; RUN: | llvm-bcanalyzer -dump | FileCheck %s
; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8{{.*}}'
+; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0{{.*}}'
; Check that the summary is able to print the names despite the lack of
; string table in the legacy bitcode.
target triple = "x86_64-apple-macosx10.11.0"
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
declare void @baz()
; alive.
; We want to make sure the @linkonceodrfuncwithalias copy in Input/deadstrip.ll
; is also scanned when computing reachability.
-@linkonceodralias = linkonce_odr alias void (), void ()* @linkonceodrfuncwithalias
+@linkonceodralias = linkonce_odr alias void (), ptr @linkonceodrfuncwithalias
define linkonce_odr void @linkonceodrfuncwithalias() {
entry:
; Function Attrs: nounwind uwtable
define i32 @main() local_unnamed_addr {
- store i32 42, i32* @A, align 4
+ store i32 42, ptr @A, align 4
ret i32 0
}
define i32 @main() #0 {
entry:
call void (...) @foo()
- %0 = load i32, i32* @baz, align 4
+ %0 = load i32, ptr @baz, align 4
ret i32 %0
}
@baz = external local_unnamed_addr constant i32, align 4
define i32 @main() local_unnamed_addr {
- %1 = load i32, i32* @baz, align 4
+ %1 = load i32, ptr @baz, align 4
ret i32 %1
}
target triple = "x86_64-unknown-linux-gnu"
@a = dso_local global i32 42, align 4
-@b = dso_local global i32* @a, align 8
+@b = dso_local global ptr @a, align 8
define dso_local void @extern() {
- call i32 @extern_aux(i32* @a, i32** @b)
+ call i32 @extern_aux(ptr @a, ptr @b)
ret void
}
-define dso_local i32 @extern_aux(i32* %a, i32** %b) {
- %p = load i32*, i32** %b, align 8
- store i32 33, i32* %p, align 4
- %v = load i32, i32* %a, align 4
+define dso_local i32 @extern_aux(ptr %a, ptr %b) {
+ %p = load ptr, ptr %b, align 8
+ store i32 33, ptr %p, align 4
+ %v = load i32, ptr %a, align 4
ret i32 %v
}
define linkonce dso_local void @linkonce() {
- call i32 @linkonce_aux(i32* @a, i32** @b)
+ call i32 @linkonce_aux(ptr @a, ptr @b)
ret void
}
-define linkonce i32 @linkonce_aux(i32* %a, i32** %b) {
- %p = load i32*, i32** %b, align 8
- store i32 33, i32* %p, align 4
- %v = load i32, i32* %a, align 4
+define linkonce i32 @linkonce_aux(ptr %a, ptr %b) {
+ %p = load ptr, ptr %b, align 8
+ store i32 33, ptr %p, align 4
+ %v = load i32, ptr %a, align 4
ret i32 %v
}
define linkonce_odr dso_local void @linkonceodr() {
- call i32 @linkonceodr_aux(i32* @a, i32** @b)
+ call i32 @linkonceodr_aux(ptr @a, ptr @b)
ret void
}
-define linkonce_odr i32 @linkonceodr_aux(i32* %a, i32** %b) {
- %p = load i32*, i32** %b, align 8
- store i32 33, i32* %p, align 4
- %v = load i32, i32* %a, align 4
+define linkonce_odr i32 @linkonceodr_aux(ptr %a, ptr %b) {
+ %p = load ptr, ptr %b, align 8
+ store i32 33, ptr %p, align 4
+ %v = load i32, ptr %a, align 4
ret i32 %v
}
define weak dso_local void @weak() {
- call i32 @weak_aux(i32* @a, i32** @b)
+ call i32 @weak_aux(ptr @a, ptr @b)
ret void
}
-define weak i32 @weak_aux(i32* %a, i32** %b) {
- %p = load i32*, i32** %b, align 8
- store i32 33, i32* %p, align 4
- %v = load i32, i32* %a, align 4
+define weak i32 @weak_aux(ptr %a, ptr %b) {
+ %p = load ptr, ptr %b, align 8
+ store i32 33, ptr %p, align 4
+ %v = load i32, ptr %a, align 4
ret i32 %v
}
define weak_odr dso_local void @weakodr() {
- call i32 @weakodr_aux(i32* @a, i32** @b)
+ call i32 @weakodr_aux(ptr @a, ptr @b)
ret void
}
-define weak_odr i32 @weakodr_aux(i32* %a, i32** %b) {
- %p = load i32*, i32** %b, align 8
- store i32 33, i32* %p, align 4
- %v = load i32, i32* %a, align 4
+define weak_odr i32 @weakodr_aux(ptr %a, ptr %b) {
+ %p = load ptr, ptr %b, align 8
+ store i32 33, ptr %p, align 4
+ %v = load i32, ptr %a, align 4
ret i32 %v
}
@foo = external dso_local local_unnamed_addr constant i32, align 4
define dso_local i32 @main() local_unnamed_addr {
entry:
- %0 = load i32, i32* @foo, align 4
+ %0 = load i32, ptr @foo, align 4
%call = tail call i32 @_Z3barv()
%add = add nsw i32 %call, %0
ret i32 %add
@g = external global i32
define i32 @main() {
- %v = load i32, i32* @g
+ %v = load i32, ptr @g
ret i32 %v
}
define i32 @main() {
%v = call i32 @foo()
- %v2 = load i32, i32* @g
+ %v2 = load i32, ptr @g
%v3 = add i32 %v, %v2
ret i32 %v3
}
@g = external global i32
define i32 @main() {
- %v = load i32, i32* @g
+ %v = load i32, ptr @g
%q = add i32 %v, 1
- store i32 %q, i32* @g
+ store i32 %q, ptr @g
ret i32 %v
}
declare void @foo()
-define void @bar() personality i32 (i32, i32, i64, i8*, i8*)* @personality_routine {
+define void @bar() personality ptr @personality_routine {
ret void
}
-define internal i32 @personality_routine(i32, i32, i64, i8*, i8*) {
+define internal i32 @personality_routine(i32, i32, i64, ptr, ptr) {
call void @foo()
ret i32 0
}
; can make a local copy of someglobal and someglobal2 because they are both
; 'unnamed_addr' constants. This should eventually be done as well.
; RUN: llvm-lto -thinlto-action=import -import-constants-with-refs %t.bc -thinlto-index=%t3.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=IMPORT
-; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @referencedbyglobal to i8*)
-; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @localreferencedbyglobal.llvm.0 to i8*)
+; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant ptr @referencedbyglobal
+; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant ptr @localreferencedbyglobal.llvm.0
; IMPORT: define available_externally void @bar()
; Check the export side: we currently only export bar(), which causes
@G = weak dso_local local_unnamed_addr global i32 0, align 4
define dso_local i32 @main() local_unnamed_addr {
- %1 = load i32, i32* @G, align 4
+ %1 = load i32, ptr @G, align 4
ret i32 %1
}
;
; The if-then-else blocks should be in just one function.
; CHECK: [[FOO_DIAMOND_LABEL]]:
-; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32* [[RES_VAL_ADDR]])
-; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, i32* [[RES_VAL_ADDR]]
+; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr [[RES_VAL_ADDR]])
+; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, ptr [[RES_VAL_ADDR]]
; Then it should directly jump to end.
; CHECK: br label %[[FOO_END_LABEL:.*$]]
;
; CHECK: br i1 %or.cond, label %bb9, label %[[BAR_DIAMOND_LABEL:.*$]]
;
; CHECK: [[BAR_DIAMOND_LABEL]]:
-; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32*
+; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr
; CHECK: br i1 [[CMP]], label %bb26, label %bb30
define i32 @bar(i32 %arg, i32 %arg1) {
bb:
ret i32 %c3
}
-; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, i32* %sub.out) [[FN_ATTRS0:#[0-9]+]]
-; CHECK: define internal void @callee_most.2.if.then(i32 %v, i32* %sub.out) [[FN_ATTRS:#[0-9]+]]
+; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, ptr %sub.out) [[FN_ATTRS0:#[0-9]+]]
+; CHECK: define internal void @callee_most.2.if.then(i32 %v, ptr %sub.out) [[FN_ATTRS:#[0-9]+]]
; attributes to preserve
attributes #0 = {
; CHECK-LABEL: @caller
; CHECK: codeRepl.i:
; CHECK-NOT: br label
-; CHECK: call void @callee.2.if.then(i32 %v, i32* %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
+; CHECK: call void @callee.2.if.then(i32 %v, ptr %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
define i32 @caller(i32 %v) !dbg !8 {
entry:
%call = call i32 @callee(i32 %v), !dbg !14
; CHECK-LABEL: @caller2
; CHECK: codeRepl.i:
; CHECK-NOT: br label
-; CHECK: call void @callee2.1.if.then(i32 %v, i32* %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
+; CHECK: call void @callee2.1.if.then(i32 %v, ptr %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
define i32 @caller2(i32 %v) !dbg !21 {
entry:
%call = call i32 @callee2(i32 %v), !dbg !22
; CHECK-LABEL: @caller
; CHECK: codeRepl.i:
; CHECK-NOT: br label
-; CHECK: call void (i32, i32*, ...) @callee.1.if.then(i32 %v, i32* %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
+; CHECK: call void (i32, ptr, ...) @callee.1.if.then(i32 %v, ptr %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
define i32 @caller(i32 %v) !dbg !8 {
entry:
%call = call i32 (i32, ...) @callee(i32 %v, i32 99), !dbg !14
@staticvar = internal global i32 1, align 4
@staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
@commonvar = common global i32 0, align 4
-@P = internal global void ()* null, align 8
+@P = internal global ptr null, align 8
-@weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
-@analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
-@linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
+@weakalias = weak alias void (...), ptr @globalfunc1
+@analias = alias void (...), ptr @globalfunc2
+@linkoncealias = alias void (...), ptr @linkoncefunc
define void @globalfunc1() #0 {
entry:
define i32 @referencestatics(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
%call = call i32 @staticfunc()
- %0 = load i32, i32* @staticvar, align 4
+ %0 = load i32, ptr @staticvar, align 4
%add = add nsw i32 %call, %0
- %1 = load i32, i32* %i.addr, align 4
+ %1 = load i32, ptr %i.addr, align 4
%idxprom = sext i32 %1 to i64
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
- %2 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
+ %2 = load i32, ptr %arrayidx, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
define i32 @referenceglobals(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
call void @globalfunc1()
- %0 = load i32, i32* @globalvar, align 4
+ %0 = load i32, ptr @globalvar, align 4
ret i32 %0
}
define i32 @referencecommon(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* @commonvar, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr @commonvar, align 4
ret i32 %0
}
define void @setfuncptr() #0 {
entry:
- store void ()* @staticfunc2, void ()** @P, align 8
+ store ptr @staticfunc2, ptr @P, align 8
ret void
}
define void @callfuncptr() #0 {
entry:
- %0 = load void ()*, void ()** @P, align 8
+ %0 = load ptr, ptr @P, align 8
call void %0()
ret void
}
declare i32 @__gxx_personality_v0(...)
; Add enough instructions to prevent import with inst limit of 5
-define internal void @funcwithpersonality() #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define internal void @funcwithpersonality() #2 personality ptr @__gxx_personality_v0 {
entry:
call void @globalfunc2()
call void @globalfunc2()
; Variadic function with va_start should not be imported because inliner
; doesn't handle it.
define void @variadic_va_start(...) {
- %ap = alloca i8*, align 8
- %ap.0 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap.0)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
ret void
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
; Ensure that all uses of local variable @P which has used in setfuncptr
; and callfuncptr are to the same promoted/renamed global.
-; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global void ()* null
-; CHECK-DAG: %0 = load void ()*, void ()** @P.llvm.
-; CHECK-DAG: store void ()* @staticfunc2.llvm.{{.*}}, void ()** @P.llvm.
+; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global ptr null
+; CHECK-DAG: %0 = load ptr, ptr @P.llvm.
+; CHECK-DAG: store ptr @staticfunc2.llvm.{{.*}}, ptr @P.llvm.
; Ensure that @referencelargelinkonce definition is pulled in, but later we
; also check that the linkonceodr function is not.
declare void @linkoncefunc2(...) #1
; INSTLIMDEF-DAG: Import funcwithpersonality
-; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 {
; INSTLIM5-DAG: declare hidden void @funcwithpersonality.llvm.{{.*}}()
; We can import variadic functions without a va_start, since the inliner
ret i32 0
}
-@analias = alias void (), void ()* @globalfunc
+@analias = alias void (), ptr @globalfunc
define void @globalfunc() #0 {
entry:
; CHECK-LABEL: void @empty()
; CHECK-NEXT: entry:
-; CHECK-NEXT: %0 = atomicrmw add i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
+; CHECK-NEXT: %0 = atomicrmw add ptr @__llvm_gcov_ctr, i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
; CHECK-NEXT: ret void, !dbg [[DBG]]
define dso_local void @empty() !dbg !5 {
;
; GCDA: [[FILE_LOOP_HEADER]]:
; GCDA-NEXT: %[[IV:.*]] = phi i32 [ 0, %entry ], [ %[[NEXT_IV:.*]], %[[FILE_LOOP_LATCH:.*]] ]
-; GCDA-NEXT: %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, {{.*}}* @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
-; GCDA-NEXT: %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 0
-; GCDA-NEXT: %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 0
-; GCDA-NEXT: %[[START_FILE_ARG_0:.*]] = load i8*, i8** %[[START_FILE_ARG_0_PTR]]
-; GCDA-NEXT: %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 1
-; GCDA-NEXT: %[[START_FILE_ARG_1:.*]] = load i32, i32* %[[START_FILE_ARG_1_PTR]]
-; GCDA-NEXT: %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 2
-; GCDA-NEXT: %[[START_FILE_ARG_2:.*]] = load i32, i32* %[[START_FILE_ARG_2_PTR]]
-; GCDA-NEXT: call void @llvm_gcda_start_file(i8* %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
-; GCDA-NEXT: %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 1
-; GCDA-NEXT: %[[NUM_COUNTERS:.*]] = load i32, i32* %[[NUM_COUNTERS_PTR]]
-; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 2
-; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_FUN_ARGS_ARRAY_PTR]]
-; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 3
-; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
+; GCDA-NEXT: %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, ptr @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
+; GCDA-NEXT: %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 0
+; GCDA-NEXT: %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 0
+; GCDA-NEXT: %[[START_FILE_ARG_0:.*]] = load ptr, ptr %[[START_FILE_ARG_0_PTR]]
+; GCDA-NEXT: %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 1
+; GCDA-NEXT: %[[START_FILE_ARG_1:.*]] = load i32, ptr %[[START_FILE_ARG_1_PTR]]
+; GCDA-NEXT: %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 2
+; GCDA-NEXT: %[[START_FILE_ARG_2:.*]] = load i32, ptr %[[START_FILE_ARG_2_PTR]]
+; GCDA-NEXT: call void @llvm_gcda_start_file(ptr %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
+; GCDA-NEXT: %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 1
+; GCDA-NEXT: %[[NUM_COUNTERS:.*]] = load i32, ptr %[[NUM_COUNTERS_PTR]]
+; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 2
+; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_FUN_ARGS_ARRAY_PTR]]
+; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 3
+; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
; GCDA-NEXT: %[[ENTER_COUNTER_LOOP_COND:.*]] = icmp slt i32 0, %[[NUM_COUNTERS]]
; GCDA-NEXT: br i1 %[[ENTER_COUNTER_LOOP_COND]], label %[[COUNTER_LOOP:.*]], label %[[FILE_LOOP_LATCH]]
;
; GCDA: [[COUNTER_LOOP]]:
; GCDA-NEXT: %[[JV:.*]] = phi i32 [ 0, %[[FILE_LOOP_HEADER]] ], [ %[[NEXT_JV:.*]], %[[COUNTER_LOOP]] ]
-; GCDA-NEXT: %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
-; GCDA-NEXT: %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 0
-; GCDA-NEXT: %[[EMIT_FUN_ARG_0:.*]] = load i32, i32* %[[EMIT_FUN_ARG_0_PTR]]
-; GCDA-NEXT: %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 1
-; GCDA-NEXT: %[[EMIT_FUN_ARG_1:.*]] = load i32, i32* %[[EMIT_FUN_ARG_1_PTR]]
-; GCDA-NEXT: %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 2
-; GCDA-NEXT: %[[EMIT_FUN_ARG_2:.*]] = load i32, i32* %[[EMIT_FUN_ARG_2_PTR]]
+; GCDA-NEXT: %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
+; GCDA-NEXT: %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 0
+; GCDA-NEXT: %[[EMIT_FUN_ARG_0:.*]] = load i32, ptr %[[EMIT_FUN_ARG_0_PTR]]
+; GCDA-NEXT: %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 1
+; GCDA-NEXT: %[[EMIT_FUN_ARG_1:.*]] = load i32, ptr %[[EMIT_FUN_ARG_1_PTR]]
+; GCDA-NEXT: %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 2
+; GCDA-NEXT: %[[EMIT_FUN_ARG_2:.*]] = load i32, ptr %[[EMIT_FUN_ARG_2_PTR]]
; GCDA-NEXT: call void @llvm_gcda_emit_function(i32 %[[EMIT_FUN_ARG_0]],
; GCDA-SAME: i32 %[[EMIT_FUN_ARG_1]],
; GCDA-SAME: i32 %[[EMIT_FUN_ARG_2]])
-; GCDA-NEXT: %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
-; GCDA-NEXT: %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 0
-; GCDA-NEXT: %[[EMIT_ARCS_ARG_0:.*]] = load i32, i32* %[[EMIT_ARCS_ARG_0_PTR]]
-; GCDA-NEXT: %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 1
-; GCDA-NEXT: %[[EMIT_ARCS_ARG_1:.*]] = load i64*, i64** %[[EMIT_ARCS_ARG_1_PTR]]
+; GCDA-NEXT: %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
+; GCDA-NEXT: %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 0
+; GCDA-NEXT: %[[EMIT_ARCS_ARG_0:.*]] = load i32, ptr %[[EMIT_ARCS_ARG_0_PTR]]
+; GCDA-NEXT: %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 1
+; GCDA-NEXT: %[[EMIT_ARCS_ARG_1:.*]] = load ptr, ptr %[[EMIT_ARCS_ARG_1_PTR]]
; GCDA-NEXT: call void @llvm_gcda_emit_arcs(i32 %[[EMIT_ARCS_ARG_0]],
-; GCDA-SAME: i64* %[[EMIT_ARCS_ARG_1]])
+; GCDA-SAME: ptr %[[EMIT_ARCS_ARG_1]])
; GCDA-NEXT: %[[NEXT_JV]] = add i32 %[[JV]], 1
; GCDA-NEXT: %[[COUNTER_LOOP_COND:.*]] = icmp slt i32 %[[NEXT_JV]], %[[NUM_COUNTERS]]
; GCDA-NEXT: br i1 %[[COUNTER_LOOP_COND]], label %[[COUNTER_LOOP]], label %[[FILE_LOOP_LATCH]]
define dso_local i32 @instr(i32 %a) !dbg !28 {
; CHECK-LABEL: @instr(
-; CHECK-NEXT: [[GCOV_CTR:%.*]] = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT: [[GCOV_CTR:%.*]] = load i64, ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[GCOV_CTR]], 1, !dbg [[DBG8]]
-; CHECK-NEXT: store i64 [[TMP1]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8]]
+; CHECK-NEXT: store i64 [[TMP1]], ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8]]
; CHECK-NEXT: ret i32 42, !dbg [[DBG8]]
;
ret i32 42, !dbg !44
; CHECK: define internal void @__llvm_gcov_reset()
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr to i8*), i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr.1 to i8*), i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr, i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr.1, i8 0, i64 8, i1 false)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
; CHECK-LABEL: define {{.*}}@foo.cold.1(
; CHECK: call {{.*}}@sink
; CHECK: %p.ce = phi i32 [ 1, %coldbb ], [ 3, %coldbb2 ]
-; CHECK-NEXT: store i32 %p.ce, i32* %p.ce.out
+; CHECK-NEXT: store i32 %p.ce, ptr %p.ce.out
define void @foo(i32 %cond) {
entry:
; CHECK-NEXT: ]
;
; CHECK: codeRepl:
-; CHECK-NEXT: bitcast
; CHECK-NEXT: lifetime.start
-; CHECK-NEXT: call void @pluto.cold.1(i1* %tmp8.ce.loc)
-; CHECK-NEXT: %tmp8.ce.reload = load i1, i1* %tmp8.ce.loc
+; CHECK-NEXT: call void @pluto.cold.1(ptr %tmp8.ce.loc)
+; CHECK-NEXT: %tmp8.ce.reload = load i1, ptr %tmp8.ce.loc
; CHECK-NEXT: lifetime.end
; CHECK-NEXT: br label %bb7
;
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
-; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
+; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 1)
-; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
+; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
; CHECK-NEXT: switch i32 [[TMP1]], label [[FINAL_BLOCK_0:%.*]] [
; CHECK-NEXT: ]
; CHECK: output_block_0_1:
-; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_1:
-; CHECK-NEXT: store i32 [[TMP7]], i32* [[TMP0]], align 4
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
-; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
-; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
; CHECK-NEXT: [[PHINODE1_CE:%.*]] = phi i32 [ 5, [[BB1_TO_OUTLINE]] ], [ 5, [[BB2]] ]
; CHECK-NEXT: br label [[BB5_EXITSTUB:%.*]]
; CHECK: bb5.exitStub:
-; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT: store i32 [[PHINODE1_CE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 [[PHINODE1_CE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: bb1_after_outline.exitStub:
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
-; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
+; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: placeholder:
; CHECK-NEXT: [[A:%.*]] = sub i32 5, 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb3:
-; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[PHINODE_CE_LOC1]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC1]], i32 1)
-; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, i32* [[PHINODE_CE_LOC1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
+; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: placeholder1:
; CHECK-NEXT: [[B:%.*]] = add i32 5, 4
; CHECK-NEXT: i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[TMP7]], i32* [[TMP0]], align 4
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 [[A]], i32* null, i32 -1)
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 [[A]], ptr null, i32 -1)
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, i32* [[F_CE_LOC]], i32 0)
-; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[F_CE_LOC]], i32 0)
+; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[TMP5:%.*]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT: store i32 [[TMP5:%.*]], ptr [[TMP3:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: phi_block:
; CHECK-NEXT: [[TMP5]] = phi i32 [ [[TMP2]], [[BB2_TO_OUTLINE]] ], [ [[TMP2]], [[BB3]] ]
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[B_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[B_CE_LOC]], i32 0)
-; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, i32* [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
+; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* null, i32 -1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr null, i32 -1)
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: unreachable
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_0_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[B_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 [[B_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[B_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[B_CE_LOC]], i32 0)
-; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, i32* [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
+; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: call void @outlined_ir_func_0(i32* null, i32 -1)
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr null, i32 -1)
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: unreachable
; CHECK-NEXT: i32 0, label [[OUTPUT_BLOCK_0_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[B_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT: store i32 [[B_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32* [[F_CE_LOC]], i32 0)
-; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 0)
+; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32* [[F_CE_LOC]], i32 1)
-; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 1)
+; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
-; CHECK-NEXT: store i32 [[F_CE]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT: store i32 [[F_CE]], ptr [[TMP2:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_0:
-; CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP2]], align 4
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP2]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void
; RUN: opt -S %s -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/exported-funcs.yaml | FileCheck %s
;
-; CHECK: @alias1 = weak alias void (), void ()* @external_addrtaken
-; CHECK: @alias2 = hidden alias void (), void ()* @external_addrtaken
+; CHECK: @alias1 = weak alias void (), ptr @external_addrtaken
+; CHECK: @alias2 = hidden alias void (), ptr @external_addrtaken
; CHECK-NOT: @alias3 = alias
; CHECK-NOT: @not_present
; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 1 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 1 to ptr)
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 128 to i8*)
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 128 to ptr)
; ARM-NOT: alias {{.*}} inttoptr
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
; SUMMARY: TypeIdMap:
; SUMMARY-NEXT: typeid1:
; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
; CHECK: [[B:@[0-9]+]] = private constant [258 x i8] c"\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01"
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 65 to i8*)
-; CHECK: @__typeid_typeid1_byte_array = hidden alias i8, i8* @bits.1
-; X86: @__typeid_typeid1_bit_mask = hidden alias i8, inttoptr (i8 2 to i8*)
-
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 257 to i8*)
-; CHECK: @__typeid_typeid2_byte_array = hidden alias i8, i8* @bits
-; X86: @__typeid_typeid2_bit_mask = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 65 to ptr)
+; CHECK: @__typeid_typeid1_byte_array = hidden alias i8, ptr @bits.1
+; X86: @__typeid_typeid1_bit_mask = hidden alias i8, inttoptr (i8 2 to ptr)
+
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 257 to ptr)
+; CHECK: @__typeid_typeid2_byte_array = hidden alias i8, ptr @bits
+; X86: @__typeid_typeid2_bit_mask = hidden alias i8, inttoptr (i8 1 to ptr)
; ARM-NOT: alias {{.*}} inttoptr
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
-; CHECK: @bits = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
-; CHECK: @bits.1 = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
+; CHECK: @bits = private alias i8, ptr [[B]]
+; CHECK: @bits.1 = private alias i8, ptr [[B]]
; SUMMARY: TypeIdMap:
; SUMMARY-NEXT: typeid1:
!8 = !{i64 0, !"typeid3"}
-; CHECK-DAG: @__typeid_typeid1_global_addr = hidden alias i8, bitcast (void ()* [[JT1:.*]] to i8*)
-; CHECK-DAG: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 3 to i8*)
-; CHECK-DAG: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 4 to i8*)
+; CHECK-DAG: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[JT1:.*]]
+; CHECK-DAG: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 3 to ptr)
+; CHECK-DAG: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 4 to ptr)
-; CHECK-DAG: @h = alias void (i8), bitcast (void ()* [[JT1]] to void (i8)*)
-; CHECK-DAG: @f = alias void (i32), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @f2 = alias void (i32), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @external.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @external_weak.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
+; CHECK-DAG: @h = alias void (i8), ptr [[JT1]]
+; CHECK-DAG: @f = alias void (i32), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @f2 = alias void (i32), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @external.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @external_weak.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}ptr [[JT1]]
-; CHECK-DAG: @__typeid_typeid2_global_addr = hidden alias i8, bitcast (void ()* [[JT2:.*]] to i8*)
+; CHECK-DAG: @__typeid_typeid2_global_addr = hidden alias i8, ptr [[JT2:.*]]
-; CHECK-DAG: @g = alias void (), void ()* [[JT2]]
+; CHECK-DAG: @g = alias void (), ptr [[JT2]]
; CHECK-DAG: define hidden void @h.cfi(i8 {{.*}}) !type !{{.*}}
; CHECK-DAG: declare !type !{{.*}} void @external()
; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; CHECK-X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; CHECK-X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 3 to i8*)
-; CHECK-X86: @__typeid_typeid1_inline_bits = hidden alias i8, inttoptr (i32 9 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; CHECK-X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; CHECK-X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 3 to ptr)
+; CHECK-X86: @__typeid_typeid1_inline_bits = hidden alias i8, inttoptr (i32 9 to ptr)
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; CHECK-X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; CHECK-X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 33 to i8*)
-; CHECK-X86: @__typeid_typeid2_inline_bits = hidden alias i8, inttoptr (i64 8589934593 to i8*)
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; CHECK-X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; CHECK-X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 33 to ptr)
+; CHECK-X86: @__typeid_typeid2_inline_bits = hidden alias i8, inttoptr (i64 8589934593 to ptr)
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
; SUMMARY: TypeIdMap:
; SUMMARY-NEXT: typeid1:
; CHECK: [[G:@[0-9]+]] = private constant { i32 } { i32 42 }
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, bitcast ({ i32 }* [[G]] to i8*)
-; CHECK: @foo = alias i32, getelementptr inbounds ({ i32 }, { i32 }* [[G]], i32 0, i32 0)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; CHECK: @foo = alias i32, ptr [[G]]
; SUMMARY: TypeIdMap:
; SUMMARY-NEXT: typeid1:
!1 = !{!"external_addrtaken", i8 0, !2}
!2 = !{i64 0, !"typeid1"}
-; CHECK-DAG: @external_addrtaken = alias void (i8), bitcast
+; CHECK-DAG: @external_addrtaken = alias void (i8), ptr @.cfi.jumptable
; CHECK: bb14:
; CHECK-NEXT: br label [[BB16]]
; CHECK: bb15:
-; CHECK-NEXT: store i8 poison, i8* null, align 1
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[BB16]]
; CHECK: bb16:
; CHECK-NEXT: [[TMP17:%.*]] = phi i32 [ poison, [[BB15]] ], [ 1, [[BB14]] ], [ 9, [[BB7]] ]
; CHECK: if.then11:
; CHECK-NEXT: br label [[CLEANUP]]
; CHECK: if.end12:
-; CHECK-NEXT: store i8 poison, i8* null, align 1
+; CHECK-NEXT: store i8 poison, ptr null, align 1
; CHECK-NEXT: br label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[CLEANUP_DEST:%.*]] = phi i32 [ poison, [[IF_END12]] ], [ 1, [[IF_THEN11]] ], [ 9, [[IF_THEN]] ]
; USE-SAME: !prof ![[FUNC_ENTRY_COUNT:[0-9]+]]
entry:
; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_1, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_1, i64 {{[0-9]+}}, i32 2, i32 0)
%cmp = icmp sgt i32 %i, 0
br i1 %cmp, label %if.then, label %if.end
; USE: br i1 %cmp, label %if.then, label %if.end
if.then:
; GEN: if.then:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_1, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_1, i64 {{[0-9]+}}, i32 2, i32 1)
%add = add nsw i32 %i, 2
br label %if.end
entry:
; GEN: entry:
; NOTENTRY-NOT: llvm.instrprof.increment
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
%cmp = icmp sgt i32 %i, 0
br i1 %cmp, label %if.then, label %if.else
; USE: br i1 %cmp, label %if.then, label %if.else
if.then:
; GEN: if.then:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
; ENTRY-NOT: llvm.instrprof.increment
%add = add nsw i32 %i, 2
br label %if.end
if.else:
; GEN: if.else:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 1)
%sub = sub nsw i32 %i, 2
br label %if.end
bb12: ; preds = %bb9
ret void
-; NONATOMIC_PROMO: %[[PROMO1:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 0)
+; NONATOMIC_PROMO: %[[PROMO1:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}}
; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO1]], %[[LIVEOUT1]]
-; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}0)
+; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}
; NONATOMIC_PROMO-NEXT: %[[PROMO2:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 1)
; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO2]], %[[LIVEOUT2]]
; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}1)
; NONATOMIC_PROMO-NEXT: %[[PROMO3:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 2)
; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO3]], %[[LIVEOUT3]]
; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}2)
-; ATOMIC_PROMO: atomicrmw add {{.*}} @__profc_foo{{.*}}0), i64 %[[LIVEOUT1]] seq_cst
+; ATOMIC_PROMO: atomicrmw add {{.*}} @__profc_foo{{.*}}, i64 %[[LIVEOUT1]] seq_cst
; ATOMIC_PROMO-NEXT: atomicrmw add {{.*}} @__profc_foo{{.*}}1), i64 %[[LIVEOUT2]] seq_cst
; ATOMIC_PROMO-NEXT: atomicrmw add {{.*}} @__profc_foo{{.*}}2), i64 %[[LIVEOUT3]] seq_cst
; PROMO-NOT: @__profc_foo{{.*}})
entry:
; CHECK: entry:
; NOTENTRY-NOT: call void @llvm.instrprof.increment
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 0)
switch i32 %i, label %sw.default [
i32 1, label %sw.bb
i32 2, label %sw.bb1
; USE-SAME: !prof ![[BW_SWITCH:[0-9]+]]
; CHECK: entry.sw.bb2_crit_edge1:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 2)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 2)
; CHECK: br label %sw.bb2
; CHECK: entry.sw.bb2_crit_edge:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 0)
-; TENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 1)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 0)
+; TENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 1)
; CHECK: br label %sw.bb2
sw.bb:
; GEN: sw.bb:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 5)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 5)
%call = call i32 @bar(i32 2)
br label %sw.epilog
sw.bb1:
; GEN: sw.bb1:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 4)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 4)
%call2 = call i32 @bar(i32 1024)
br label %sw.epilog
if.then:
; GEN: if.then:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 2)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 2)
; ENTRY-NOT: call void @llvm.instrprof.increment
%call4 = call i32 @bar(i32 4)
br label %return
if.end:
; GEN: if.end:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 3)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 3)
%call5 = call i32 @bar(i32 8)
br label %sw.epilog
if.then8:
; GEN: if.then8:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 7)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 7)
%add = add nsw i32 %call6, 10
br label %if.end9
if.end9:
; GEN: if.end9:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 6)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 6)
%res.0 = phi i32 [ %add, %if.then8 ], [ %call6, %sw.default ]
br label %sw.epilog
define internal i32 @bar(i32 %i) {
entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn__stdin__bar, i32 0, i32 0), i64 {{[0-9]+}}, i32 1, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn__stdin__bar, i64 {{[0-9]+}}, i32 1, i32 0)
ret i32 %i
}
define i32 @test_br_2(i32 %i) {
entry:
; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
; GENA: entry:
-; GENA: %{{[0-9+]}} = atomicrmw add i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 0), i64 1 monotonic
+; GENA: %{{[0-9]+}} = atomicrmw add ptr @__profc_test_br_2, i64 1 monotonic
; USE: br i1 %cmp, label %if.then, label %if.else
; USE-SAME: !prof ![[BW_ENTRY:[0-9]+]]
; USE: ![[BW_ENTRY]] = !{!"branch_weights", i32 0, i32 1}
if.else:
; GEN: if.else:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 1)
; GENA: if.else:
-; GENA: %pgocount = load i64, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 1), align 8
+; GENA: %pgocount = load i64, ptr getelementptr inbounds ([2 x i64], ptr @__profc_test_br_2, i32 0, i32 1), align 8
; GENA: [[V:%[0-9]*]] = add i64 %pgocount, 1
-; GENA: store i64 [[V]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 1), align 8
+; GENA: store i64 [[V]], ptr getelementptr inbounds ([2 x i64], ptr @__profc_test_br_2, i32 0, i32 1), align 8
%sub = sub nsw i32 %i, 2
br label %if.end
define i32 @test_simple_for(i32 %n) {
entry:
; GEN: entry:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 0)
br label %for.cond
for.cond:
for.inc:
; GEN: for.inc:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 1)
%inc1 = add nsw i32 %i, 1
br label %for.cond
define i32 @test_nested_for(i32 %r, i32 %s) {
entry:
; GEN: entry:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 2)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 2)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 0)
br label %for.cond.outer
for.cond.outer:
for.inc.inner:
; GEN: for.inc.inner:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 0)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 1)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 1)
%inc.1 = add nsw i32 %j.0, 1
br label %for.cond.inner
for.inc.outer:
; GEN: for.inc.outer:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 2)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 2)
%inc.2 = add nsw i32 %i.0, 1
br label %for.cond.outer
define i32 @foo(i32 %i) {
entry:
-; GEN: %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_foo
-; GEN-NOT: %pgocount.i = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__stdin__bar
+; GEN: %pgocount = load i64, ptr @__profc_foo
+; GEN-NOT: %pgocount.i = load i64, ptr @__profc__stdin__bar
%call = call i32 @bar()
%add = add nsw i32 %i, %call
ret i32 %add
define i32 @single_bb() {
entry:
; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_single_bb, i32 0, i32 0), i64 {{[0-9]+}}, i32 1, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_single_bb, i64 {{[0-9]+}}, i32 1, i32 0)
ret i32 0
}
entry:
; GEN: entry:
; NOTENTRY-NOT: call void @llvm.instrprof.increment
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 0)
switch i32 %i, label %sw.default [
i32 1, label %sw.bb
i32 2, label %sw.bb1
sw.bb:
; GEN: sw.bb:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 2)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 2)
br label %sw.epilog
sw.bb1:
; GEN: sw.bb1:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 0)
; ENTRY-NOT: call void @llvm.instrprof.increment
br label %sw.epilog
sw.bb2:
; GEN: sw.bb2:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 1)
br label %sw.epilog
sw.default:
; GEN: sw.default:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 3)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 3)
br label %sw.epilog
sw.epilog:
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-@foo = external local_unnamed_addr global void ()*, align 8
-@bar = external local_unnamed_addr global void ()*, align 8
+@foo = external local_unnamed_addr global ptr, align 8
+@bar = external local_unnamed_addr global ptr, align 8
define i32 @main() local_unnamed_addr {
entry:
- %0 = load void ()*, void ()** @foo, align 8
+ %0 = load ptr, ptr @foo, align 8
; ICALL-PROM: br i1 %{{[0-9]+}}, label %if.true.direct_targ, label %if.false.orig_indirect, !prof [[BRANCH_WEIGHT:![0-9]+]]
tail call void %0(), !prof !1
- %1 = load void ()*, void ()** @bar, align 8
+ %1 = load ptr, ptr @bar, align 8
; ICALL-PROM: br i1 %{{[0-9]+}}, label %if.true.direct_targ1, label %if.false.orig_indirect2, !prof [[BRANCH_WEIGHT:![0-9]+]]
tail call void %1(), !prof !2
ret i32 0
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-@fptr = local_unnamed_addr global void ()* null, align 8
+@fptr = local_unnamed_addr global ptr null, align 8
; Function Attrs: norecurse uwtable
define i32 @main() local_unnamed_addr #0 !prof !34 {
entry:
- %0 = load void ()*, void ()** @fptr, align 8
+ %0 = load ptr, ptr @fptr, align 8
; ICALL-PROM: br i1 %{{[0-9]+}}, label %if.true.direct_targ, label %if.false.orig_indirect
tail call void %0(), !prof !40
ret i32 0
; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
; CHECK: switch.lookup:
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[SWITCH_TABLEIDX]] to i64
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.f, i64 0, i64 [[TMP1]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], ptr @switch.table.f, i64 0, i64 [[TMP1]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 15, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[A]], [[BRANCH2]] ], [ [[B]], [[BRANCH1]] ]
-; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, i32 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i32f(i64 2882400000, i32 0, i32 ()* elementtype(i32 ()) @foo, i32 0, i32 0, i32 0, i32 0)
+; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(i32 ()) @foo, i32 0, i32 0, i32 0, i32 0)
; CHECK-NEXT: [[RET1:%.*]] = call i32 @llvm.experimental.gc.result.i32(token [[STATEPOINT_TOKEN]])
; CHECK-NEXT: ret i32 [[RET1]]
;
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -rewrite-statepoints-for-gc -S < %s | FileCheck %s
; RUN: opt -passes=rewrite-statepoints-for-gc -S < %s | FileCheck %s
define double @caller_3() gc "statepoint-example" {
; CHECK-LABEL: @caller_3(
-; CHECK: call cc42 token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint
-; CHECK: unreachable
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SAFEPOINT_TOKEN:%.*]] = call cc42 token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @__llvm_deoptimize, i32 0, i32 0, i32 0, i32 0) [ "deopt"() ]
+; CHECK-NEXT: unreachable
+;
entry:
%val = call cc42 double(...) @llvm.experimental.deoptimize.f64() [ "deopt"() ]
define void @test1() gc "statepoint-example" {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 57) ]
-; CHECK-NEXT: [[STATEPOINT_TOKEN1:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @bar, i32 0, i32 2, i32 0, i32 0) [ "deopt"(i32 42) ]
-; CHECK-NEXT: [[STATEPOINT_TOKEN2:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @baz, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 13) ]
+; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 57) ]
+; CHECK-NEXT: [[STATEPOINT_TOKEN1:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @bar, i32 0, i32 2, i32 0, i32 0) [ "deopt"(i32 42) ]
+; CHECK-NEXT: [[STATEPOINT_TOKEN2:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @baz, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 13) ]
; CHECK-NEXT: ret void
;
define void @test2() gc "statepoint-example" {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 2, i32 0, i32 0) #[[ATTR0:[0-9]+]] [ "deopt"(i32 57) ]
+; CHECK-NEXT: [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 2, i32 0, i32 0) #[[ATTR0:[0-9]+]] [ "deopt"(i32 57) ]
; CHECK-NEXT: ret void
;
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 4
; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.bar, i32 0, i32 [[SWITCH_TABLEIDX]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.bar, i32 0, i32 [[SWITCH_TABLEIDX]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[RETURN]]
; CHECK: return:
; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 15, [[ENTRY:%.*]] ]
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SWITCH_TABLEIDX:%.*]] = sub i3 [[ARG:%.*]], -4
; CHECK-NEXT: [[SWITCH_TABLEIDX_ZEXT:%.*]] = zext i3 [[SWITCH_TABLEIDX]] to i4
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* @switch.table.test, i32 0, i4 [[SWITCH_TABLEIDX_ZEXT]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i64, i64* [[SWITCH_GEP]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i64], ptr @switch.table.test, i32 0, i4 [[SWITCH_TABLEIDX_ZEXT]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i64, ptr [[SWITCH_GEP]], align 8
; CHECK-NEXT: [[V3:%.*]] = add i64 [[SWITCH_LOAD]], 0
; CHECK-NEXT: ret i64 [[V3]]
;
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SWITCH_TABLEIDX:%.*]] = sub i2 [[TMP0:%.*]], -2
; CHECK-NEXT: [[SWITCH_TABLEIDX_ZEXT:%.*]] = zext i2 [[SWITCH_TABLEIDX]] to i3
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* @switch.table._TFO6reduce1E5toRawfS0_FT_Si, i32 0, i3 [[SWITCH_TABLEIDX_ZEXT]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i64, i64* [[SWITCH_GEP]]
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i64], ptr @switch.table._TFO6reduce1E5toRawfS0_FT_Si, i32 0, i3 [[SWITCH_TABLEIDX_ZEXT]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i64, ptr [[SWITCH_GEP]], align 8
; CHECK-NEXT: ret i64 [[SWITCH_LOAD]]
;
entry:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i8 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 10, [[START:%.*]] ]
; CHECK-NEXT: ret i8 [[COMMON_RET_OP]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @switch.table.switch_to_lookup_i64, i32 0, i128 [[X]]
-; CHECK-NEXT: [[SWITCH_LOAD]] = load i8, i8* [[SWITCH_GEP]], align 1
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], ptr @switch.table.switch_to_lookup_i64, i32 0, i128 [[X]]
+; CHECK-NEXT: [[SWITCH_LOAD]] = load i8, ptr [[SWITCH_GEP]], align 1
; CHECK-NEXT: br label [[COMMON_RET]]
;
start:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i8 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 10, [[START:%.*]] ]
; CHECK-NEXT: ret i8 [[COMMON_RET_OP]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @switch.table.switch_to_lookup_i128, i32 0, i128 [[X]]
-; CHECK-NEXT: [[SWITCH_LOAD]] = load i8, i8* [[SWITCH_GEP]], align 1
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], ptr @switch.table.switch_to_lookup_i128, i32 0, i128 [[X]]
+; CHECK-NEXT: [[SWITCH_LOAD]] = load i8, ptr [[SWITCH_GEP]], align 1
; CHECK-NEXT: br label [[COMMON_RET]]
;
start:
; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 4
; CHECK-NEXT: br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.test1, i32 0, i32 [[TMP4]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.test1, i32 0, i32 [[TMP4]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[COMMON_RET]]
; CHECK: common.ret:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 3
; CHECK-NEXT: br i1 [[TMP1]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* @switch.table.test3, i32 0, i32 [[SWITCH_TABLEIDX]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], ptr @switch.table.test3, i32 0, i32 [[SWITCH_TABLEIDX]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[COMMON_RET]]
; CHECK: common.ret:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 4
; CHECK-NEXT: br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.test6, i32 0, i32 [[TMP4]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.test6, i32 0, i32 [[TMP4]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[COMMON_RET]]
; CHECK: common.ret:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 5
; CHECK-NEXT: br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* @switch.table.test8, i32 0, i32 [[TMP4]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [5 x i32], ptr @switch.table.test8, i32 0, i32 [[TMP4]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[COMMON_RET]]
; CHECK: common.ret:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 8
; CHECK-NEXT: br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
; CHECK: switch.lookup:
-; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @switch.table.test9, i32 0, i32 [[TMP4]]
-; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i32], ptr @switch.table.test9, i32 0, i32 [[TMP4]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; CHECK-NEXT: br label [[COMMON_RET]]
; CHECK: common.ret:
; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
target triple = "x86_64-unknown-linux-gnu"
; COMMON-LABEL: @llvm.compiler.used = appending global
-; SVML-SAME: [6 x i8*] [
-; SVML-SAME: i8* bitcast (<2 x double> (<2 x double>)* @__svml_sin2 to i8*),
-; SVML-SAME: i8* bitcast (<4 x double> (<4 x double>)* @__svml_sin4 to i8*),
-; SVML-SAME: i8* bitcast (<8 x double> (<8 x double>)* @__svml_sin8 to i8*),
-; SVML-SAME: i8* bitcast (<4 x float> (<4 x float>)* @__svml_log10f4 to i8*),
-; SVML-SAME: i8* bitcast (<8 x float> (<8 x float>)* @__svml_log10f8 to i8*),
-; SVML-SAME: i8* bitcast (<16 x float> (<16 x float>)* @__svml_log10f16 to i8*)
-; MASSV-SAME: [2 x i8*] [
-; MASSV-SAME: i8* bitcast (<2 x double> (<2 x double>)* @__sind2 to i8*),
-; MASSV-SAME: i8* bitcast (<4 x float> (<4 x float>)* @__log10f4 to i8*)
-; ACCELERATE-SAME: [1 x i8*] [
-; ACCELERATE-SAME: i8* bitcast (<4 x float> (<4 x float>)* @vlog10f to i8*)
-; LIBMVEC-X86-SAME: [2 x i8*] [
-; LIBMVEC-X86-SAME: i8* bitcast (<2 x double> (<2 x double>)* @_ZGVbN2v_sin to i8*),
-; LIBMVEC-X86-SAME: i8* bitcast (<4 x double> (<4 x double>)* @_ZGVdN4v_sin to i8*)
+; SVML-SAME: [6 x ptr] [
+; SVML-SAME: ptr @__svml_sin2,
+; SVML-SAME: ptr @__svml_sin4,
+; SVML-SAME: ptr @__svml_sin8,
+; SVML-SAME: ptr @__svml_log10f4,
+; SVML-SAME: ptr @__svml_log10f8,
+; SVML-SAME: ptr @__svml_log10f16
+; MASSV-SAME: [2 x ptr] [
+; MASSV-SAME: ptr @__sind2,
+; MASSV-SAME: ptr @__log10f4
+; ACCELERATE-SAME: [1 x ptr] [
+; ACCELERATE-SAME: ptr @vlog10f
+; LIBMVEC-X86-SAME: [2 x ptr] [
+; LIBMVEC-X86-SAME: ptr @_ZGVbN2v_sin,
+; LIBMVEC-X86-SAME: ptr @_ZGVdN4v_sin
; COMMON-SAME: ], section "llvm.metadata"
define double @sin_f64(double %in) {
; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
; CHECK: Attribute 'byval(i32)' applied to incompatible type!
-; CHECK-NEXT: void (i32)* @h
+; CHECK-NEXT: ptr @h
declare void @h(i32 byval(i32) %num)
define i32 @t2(i32 %IV, i32 %TC) {
; CHECK: Intrinsic has incorrect return type!
-; CHECK-NEXT: i32 (i32, i32)* @llvm.get.active.lane.mask.i32.i32
+; CHECK-NEXT: ptr @llvm.get.active.lane.mask.i32.i32
%res = call i32 @llvm.get.active.lane.mask.i32.i32(i32 %IV, i32 %TC)
ret i32 %res
}
; CHECK: Attribute 'jumptable' requires 'unnamed_addr'
-; CHECK: i32 ()* @f
+; CHECK: ptr @f
@llvm.compiler.used = appending global [1 x i32] [i32 0], section "llvm.metadata"
; CHECK: wrong type for intrinsic global variable
-; CHECK-NEXT: [1 x i32]* @llvm.compiler.used
+; CHECK-NEXT: ptr @llvm.compiler.used
@llvm.used = appending global [1 x i32] [i32 0], section "llvm.metadata"
; CHECK: wrong type for intrinsic global variable
-; CHECK-NEXT: [1 x i32]* @llvm.used
+; CHECK-NEXT: ptr @llvm.used
@llvm.used = appending global i32 0, section "llvm.metadata"
; CHECK: Only global arrays can have appending linkage!
-; CHECK-NEXT: i32* @llvm.used
+; CHECK-NEXT: ptr @llvm.used
; CHECK-NOT: !dbg
; CHECK: function !dbg attachment must be a subprogram
-; CHECK-NEXT: void ()* @bar
+; CHECK-NEXT: ptr @bar
; CHECK-NEXT: !{{[0-9]+}} = !{}
define void @bar() !dbg !3 {
unreachable
; The first extracted function is the region composed by the
; blocks if, then, and else from foo.
-; CHECK: define dso_local void @foo.if.split(i32 %arg1, i32 %arg, i32* %tmp.0.ce.out) {
+; CHECK: define dso_local void @foo.if.split(i32 %arg1, i32 %arg, ptr %tmp.0.ce.out) {
; CHECK: newFuncRoot:
; CHECK: br label %if.split
;
;
; CHECK: end.split: ; preds = %then, %else
; CHECK: %tmp.0.ce = phi i32 [ %tmp13, %then ], [ %tmp25, %else ]
-; CHECK: store i32 %tmp.0.ce, i32* %tmp.0.ce.out
+; CHECK: store i32 %tmp.0.ce, ptr %tmp.0.ce.out
; CHECK: br label %end.exitStub
;
; CHECK: end.exitStub: ; preds = %end.split
; The second extracted function is the region composed by the blocks
; bb14 and bb20 from bar.
-; CHECK: define dso_local i1 @bar.bb14(i32 %arg1, i32 %arg, i32* %tmp25.out) {
+; CHECK: define dso_local i1 @bar.bb14(i32 %arg1, i32 %arg, ptr %tmp25.out) {
; CHECK: newFuncRoot:
; CHECK: br label %bb14
;
; CHECK: %tmp22 = mul nsw i32 %arg, 3
; CHECK: %tmp24 = sdiv i32 %arg1, 6
; CHECK: %tmp25 = add nsw i32 %tmp24, %tmp22
-; CHECK: store i32 %tmp25, i32* %tmp25.out
+; CHECK: store i32 %tmp25, ptr %tmp25.out
; CHECK: br label %bb30.exitStub
;
; CHECK: bb26.exitStub: ; preds = %bb14
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-@i = external global i8*
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i8** @i to i8*)], section "llvm.metadata"
+@i = external global ptr
+@llvm.used = appending global [1 x ptr] [ptr @i], section "llvm.metadata"
class OpenMPIRBuilderTest : public testing::Test {
protected:
void SetUp() override {
+ Ctx.setOpaquePointers(false); // TODO: Update tests for opaque pointers.
M.reset(new Module("MyModule", Ctx));
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)},
Constant *Undef64 = UndefValue::get(Int64Ty);
Constant *PoisonV16 = PoisonValue::get(P6->getType());
-#define P0STR "ptrtoint (i32** @dummy to i32)"
-#define P1STR "uitofp (i32 ptrtoint (i32** @dummy to i32) to float)"
-#define P2STR "uitofp (i32 ptrtoint (i32** @dummy to i32) to double)"
-#define P3STR "ptrtoint (i32** @dummy to i1)"
-#define P4STR "ptrtoint (i32** @dummy2 to i32)"
-#define P5STR "uitofp (i32 ptrtoint (i32** @dummy2 to i32) to float)"
-#define P6STR "bitcast (i32 ptrtoint (i32** @dummy2 to i32) to <2 x i16>)"
+#define P0STR "ptrtoint (ptr @dummy to i32)"
+#define P1STR "uitofp (i32 ptrtoint (ptr @dummy to i32) to float)"
+#define P2STR "uitofp (i32 ptrtoint (ptr @dummy to i32) to double)"
+#define P3STR "ptrtoint (ptr @dummy to i1)"
+#define P4STR "ptrtoint (ptr @dummy2 to i32)"
+#define P5STR "uitofp (i32 ptrtoint (ptr @dummy2 to i32) to float)"
+#define P6STR "bitcast (i32 ptrtoint (ptr @dummy2 to i32) to <2 x i16>)"
CHECK(ConstantExpr::getNeg(P0), "sub i32 0, " P0STR);
CHECK(ConstantExpr::getFNeg(P1), "fneg float " P1STR);
// "getelementptr i32*, i32** @dummy, i32 1");
CHECK(ConstantExpr::getInBoundsGetElementPtr(PointerType::getUnqual(Int32Ty),
Global, V),
- "getelementptr inbounds i32*, i32** @dummy, i32 1");
+ "getelementptr inbounds ptr, ptr @dummy, i32 1");
CHECK(ConstantExpr::getExtractElement(P6, One),
"extractelement <2 x i16> " P6STR ", i32 1");
EXPECT_EQ(Start3->getArgOperand(0), Builder.getInt64(100));
EXPECT_EQ(Start1->getArgOperand(1), Var1);
- EXPECT_NE(Start2->getArgOperand(1), Var2);
+ EXPECT_EQ(Start2->getArgOperand(1)->stripPointerCasts(), Var2);
EXPECT_EQ(Start3->getArgOperand(1), Var3);
Value *End1 = Builder.CreateLifetimeEnd(Var1);
Value *PtrToInt = IRB.CreatePtrToInt(GEP, DL.getIntPtrType(GEP->getType()));
EXPECT_TRUE(match(PtrToInt, m_VScale(DL)));
- // Prior to this patch, this case would cause assertion failures when attempting to match m_VScale
+ // This used to cause assertion failures when attempting to match m_VScale.
+ // With opaque pointers the bitcast is no longer present.
Type *VecTy2 = ScalableVectorType::get(IRB.getInt8Ty(), 2);
Value *NullPtrVec2 = Constant::getNullValue(VecTy2->getPointerTo());
Value *BitCast = IRB.CreateBitCast(NullPtrVec2, VecPtrTy);
Value *GEP2 = IRB.CreateGEP(VecTy, BitCast, IRB.getInt64(1));
Value *PtrToInt2 =
IRB.CreatePtrToInt(GEP2, DL.getIntPtrType(GEP2->getType()));
- EXPECT_FALSE(match(PtrToInt2, m_VScale(DL)));
+ EXPECT_TRUE(match(PtrToInt2, m_VScale(DL)));
}
TEST_F(PatternMatchTest, NotForbidUndef) {
EXPECT_TRUE(P1C0->isOpaque());
LLVMContext CTypedPointers;
+ CTypedPointers.setOpaquePointers(false);
Type *Int8 = Type::getInt8Ty(CTypedPointers);
PointerType *P2 = PointerType::get(Int8, 1);
EXPECT_FALSE(P2->isOpaque());
// Make sure the address space isn't dropped when returning this.
Constant *DummyCast1 = M->getOrInsertGlobal("dummy_cast", Int8Ty);
EXPECT_EQ(1u, DummyCast1->getType()->getPointerAddressSpace());
- EXPECT_NE(DummyCast0, DummyCast1) << *DummyCast1;
}
#ifdef GTEST_HAS_DEATH_TEST
CHECK_PRINT_AS_OPERAND(I1, false, "%1");
CHECK_PRINT_AS_OPERAND(I0, true, "i32 %0");
CHECK_PRINT_AS_OPERAND(I1, true, "i32 %1");
- CHECK_PRINT_AS_OPERAND(G0, true, "%0* @g0");
- CHECK_PRINT_AS_OPERAND(G1, true, "%1* @g1");
+ CHECK_PRINT_AS_OPERAND(G0, true, "ptr @g0");
+ CHECK_PRINT_AS_OPERAND(G1, true, "ptr @g1");
#undef CHECK_PRINT_AS_OPERAND
}
EXPECT_TRUE(verifyModule(M2, &ErrorOS));
EXPECT_TRUE(StringRef(ErrorOS.str())
.equals("Global is referenced in a different module!\n"
- "i32 ()* @foo2\n"
+ "ptr @foo2\n"
"; ModuleID = 'M2'\n"
" %call = call i32 @foo2()\n"
- "i32 ()* @foo1\n"
+ "ptr @foo1\n"
"; ModuleID = 'M1'\n"
"Global is used by function in a different module\n"
- "i32 ()* @foo2\n"
+ "ptr @foo2\n"
"; ModuleID = 'M2'\n"
- "i32 ()* @foo3\n"
+ "ptr @foo3\n"
"; ModuleID = 'M3'\n"));
Error.clear();
"Referencing function in another module!\n"
" %call = call i32 @foo2()\n"
"; ModuleID = 'M1'\n"
- "i32 ()* @foo2\n"
+ "ptr @foo2\n"
"; ModuleID = 'M2'\n"));
Error.clear();
// RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s
-// CHECK-LABEL: define void @target(i8* %0)
+// CHECK-LABEL: define void @target(ptr %0)
// CHECK: %[[c:.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 16)
-// CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* %0, i64 32, x86_amx %[[c]]
+// CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %0, i64 32, x86_amx %[[c]]
llvm.func @target(%ptr: !llvm.ptr<i8>) {
%c = llvm.mlir.constant(16 : i16) : i16
%s = llvm.mlir.constant(32 : i64) : i64
%17 = llvm.icmp "slt" %16, %arg10 : i64
llvm.cond_br %17, ^bb2, ^bb3
^bb2:
- // CHECK: extractvalue { float*, float*, i64, [1 x i64], [1 x i64] }
+ // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
%18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
array<1 x i64>,
array<1 x i64>)>
- // CHECK: etelementptr float, float*
+ // CHECK: getelementptr float, ptr
%19 = llvm.getelementptr %18[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- // CHECK: bitcast float* %{{[0-9]+}} to <vscale x 4 x float>*
%20 = llvm.bitcast %19 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
- // CHECK: load <vscale x 4 x float>, <vscale x 4 x float>*
+ // CHECK: load <vscale x 4 x float>, ptr
%21 = llvm.load %20 : !llvm.ptr<vector<[4]xf32>>
- // CHECK: extractvalue { float*, float*, i64, [1 x i64], [1 x i64] }
+ // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
%22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
array<1 x i64>,
array<1 x i64>)>
- // CHECK: getelementptr float, float* %32
+ // CHECK: getelementptr float, ptr
%23 = llvm.getelementptr %22[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- // CHECK: bitcast float* %33 to <vscale x 4 x float>*
%24 = llvm.bitcast %23 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
- // CHECK: store <vscale x 4 x float> %{{[0-9]+}}, <vscale x 4 x float>* %{{[0-9]+}}
+ // CHECK: store <vscale x 4 x float> %{{[0-9]+}}, ptr %{{[0-9]+}}
llvm.store %21, %24 : !llvm.ptr<vector<[4]xf32>>
%25 = llvm.add %16, %15 : i64
llvm.br ^bb1(%25 : i64)
"llvm.intr.fma"(%arg0, %arg1, %arg0) : (f32, f32, f32) -> f32
// CHECK: call <8 x float> @llvm.fma.v8f32
"llvm.intr.fma"(%arg2, %arg2, %arg2) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
- // CHECK: call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1)
+ // CHECK: call void @llvm.prefetch.p0(ptr %3, i32 0, i32 3, i32 1)
"llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
llvm.return
}
// CHECK: call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %1, i32 3, i32 16)
%D = llvm.intr.matrix.transpose %B { rows = 3: i32, columns = 16: i32} :
vector<48 x f32> into vector<48 x f32>
- // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+ // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr align 4 %2, i64 %3, i1 false, i32 3, i32 16)
%E = llvm.intr.matrix.column.major.load %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
vector<48 x f32> from !llvm.ptr<f32> stride i64
- // CHECK: call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+ // CHECK: call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %7, ptr align 4 %2, i64 %3, i1 false, i32 3, i32 16)
llvm.intr.matrix.column.major.store %E, %ptr, <stride=%stride>
{ isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
vector<48 x f32> to !llvm.ptr<f32> stride i64
// CHECK-LABEL: @masked_load_store_intrinsics
llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr<vector<7xf32>>, %mask: vector<7xi1>) {
- // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
+ // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
%a = llvm.intr.masked.load %A, %mask { alignment = 1: i32} :
(!llvm.ptr<vector<7xf32>>, vector<7xi1>) -> vector<7xf32>
- // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+ // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
%b = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :
(!llvm.ptr<vector<7xf32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
- // CHECK: call void @llvm.masked.store.v7f32.p0v7f32(<7 x float> %{{.*}}, <7 x float>* %0, i32 {{.*}}, <7 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v7f32.p0(<7 x float> %{{.*}}, ptr %0, i32 {{.*}}, <7 x i1> %{{.*}})
llvm.intr.masked.store %b, %A, %mask { alignment = 1: i32} :
vector<7xf32>, vector<7xi1> into !llvm.ptr<vector<7xf32>>
llvm.return
// CHECK-LABEL: @masked_gather_scatter_intrinsics
llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask: vector<7xi1>) {
- // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
+ // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
%a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} :
(!llvm.vec<7 x ptr<f32>>, vector<7xi1>) -> vector<7xf32>
- // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+ // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
%b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} :
(!llvm.vec<7 x ptr<f32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
- // CHECK: call void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float> %{{.*}}, <7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %{{.*}}, <7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}})
llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} :
vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr<f32>>
llvm.return
// CHECK-LABEL: @masked_expand_compress_intrinsics
llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<f32>, %mask: vector<7xi1>, %passthru: vector<7xf32>) {
- // CHECK: call <7 x float> @llvm.masked.expandload.v7f32(float* %{{.*}}, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+ // CHECK: call <7 x float> @llvm.masked.expandload.v7f32(ptr %{{.*}}, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
%0 = "llvm.intr.masked.expandload"(%ptr, %mask, %passthru)
: (!llvm.ptr<f32>, vector<7xi1>, vector<7xf32>) -> (vector<7xf32>)
- // CHECK: call void @llvm.masked.compressstore.v7f32(<7 x float> %{{.*}}, float* %{{.*}}, <7 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.compressstore.v7f32(<7 x float> %{{.*}}, ptr %{{.*}}, <7 x i1> %{{.*}})
"llvm.intr.masked.compressstore"(%0, %ptr, %mask)
: (vector<7xf32>, !llvm.ptr<f32>, vector<7xi1>) -> ()
llvm.return
// CHECK-LABEL: @memcpy_test
llvm.func @memcpy_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
%i1 = llvm.mlir.constant(false) : i1
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+ // CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memcpy"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
%sz = llvm.mlir.constant(10: i64) : i64
- // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %{{.*}}, i8* %{{.*}}, i64 10, i1 {{.*}})
+ // CHECK: call void @llvm.memcpy.inline.p0.p0.i64(ptr %{{.*}}, ptr %{{.*}}, i64 10, i1 {{.*}})
"llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
llvm.return
}
// CHECK-LABEL: @memmove_test
llvm.func @memmove_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
%i1 = llvm.mlir.constant(false) : i1
- // CHECK: call void @llvm.memmove.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+ // CHECK: call void @llvm.memmove.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memmove"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
llvm.return
}
// CHECK-LABEL: @memset_test
llvm.func @memset_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: i8) {
%i1 = llvm.mlir.constant(false) : i1
- // CHECK: call void @llvm.memset.p0i8.i32(i8* %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+ // CHECK: call void @llvm.memset.p0.i32(ptr %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memset"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, i8, i32, i1) -> ()
llvm.return
}
llvm.func @coro_begin(%arg0: i32, %arg1: !llvm.ptr<i8>) {
%null = llvm.mlir.null : !llvm.ptr<i8>
%token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : !llvm.token
- // CHECK: call i8* @llvm.coro.begin
+ // CHECK: call ptr @llvm.coro.begin
llvm.intr.coro.begin %token, %arg1 : !llvm.ptr<i8>
llvm.return
}
llvm.func @coro_free(%arg0: i32, %arg1 : !llvm.ptr<i8>) {
%null = llvm.mlir.null : !llvm.ptr<i8>
%token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : !llvm.token
- // CHECK: call i8* @llvm.coro.free
+ // CHECK: call ptr @llvm.coro.free
%0 = llvm.intr.coro.free %token, %arg1 : !llvm.ptr<i8>
llvm.return
}
// CHECK-LABEL: @stack_save
llvm.func @stack_save() {
- // CHECK: call i8* @llvm.stacksave
+ // CHECK: call ptr @llvm.stacksave
%0 = llvm.intr.stacksave : !llvm.ptr<i8>
llvm.return
}
"llvm.intr.vp.merge" (%mask, %A, %B, %evl) :
(vector<8xi1>, vector<8xi32>, vector<8xi32>, i32) -> vector<8xi32>
- // CHECK: call void @llvm.vp.store.v8i32.p0i32
+ // CHECK: call void @llvm.vp.store.v8i32.p0
"llvm.intr.vp.store" (%A, %iptr, %mask, %evl) :
(vector<8xi32>, !llvm.ptr<i32>, vector<8xi1>, i32) -> ()
- // CHECK: call <8 x i32> @llvm.vp.load.v8i32.p0i32
+ // CHECK: call <8 x i32> @llvm.vp.load.v8i32.p0
"llvm.intr.vp.load" (%iptr, %mask, %evl) :
(!llvm.ptr<i32>, vector<8xi1>, i32) -> vector<8xi32>
- // CHECK: call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32
+ // CHECK: call void @llvm.experimental.vp.strided.store.v8i32.p0.i32
"llvm.intr.experimental.vp.strided.store" (%A, %iptr, %i, %mask, %evl) :
(vector<8xi32>, !llvm.ptr<i32>, i32, vector<8xi1>, i32) -> ()
- // CHECK: call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32
+ // CHECK: call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32
"llvm.intr.experimental.vp.strided.load" (%iptr, %i, %mask, %evl) :
(!llvm.ptr<i32>, i32, vector<8xi1>, i32) -> vector<8xi32>
"llvm.intr.vp.fptosi" (%F, %mask, %evl) :
(vector<8xf64>, vector<8xi1>, i32) -> vector<8xi64>
- // CHECK: call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32
+ // CHECK: call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0
"llvm.intr.vp.ptrtoint" (%G, %mask, %evl) :
(!llvm.vec<8 x !llvm.ptr<i32>>, vector<8xi1>, i32) -> vector<8xi64>
- // CHECK: call <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64
+ // CHECK: call <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64
"llvm.intr.vp.inttoptr" (%E, %mask, %evl) :
(vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x !llvm.ptr<i32>>
llvm.return
// CHECK-DAG: declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
// CHECK-DAG: declare float @llvm.fmuladd.f32(float, float, float)
// CHECK-DAG: declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
-// CHECK-DAG: declare void @llvm.prefetch.p0i8(i8* nocapture readonly, i32 immarg, i32 immarg, i32)
+// CHECK-DAG: declare void @llvm.prefetch.p0(ptr nocapture readonly, i32 immarg, i32 immarg, i32)
// CHECK-DAG: declare float @llvm.exp.f32(float)
// CHECK-DAG: declare <8 x float> @llvm.exp.v8f32(<8 x float>) #0
// CHECK-DAG: declare float @llvm.log.f32(float)
// CHECK-DAG: declare float @llvm.copysign.f32(float, float)
// CHECK-DAG: declare <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float>, <48 x float>, i32 immarg, i32 immarg, i32 immarg)
// CHECK-DAG: declare <48 x float> @llvm.matrix.transpose.v48f32(<48 x float>, i32 immarg, i32 immarg)
-// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
-// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, float* nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, ptr nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
// CHECK-DAG: declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64, i64)
-// CHECK-DAG: declare <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>*, i32 immarg, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.store.v7f32.p0v7f32(<7 x float>, <7 x float>*, i32 immarg, <7 x i1>)
-// CHECK-DAG: declare <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*>, i32 immarg, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float>, <7 x float*>, i32 immarg, <7 x i1>)
-// CHECK-DAG: declare <7 x float> @llvm.masked.expandload.v7f32(float*, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.compressstore.v7f32(<7 x float>, float*, <7 x i1>)
-// CHECK-DAG: declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
-// CHECK-DAG: declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)
+// CHECK-DAG: declare <7 x float> @llvm.masked.load.v7f32.p0(ptr, i32 immarg, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.store.v7f32.p0(<7 x float>, ptr, i32 immarg, <7 x i1>)
+// CHECK-DAG: declare <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr>, i32 immarg, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.scatter.v7f32.v7p0(<7 x float>, <7 x ptr>, i32 immarg, <7 x i1>)
+// CHECK-DAG: declare <7 x float> @llvm.masked.expandload.v7f32(ptr, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.compressstore.v7f32(<7 x float>, ptr, <7 x i1>)
+// CHECK-DAG: declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
+// CHECK-DAG: declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)
// CHECK-DAG: declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
// CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
// CHECK-DAG: declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
// CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
// CHECK-DAG: declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
// CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
-// CHECK-DAG: declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
-// CHECK-DAG: declare i8* @llvm.coro.begin(token, i8* writeonly)
+// CHECK-DAG: declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
+// CHECK-DAG: declare ptr @llvm.coro.begin(token, ptr writeonly)
// CHECK-DAG: declare i64 @llvm.coro.size.i64()
// CHECK-DAG: declare i32 @llvm.coro.size.i32()
-// CHECK-DAG: declare token @llvm.coro.save(i8*)
+// CHECK-DAG: declare token @llvm.coro.save(ptr)
// CHECK-DAG: declare i8 @llvm.coro.suspend(token, i1)
-// CHECK-DAG: declare i1 @llvm.coro.end(i8*, i1)
-// CHECK-DAG: declare i8* @llvm.coro.free(token, i8* nocapture readonly)
-// CHECK-DAG: declare void @llvm.coro.resume(i8*)
+// CHECK-DAG: declare i1 @llvm.coro.end(ptr, i1)
+// CHECK-DAG: declare ptr @llvm.coro.free(token, ptr nocapture readonly)
+// CHECK-DAG: declare void @llvm.coro.resume(ptr)
// CHECK-DAG: declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
// CHECK-DAG: declare float @llvm.vp.reduce.fmin.v8f32(float, <8 x float>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) #2
// CHECK-DAG: declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) #2
-// CHECK-DAG: declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32* nocapture, i32, <8 x i1>, i32) #4
-// CHECK-DAG: declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* nocapture, i32, <8 x i1>, i32) #3
+// CHECK-DAG: declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr nocapture, i32, <8 x i1>, i32) #4
+// CHECK-DAG: declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr nocapture, i32, <8 x i1>, i32) #3
// CHECK-DAG: declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i64> @llvm.vp.zext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i64> @llvm.vp.sext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double>, <8 x i1>, i32) #2
// CHECK-DAG: declare <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double>, <8 x i1>, i32) #2
-// CHECK-DAG: declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32(<8 x i32*>, <8 x i1>, i32) #2
-// CHECK-DAG: declare <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64(<8 x i64>, <8 x i1>, i32) #2
+// CHECK-DAG: declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32) #2
+// CHECK-DAG: declare <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64>, <8 x i1>, i32) #2
llvm.func @f_void_variadic(...)
// CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...)
llvm.func @f_void_i32_i32_variadic(i32, i32, ...)
-// CHECK: declare i32 (i32)* @f_f_i32_i32()
+// CHECK: declare ptr @f_f_i32_i32()
llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
//
// Pointers.
//
-// CHECK: declare i8* @return_pi8()
+// CHECK: declare ptr @return_pi8()
llvm.func @return_pi8() -> !llvm.ptr<i8>
-// CHECK: declare float* @return_pfloat()
+// CHECK: declare ptr @return_pfloat()
llvm.func @return_pfloat() -> !llvm.ptr<f32>
-// CHECK: declare i8** @return_ppi8()
+// CHECK: declare ptr @return_ppi8()
llvm.func @return_ppi8() -> !llvm.ptr<ptr<i8>>
-// CHECK: declare i8***** @return_pppppi8()
+// CHECK: declare ptr @return_pppppi8()
llvm.func @return_pppppi8() -> !llvm.ptr<ptr<ptr<ptr<ptr<i8>>>>>
-// CHECK: declare i8* @return_pi8_0()
+// CHECK: declare ptr @return_pi8_0()
llvm.func @return_pi8_0() -> !llvm.ptr<i8, 0>
-// CHECK: declare i8 addrspace(1)* @return_pi8_1()
+// CHECK: declare ptr addrspace(1) @return_pi8_1()
llvm.func @return_pi8_1() -> !llvm.ptr<i8, 1>
-// CHECK: declare i8 addrspace(42)* @return_pi8_42()
+// CHECK: declare ptr addrspace(42) @return_pi8_42()
llvm.func @return_pi8_42() -> !llvm.ptr<i8, 42>
-// CHECK: declare i8 addrspace(42)* addrspace(9)* @return_ppi8_42_9()
+// CHECK: declare ptr addrspace(9) @return_ppi8_42_9()
llvm.func @return_ppi8_42_9() -> !llvm.ptr<ptr<i8, 42>, 9>
//
llvm.func @return_vs_4_i32() -> !llvm.vec<?x4 x i32>
// CHECK: declare <vscale x 8 x half> @return_vs_8_half()
llvm.func @return_vs_8_half() -> !llvm.vec<?x8 x f16>
-// CHECK: declare <4 x i8*> @return_v_4_pi8()
+// CHECK: declare <4 x ptr> @return_v_4_pi8()
llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
//
llvm.func @return_a10_i32() -> !llvm.array<10 x i32>
// CHECK: declare [8 x float] @return_a8_float()
llvm.func @return_a8_float() -> !llvm.array<8 x f32>
-// CHECK: declare [10 x i32 addrspace(4)*] @return_a10_pi32_4()
+// CHECK: declare [10 x ptr addrspace(4)] @return_a10_pi32_4()
llvm.func @return_a10_pi32_4() -> !llvm.array<10 x ptr<i32, 4>>
// CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x f32>>
// CHECK: %empty = type {}
// CHECK: %opaque = type opaque
-// CHECK: %long = type { i32, { i32, i1 }, float, void ()* }
-// CHECK: %self-recursive = type { %self-recursive* }
+// CHECK: %long = type { i32, { i32, i1 }, float, ptr }
+// CHECK: %self-recursive = type { ptr }
// CHECK: %unpacked = type { i32 }
// CHECK: %packed = type <{ i32 }>
// CHECK: %"name with spaces and !^$@$#" = type <{ i32 }>
-// CHECK: %mutually-a = type { %mutually-b* }
-// CHECK: %mutually-b = type { %mutually-a addrspace(3)* }
+// CHECK: %mutually-a = type { ptr }
+// CHECK: %mutually-b = type { ptr addrspace(3) }
// CHECK: %struct-of-arrays = type { [10 x i32] }
// CHECK: %array-of-structs = type { i32 }
-// CHECK: %ptr-to-struct = type { i8 }
// CHECK: declare %empty
llvm.func @return_s_empty() -> !llvm.struct<"empty", ()>
llvm.func @return_s_struct_of_arrays() -> !llvm.struct<"struct-of-arrays", (array<10 x i32>)>
// CHECK: declare [10 x %array-of-structs]
llvm.func @return_s_array_of_structs() -> !llvm.array<10 x struct<"array-of-structs", (i32)>>
-// CHECK: declare %ptr-to-struct*
+// CHECK: declare ptr
llvm.func @return_s_ptr_to_struct() -> !llvm.ptr<struct<"ptr-to-struct", (i8)>>
llvm.return %0 : i32
}
-// CHECK: @int_gep = internal constant i32* getelementptr (i32, i32* @i32_global, i32 2)
+// CHECK: @int_gep = internal constant ptr getelementptr (i32, ptr @i32_global, i32 2)
llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
%addr = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
%_c0 = llvm.mlir.constant(2: i32) :i32
// inserted before other functions in the module.
//
-// CHECK: declare i8* @malloc(i64)
+// CHECK: declare ptr @malloc(i64)
llvm.func @malloc(i64) -> !llvm.ptr<i8>
-// CHECK: declare void @free(i8*)
+// CHECK: declare void @free(ptr)
//
// CHECK-LABEL: @global_refs
llvm.func @global_refs() {
// Check load from globals.
- // CHECK: load i32, i32* @i32_global
+ // CHECK: load i32, ptr @i32_global
%0 = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
%1 = llvm.load %0 : !llvm.ptr<i32>
// Check the contracted form of load from array constants.
- // CHECK: load i8, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @string_const, i64 0, i64 0)
+ // CHECK: load i8, ptr @string_const
%2 = llvm.mlir.addressof @string_const : !llvm.ptr<array<6 x i8>>
%c0 = llvm.mlir.constant(0 : index) : i64
%3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr<array<6 x i8>>, i64, i64) -> !llvm.ptr<i8>
// CHECK-LABEL: define void @memref_alloc()
llvm.func @memref_alloc() {
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400)
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 400)
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr } undef, ptr %{{[0-9]+}}, 0
%0 = llvm.mlir.constant(10 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mul %0, %1 : i64
// CHECK-LABEL: define void @store_load_static()
llvm.func @store_load_static() {
^bb0:
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40)
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 40)
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr } undef, ptr %{{[0-9]+}}, 0
%0 = llvm.mlir.constant(10 : index) : i64
%1 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
%2 = llvm.mlir.constant(4 : index) : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %11, ^bb3, ^bb4
^bb3: // pred: ^bb2
-// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
%12 = llvm.mlir.constant(10 : index) : i64
%13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
%14 = llvm.getelementptr %13[%10] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %20, ^bb7, ^bb8
^bb7: // pred: ^bb6
-// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
%21 = llvm.mlir.constant(10 : index) : i64
%22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
%23 = llvm.getelementptr %22[%19] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}})
llvm.func @store_load_dynamic(%arg0: i64) {
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
%0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
%1 = llvm.mlir.constant(4 : index) : i64
%2 = llvm.mul %arg0, %1 : i64
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %10, ^bb3, ^bb4
^bb3: // pred: ^bb2
-// CHECK: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
%11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
%12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
%13 = llvm.getelementptr %12[%9] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
llvm.cond_br %18, ^bb7, ^bb8
^bb7: // pred: ^bb6
-// CHECK: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
%19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
%20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
%21 = llvm.getelementptr %20[%17] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 10
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 10, 2
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } %{{[0-9]+}}, i64 10, 2
%1 = llvm.mlir.constant(2 : index) : i64
%2 = llvm.mlir.constant(4 : index) : i64
%3 = llvm.mul %1, %arg0 : i64
%17 = llvm.call @get_index() : () -> i64
%18 = llvm.mlir.constant(4.200000e+01 : f32) : f32
%19 = llvm.mlir.constant(2 : index) : i64
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 1, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
%20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
%21 = llvm.mlir.constant(4 : index) : i64
%22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
%29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
%30 = llvm.getelementptr %29[%28] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
llvm.store %18, %30 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
%31 = llvm.mlir.constant(2 : index) : i64
%32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
%33 = llvm.mlir.constant(4 : index) : i64
llvm.return
}
-// CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}})
+// CHECK-LABEL: define { ptr, i64 } @memref_args_rets({ ptr } {{%.*}}, { ptr, i64 } {{%.*}}, { ptr, i64 } {{%.*}})
llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct<(ptr<f32>, i64)>, %arg2: !llvm.struct<(ptr<f32>, i64)>) -> !llvm.struct<(ptr<f32>, i64)> {
%0 = llvm.mlir.constant(7 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
%1 = llvm.call @get_index() : () -> i64
%2 = llvm.mlir.constant(4.200000e+01 : f32) : f32
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
%3 = llvm.mlir.constant(10 : index) : i64
%4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<f32>)>
%5 = llvm.getelementptr %4[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
llvm.store %2, %5 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
%6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<f32>, i64)>
%7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<f32>, i64)>
%8 = llvm.getelementptr %7[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
llvm.store %2, %8 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
%9 = llvm.mlir.constant(10 : index) : i64
%10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<f32>, i64)>
%11 = llvm.mul %0, %10 : i64
llvm.store %2, %14 : !llvm.ptr<f32>
// CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
%15 = llvm.mlir.constant(10 : index) : i64
%16 = llvm.mul %15, %1 : i64
%17 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
%21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<f32>
%22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<f32>, i64)>
%23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<f32>, i64)>
-// CHECK-NEXT: ret { float*, i64 } %{{[0-9]+}}
+// CHECK-NEXT: ret { ptr, i64 } %{{[0-9]+}}
llvm.return %23 : !llvm.struct<(ptr<f32>, i64)>
}
-// CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
+// CHECK-LABEL: define i64 @memref_dim({ ptr, i64, i64 } {{%.*}})
llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<f32>, i64, i64)>) -> i64 {
// Expecting this to create an LLVM constant.
%0 = llvm.mlir.constant(42 : index) : i64
-// CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
+// CHECK-NEXT: %2 = extractvalue { ptr, i64, i64 } %0, 1
%1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<f32>, i64, i64)>
// Expecting this to create an LLVM constant.
%2 = llvm.mlir.constant(10 : index) : i64
-// CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2
+// CHECK-NEXT: %3 = extractvalue { ptr, i64, i64 } %0, 2
%3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<f32>, i64, i64)>
// Checking that the constant for d0 has been created.
// CHECK-NEXT: %4 = add i64 42, %2
llvm.func @get_f32() -> f32
llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, i64, i64)>
-// CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn()
+// CHECK-LABEL: define { i64, float, { ptr, i64, i64 } } @multireturn()
llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)> {
%0 = llvm.call @get_i64() : () -> i64
%1 = llvm.call @get_f32() : () -> f32
%2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, i64, i64)>
-// CHECK: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, { float*, i64, i64 } %{{[0-9]+}}, 2
-// CHECK-NEXT: ret { i64, float, { float*, i64, i64 } } %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, { ptr, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT: ret { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}
%3 = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
%4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
%5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
// CHECK-LABEL: define void @multireturn_caller()
llvm.func @multireturn_caller() {
-// CHECK-NEXT: %1 = call { i64, float, { float*, i64, i64 } } @multireturn()
-// CHECK-NEXT: [[ret0:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 0
-// CHECK-NEXT: [[ret1:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 1
-// CHECK-NEXT: [[ret2:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 2
+// CHECK-NEXT: %1 = call { i64, float, { ptr, i64, i64 } } @multireturn()
+// CHECK-NEXT: [[ret0:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 0
+// CHECK-NEXT: [[ret1:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 1
+// CHECK-NEXT: [[ret2:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 2
%0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
%1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
%2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
%7 = llvm.fadd %2, %6 : f32
%8 = llvm.mlir.constant(0 : index) : i64
%9 = llvm.mlir.constant(42 : index) : i64
-// CHECK: extractvalue { float*, i64, i64 } [[ret2]], 0
+// CHECK: extractvalue { ptr, i64, i64 } [[ret2]], 0
%10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<f32>, i64, i64)>
%11 = llvm.mlir.constant(10 : index) : i64
%12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<f32>, i64, i64)>
// CHECK-LABEL: @gep
llvm.func @gep(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx: i64,
%ptr2: !llvm.ptr<struct<(array<10xf32>)>>) {
- // CHECK: = getelementptr { i32, { i32, float } }, { i32, { i32, float } }* %{{.*}}, i64 %{{.*}}, i32 1, i32 0
+ // CHECK: = getelementptr { i32, { i32, float } }, ptr %{{.*}}, i64 %{{.*}}, i32 1, i32 0
llvm.getelementptr %ptr[%idx, 1, 0] : (!llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, i64) -> !llvm.ptr<i32>
- // CHECK: = getelementptr { [10 x float] }, { [10 x float] }* %{{.*}}, i64 %{{.*}}, i32 0, i64 %{{.*}}
+ // CHECK: = getelementptr { [10 x float] }, ptr %{{.*}}, i64 %{{.*}}, i32 0, i64 %{{.*}}
llvm.getelementptr %ptr2[%idx, 0, %idx] : (!llvm.ptr<struct<(array<10xf32>)>>, i64, i64) -> !llvm.ptr<f32>
llvm.return
}
llvm.return
}
-// CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}})
+// CHECK-LABEL: define i32 @indirect_call(ptr {{%.*}}, float {{%.*}})
llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
// CHECK-NEXT: %3 = call i32 %0(float %1)
%0 = llvm.call %arg0(%arg1) : (f32) -> i32
llvm.br ^bb1(%arg1 : i1)
}
-// CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}})
+// CHECK-LABEL: define void @llvm_noalias(ptr noalias {{%*.}})
llvm.func @llvm_noalias(%arg0: !llvm.ptr<f32> {llvm.noalias}) {
llvm.return
}
-// CHECK-LABEL: define void @byvalattr(i32* byval(i32) %
+// CHECK-LABEL: define void @byvalattr(ptr byval(i32) %
llvm.func @byvalattr(%arg0: !llvm.ptr<i32> {llvm.byval}) {
llvm.return
}
-// CHECK-LABEL: define void @sretattr(i32* sret(i32) %
+// CHECK-LABEL: define void @sretattr(ptr sret(i32) %
llvm.func @sretattr(%arg0: !llvm.ptr<i32> {llvm.sret}) {
llvm.return
}
-// CHECK-LABEL: define void @nestattr(i32* nest %
+// CHECK-LABEL: define void @nestattr(ptr nest %
llvm.func @nestattr(%arg0: !llvm.ptr<i32> {llvm.nest}) {
llvm.return
}
-// CHECK-LABEL: define void @llvm_align(float* align 4 {{%*.}})
+// CHECK-LABEL: define void @llvm_align(ptr align 4 {{%*.}})
llvm.func @llvm_align(%arg0: !llvm.ptr<f32> {llvm.align = 4}) {
llvm.return
}
llvm.func @llvm_varargs(...)
llvm.func @intpointerconversion(%arg0 : i32) -> i32 {
-// CHECK: %2 = inttoptr i32 %0 to i32*
-// CHECK-NEXT: %3 = ptrtoint i32* %2 to i32
+// CHECK: %2 = inttoptr i32 %0 to ptr
+// CHECK-NEXT: %3 = ptrtoint ptr %2 to i32
%1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr<i32>
%2 = llvm.ptrtoint %1 : !llvm.ptr<i32> to i32
llvm.return %2 : i32
// CHECK-LABEL: @addrspace
llvm.func @addrspace(%arg0 : !llvm.ptr<i32>) -> !llvm.ptr<i32, 2> {
-// CHECK: %2 = addrspacecast i32* %0 to i32 addrspace(2)*
+// CHECK: %2 = addrspacecast ptr %0 to ptr addrspace(2)
%1 = llvm.addrspacecast %arg0 : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
llvm.return %1 : !llvm.ptr<i32, 2>
}
// CHECK-LABEL: @null
llvm.func @null() -> !llvm.ptr<i32> {
%0 = llvm.mlir.null : !llvm.ptr<i32>
- // CHECK: ret i32* null
+ // CHECK: ret ptr null
llvm.return %0 : !llvm.ptr<i32>
}
llvm.func @atomicrmw(
%f32_ptr : !llvm.ptr<f32>, %f32 : f32,
%i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
- // CHECK: atomicrmw fadd float* %{{.*}}, float %{{.*}} monotonic
+ // CHECK: atomicrmw fadd ptr %{{.*}}, float %{{.*}} monotonic
%0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : f32
- // CHECK: atomicrmw fsub float* %{{.*}}, float %{{.*}} monotonic
+ // CHECK: atomicrmw fsub ptr %{{.*}}, float %{{.*}} monotonic
%1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : f32
- // CHECK: atomicrmw xchg float* %{{.*}}, float %{{.*}} monotonic
+ // CHECK: atomicrmw xchg ptr %{{.*}}, float %{{.*}} monotonic
%2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : f32
- // CHECK: atomicrmw add i32* %{{.*}}, i32 %{{.*}} acquire
+ // CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} acquire
%3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32
- // CHECK: atomicrmw sub i32* %{{.*}}, i32 %{{.*}} release
+ // CHECK: atomicrmw sub ptr %{{.*}}, i32 %{{.*}} release
%4 = llvm.atomicrmw sub %i32_ptr, %i32 release : i32
- // CHECK: atomicrmw and i32* %{{.*}}, i32 %{{.*}} acq_rel
+ // CHECK: atomicrmw and ptr %{{.*}}, i32 %{{.*}} acq_rel
%5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : i32
- // CHECK: atomicrmw nand i32* %{{.*}}, i32 %{{.*}} seq_cst
+ // CHECK: atomicrmw nand ptr %{{.*}}, i32 %{{.*}} seq_cst
%6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : i32
- // CHECK: atomicrmw or i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw or ptr %{{.*}}, i32 %{{.*}} monotonic
%7 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : i32
- // CHECK: atomicrmw xor i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw xor ptr %{{.*}}, i32 %{{.*}} monotonic
%8 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : i32
- // CHECK: atomicrmw max i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw max ptr %{{.*}}, i32 %{{.*}} monotonic
%9 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : i32
- // CHECK: atomicrmw min i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw min ptr %{{.*}}, i32 %{{.*}} monotonic
%10 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : i32
- // CHECK: atomicrmw umax i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw umax ptr %{{.*}}, i32 %{{.*}} monotonic
%11 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : i32
- // CHECK: atomicrmw umin i32* %{{.*}}, i32 %{{.*}} monotonic
+ // CHECK: atomicrmw umin ptr %{{.*}}, i32 %{{.*}} monotonic
%12 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : i32
llvm.return
}
// CHECK-LABEL: @cmpxchg
llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %val: i32) {
- // CHECK: cmpxchg i32* %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
+ // CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
%0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : i32
// CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0
%1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)>
%4 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
%5 = llvm.mlir.constant(1 : i32) : i32
%6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr<i8>
-// CHECK: invoke void @foo(i8* %[[a1]])
+// CHECK: invoke void @foo(ptr %[[a1]])
// CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]]
llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
// CHECK: [[unwind]]:
^bb1:
-// CHECK: %{{[0-9]+}} = landingpad { i8*, i32 }
-// CHECK-NEXT: catch i8** null
-// CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
+// CHECK: %{{[0-9]+}} = landingpad { ptr, i32 }
+// CHECK-NEXT: catch ptr null
+// CHECK-NEXT: catch ptr @_ZTIi
// CHECK-NEXT: filter [1 x i8] zeroinitializer
%7 = llvm.landingpad (catch %4 : !llvm.ptr<ptr<i8>>) (catch %3 : !llvm.ptr<i8>) (filter %1 : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
// CHECK: br label %[[final:[0-9]+]]
llvm.return %5 : i32
// CHECK: [[final]]:
-// CHECK-NEXT: %{{[0-9]+}} = invoke i8* @bar(i8* %[[a1]])
+// CHECK-NEXT: %{{[0-9]+}} = invoke ptr @bar(ptr %[[a1]])
// CHECK-NEXT: to label %[[normal]] unwind label %[[unwind]]
^bb3: // pred: ^bb1
%8 = llvm.invoke @bar(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
%0 = llvm.invoke @foo() to ^bb1 unwind ^bb2 : () -> i8
// CHECK: [[normal]]:
-// CHECK-NEXT: store i8 %[[a1]], i8* %[[a0]]
+// CHECK-NEXT: store i8 %[[a1]], ptr %[[a0]]
// CHECK-NEXT: ret void
^bb1:
llvm.store %0, %arg0 : !llvm.ptr<i8>
llvm.return
// CHECK: [[unwind]]:
-// CHECK-NEXT: landingpad { i8*, i32 }
+// CHECK-NEXT: landingpad { ptr, i32 }
// CHECK-NEXT: cleanup
// CHECK-NEXT: ret void
^bb2:
llvm.return %1 : i32
// CHECK: [[unwind]]:
-// CHECK-NEXT: landingpad { i8*, i32 }
+// CHECK-NEXT: landingpad { ptr, i32 }
// CHECK-NEXT: cleanup
// CHECK-NEXT: br label %[[normal]]
^bb2:
// -----
-// CHECK: @forward_use_of_address = linkonce global float* @address_declared_after_use
+// CHECK: @forward_use_of_address = linkonce global ptr @address_declared_after_use
llvm.mlir.global linkonce @forward_use_of_address() : !llvm.ptr<f32> {
%0 = llvm.mlir.addressof @address_declared_after_use : !llvm.ptr<f32>
llvm.return %0 : !llvm.ptr<f32>
// -----
-// CHECK: @take_self_address = linkonce global { i32, i32* } {{.*}} { i32, i32* }* @take_self_address
+// CHECK: @take_self_address = linkonce global { i32, ptr } {{.*}} ptr @take_self_address
llvm.mlir.global linkonce @take_self_address() : !llvm.struct<(i32, !llvm.ptr<i32>)> {
%z32 = llvm.mlir.constant(0 : i32) : i32
%0 = llvm.mlir.undef : !llvm.struct<(i32, !llvm.ptr<i32>)>
// -----
-// CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @foo, i8* null }]
+// CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @foo, ptr null }]
llvm.mlir.global_ctors { ctors = [@foo], priorities = [0 : i32]}
llvm.func @foo() {
// -----
-// CHECK: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @foo, i8* null }]
+// CHECK: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @foo, ptr null }]
llvm.mlir.global_dtors { dtors = [@foo], priorities = [0 : i32]}
llvm.func @foo() {
%val = llvm.mlir.constant(5 : i32) : i32
%size = llvm.mlir.constant(1 : i64) : i64
%0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
- // CHECK: store volatile i32 5, i32* %{{.*}}
+ // CHECK: store volatile i32 5, ptr %{{.*}}
llvm.store volatile %val, %0 : !llvm.ptr<i32>
- // CHECK: %{{.*}} = load volatile i32, i32* %{{.*}}
+ // CHECK: %{{.*}} = load volatile i32, ptr %{{.*}}
%1 = llvm.load volatile %0: !llvm.ptr<i32>
llvm.return
}
llvm.cond_br %2, ^bb4, ^bb5 {llvm.loop = {parallel_access = [@metadata::@group1, @metadata::@group2], options = #llvm.loopopts<disable_licm = true, disable_unroll = true, interleave_count = 1, disable_pipeline = true, pipeline_initiation_interval = 2>}}
^bb4:
%3 = llvm.add %1, %arg2 : i32
- // CHECK: = load i32, i32* %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE:[0-9]+]]
+ // CHECK: = load i32, ptr %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE:[0-9]+]]
%5 = llvm.load %4 { access_groups = [@metadata::@group1, @metadata::@group2] } : !llvm.ptr<i32>
// CHECK: br label {{.*}} !llvm.loop ![[LOOP_NODE]]
llvm.br ^bb3(%3 : i32) {llvm.loop = {parallel_access = [@metadata::@group1, @metadata::@group2], options = #llvm.loopopts<disable_unroll = true, disable_licm = true, interleave_count = 1, disable_pipeline = true, pipeline_initiation_interval = 2>}}
// in the LLVM NVPTX backend.
// CHECK-LABEL: @gpu_wmma_load_op
llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32) {
- // CHECK: call { <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half> } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p3i32(i32 addrspace(3)* %{{.*}}, i32 %{{.*}})
+ // CHECK: call { <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half> } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
%0 = nvvm.wmma.load %arg0, %arg1
{eltype = #nvvm.mma_type<f16>, frag = #nvvm.mma_frag<a>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
: (!llvm.ptr<i32, 3>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
llvm.func @gpu_wmma_store_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32,
%arg2: vector<2 x f16>, %arg3: vector<2 x f16>,
%arg4: vector<2 xf16>, %arg5: vector<2 x f16>) {
- // CHECK: call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16.p3i32(i32 addrspace(3)* %{{.*}}, <2 x half> {{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, i32 %{{.*}})
+ // CHECK: call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, <2 x half> {{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, i32 %{{.*}})
nvvm.wmma.store %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
{eltype = #nvvm.mma_type<f16>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
: !llvm.ptr<i32, 3>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>
// CHECK-LABEL: @nvvm_wmma_load_tf32
llvm.func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) {
- // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32.p0i32(i32* %{{.*}}, i32 %{{.*}})
+ // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32.p0(ptr %{{.*}}, i32 %{{.*}})
%0 = nvvm.wmma.load %arg0, %arg1
{eltype = #nvvm.mma_type<tf32>, frag = #nvvm.mma_frag<a>, k = 8 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
: (!llvm.ptr<i32>) -> !llvm.struct<(i32, i32, i32, i32)>
// CHECK-LABEL: @cp_async
llvm.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.4(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.4(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
nvvm.cp.async.shared.global %arg0, %arg1, 4
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.8(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.8(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
nvvm.cp.async.shared.global %arg0, %arg1, 8
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.16(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
nvvm.cp.async.shared.global %arg0, %arg1, 16
-// CHECK: call void @llvm.nvvm.cp.async.cg.shared.global.16(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.cg.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
nvvm.cp.async.shared.global %arg0, %arg1, 16 {bypass_l1}
// CHECK: call void @llvm.nvvm.cp.async.commit.group()
nvvm.cp.async.commit.group
// CHECK-LABEL: @ld_matrix
llvm.func @ld_matrix(%arg0: !llvm.ptr<i32, 3>) {
- // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16.p3(ptr addrspace(3) %{{.*}})
%l1 = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> i32
- // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16.p3(ptr addrspace(3) %{{.*}})
%l2 = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
- // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16.p3(ptr addrspace(3) %{{.*}})
%l4 = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
- // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16.p3(ptr addrspace(3) %{{.*}})
%l1t = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> i32
- // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16.p3(ptr addrspace(3) %{{.*}})
%l2t = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
- // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+ // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16.p3(ptr addrspace(3) %{{.*}})
%l4t = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
llvm.return
}
}
// CHECK: !nvvm.annotations =
-// CHECK-NOT: {i32 ()* @nvvm_special_regs, !"kernel", i32 1}
-// CHECK: {void ()* @kernel_func, !"kernel", i32 1}
+// CHECK-NOT: {ptr @nvvm_special_regs, !"kernel", i32 1}
+// CHECK: {ptr @kernel_func, !"kernel", i32 1}
llvm.return
}
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
// CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testenterdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
// CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 0, i64 1]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
-// CHECK: define void @testenterdataop(float* %{{.*}}, float* %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testenterdataop(ptr %{{.*}}, ptr %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
// CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
// CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
// CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
// CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_begin_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_begin_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_begin_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
// -----
llvm.return
}
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
// CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testexitdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
// CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 8, i64 2]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
-// CHECK: define void @testexitdataop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testexitdataop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
// CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
// CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
// CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
// CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_end_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_end_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_end_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
// -----
llvm.return
}
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
// CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testupdateop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
// CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 2, i64 1]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
-// CHECK: define void @testupdateop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testupdateop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
// CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
// CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
// CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
// CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_update_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_update_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_update_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_update_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
// -----
llvm.return
}
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
// CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
// CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
// CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 8195, i64 8194]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
-// CHECK: define void @testdataop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]], i32* %{{.*}})
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testdataop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]], ptr %{{.*}})
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
// CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
// CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
// CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
// CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: call void @__tgt_target_data_begin_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
// CHECK: br label %acc.data
// CHECK: acc.data:
-// CHECK-NEXT: store i32 2, i32* %{{.*}}
+// CHECK-NEXT: store i32 2, ptr %{{.*}}
// CHECK-NEXT: br label %acc.end_data
// CHECK: acc.end_data:
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: call void @__tgt_target_data_end_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
-// CHECK: declare void @__tgt_target_data_begin_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)
-// CHECK: declare void @__tgt_target_data_end_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)
+// CHECK: declare void @__tgt_target_data_begin_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
+// CHECK: declare void @__tgt_target_data_end_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
// CHECK-LABEL: define void @test_stand_alone_directives()
llvm.func @test_stand_alone_directives() {
- // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD]])
+ // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK-NEXT: call void @__kmpc_barrier(ptr @{{[0-9]+}}, i32 [[OMP_THREAD]])
omp.barrier
- // CHECK: [[OMP_THREAD1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK-NEXT: [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskwait(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD1]])
+ // CHECK: [[OMP_THREAD1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK-NEXT: [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @{{[0-9]+}}, i32 [[OMP_THREAD1]])
omp.taskwait
- // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK-NEXT: [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskyield(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD2]], i32 0)
+ // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK-NEXT: [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @{{[0-9]+}}, i32 [[OMP_THREAD2]], i32 0)
omp.taskyield
// CHECK-NEXT: ret void
// CHECK-LABEL: define void @test_flush_construct(i32 %0)
llvm.func @test_flush_construct(%arg0: i32) {
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+ // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+ // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush (%arg0 : i32)
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+ // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush (%arg0, %arg0 : i32, i32)
%0 = llvm.mlir.constant(1 : i64) : i64
// CHECK: alloca {{.*}} align 4
%1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+ // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush
- // CHECK: load i32, i32*
+ // CHECK: load i32, ptr
%2 = llvm.load %1 : !llvm.ptr<i32>
// CHECK-NEXT: ret void
// CHECK-LABEL: define void @test_omp_parallel_1()
llvm.func @test_omp_parallel_1() -> () {
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_1:.*]] to {{.*}}
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_1:.*]])
omp.parallel {
omp.barrier
omp.terminator
// CHECK-LABEL: define void @test_omp_parallel_2()
llvm.func @test_omp_parallel_2() -> () {
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]] to {{.*}}
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]])
omp.parallel {
^bb0:
%0 = llvm.mlir.constant(1 : index) : i64
// CHECK: define void @test_omp_parallel_num_threads_1(i32 %[[NUM_THREADS_VAR_1:.*]])
llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () {
- // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_1:.*]])
- // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]] to {{.*}}
+ // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_1:.*]])
+ // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]])
omp.parallel num_threads(%arg0: i32) {
omp.barrier
omp.terminator
// CHECK: define void @test_omp_parallel_num_threads_2()
llvm.func @test_omp_parallel_num_threads_2() -> () {
%0 = llvm.mlir.constant(4 : index) : i32
- // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_2:.*]])
- // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]] to {{.*}}
+ // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_2:.*]])
+ // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]])
omp.parallel num_threads(%0: i32) {
omp.barrier
omp.terminator
// CHECK: define void @test_omp_parallel_num_threads_3()
llvm.func @test_omp_parallel_num_threads_3() -> () {
%0 = llvm.mlir.constant(4 : index) : i32
- // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_1:.*]])
- // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]] to {{.*}}
+ // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_3_1:.*]])
+ // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]])
omp.parallel num_threads(%0: i32) {
omp.barrier
omp.terminator
}
%1 = llvm.mlir.constant(8 : index) : i32
- // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_2:.*]])
- // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]] to {{.*}}
+ // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_3_2:.*]])
+ // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]])
omp.parallel num_threads(%1: i32) {
omp.barrier
omp.terminator
%0 = llvm.mlir.constant(0 : index) : i32
%1 = llvm.icmp "slt" %arg0, %0 : i32
-// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[SI_VAR_IF_1:.*]])
+// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[SI_VAR_IF_1:.*]])
// CHECK: br i1 %[[IF_COND_VAR_1]], label %[[IF_COND_TRUE_BLOCK_1:.*]], label %[[IF_COND_FALSE_BLOCK_1:.*]]
// CHECK: [[IF_COND_TRUE_BLOCK_1]]:
// CHECK: br label %[[OUTLINED_CALL_IF_BLOCK_1:.*]]
// CHECK: [[OUTLINED_CALL_IF_BLOCK_1]]:
-// CHECK: call void {{.*}} @__kmpc_fork_call(%struct.ident_t* @[[SI_VAR_IF_1]], {{.*}} @[[OMP_OUTLINED_FN_IF_1:.*]] to void
+// CHECK: call void {{.*}} @__kmpc_fork_call(ptr @[[SI_VAR_IF_1]], {{.*}} @[[OMP_OUTLINED_FN_IF_1:.*]])
// CHECK: br label %[[OUTLINED_EXIT_IF_1:.*]]
// CHECK: [[OUTLINED_EXIT_IF_1]]:
// CHECK: br label %[[OUTLINED_EXIT_IF_2:.*]]
// CHECK: [[OUTLINED_EXIT_IF_2]]:
// CHECK: br label %[[RETURN_BLOCK_IF_1:.*]]
// CHECK: [[IF_COND_FALSE_BLOCK_1]]:
-// CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
+// CHECK: call void @__kmpc_serialized_parallel(ptr @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
// CHECK: call void @[[OMP_OUTLINED_FN_IF_1]]
-// CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
+// CHECK: call void @__kmpc_end_serialized_parallel(ptr @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
// CHECK: br label %[[RETURN_BLOCK_IF_1]]
omp.parallel if(%1 : i1) {
omp.barrier
// CHECK-LABEL: define void @test_omp_parallel_3()
llvm.func @test_omp_parallel_3() -> () {
- // CHECK: [[OMP_THREAD_3_1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_1]], i32 2)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_1:.*]] to {{.*}}
+ // CHECK: [[OMP_THREAD_3_1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_1]], i32 2)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_1:.*]])
omp.parallel proc_bind(master) {
omp.barrier
omp.terminator
}
- // CHECK: [[OMP_THREAD_3_2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_2]], i32 3)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_2:.*]] to {{.*}}
+ // CHECK: [[OMP_THREAD_3_2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_2]], i32 3)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_2:.*]])
omp.parallel proc_bind(close) {
omp.barrier
omp.terminator
}
- // CHECK: [[OMP_THREAD_3_3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
- // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_3]], i32 4)
- // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_3:.*]] to {{.*}}
+ // CHECK: [[OMP_THREAD_3_3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+ // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_3]], i32 4)
+ // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_3:.*]])
omp.parallel proc_bind(spread) {
omp.barrier
omp.terminator
// CHECK-LABEL: define void @test_omp_parallel_4()
llvm.func @test_omp_parallel_4() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1:.*]])
// CHECK: define internal void @[[OMP_OUTLINED_FN_4_1]]
// CHECK: call void @__kmpc_barrier
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1_1:.*]])
// CHECK: call void @__kmpc_barrier
omp.parallel {
omp.barrier
}
llvm.func @test_omp_parallel_5() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1:.*]])
// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1]]
// CHECK: call void @__kmpc_barrier
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1:.*]])
// CHECK: call void @__kmpc_barrier
omp.parallel {
omp.barrier
// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1]]
omp.parallel {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1_1:.*]])
// CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1_1]]
// CHECK: call void @__kmpc_barrier
omp.parallel {
// CHECK-LABEL: define void @test_omp_master()
llvm.func @test_omp_master() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @{{.*}} to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @{{.*}})
// CHECK: omp.par.region1:
omp.parallel {
omp.master {
-// CHECK: [[OMP_THREAD_3_4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-// CHECK: {{[0-9]+}} = call i32 @__kmpc_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
+// CHECK: [[OMP_THREAD_3_4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+// CHECK: {{[0-9]+}} = call i32 @__kmpc_master(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
// CHECK: omp.master.region
-// CHECK: call void @__kmpc_end_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
+// CHECK: call void @__kmpc_end_master(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
// CHECK: br label %omp_region.end
omp.terminator
}
// CHECK: %struct.ident_t = type
// CHECK: @[[$parallel_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
-// CHECK: @[[$parallel_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$parallel_loc]], {{.*}}
+// CHECK: @[[$parallel_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$parallel_loc]] {{.*}}
// CHECK: @[[$wsloop_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
-// CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]], {{.*}}
+// CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]] {{.*}}
// CHECK-LABEL: @wsloop_simple
llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right functions are called.
// CHECK: call i32 @__kmpc_global_thread_num
- // CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]],
+ // CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$wsloop_loc_struct]],
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
llvm.store %3, %4 : !llvm.ptr<f32>
omp.yield
- // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]],
+ // CHECK: call void @__kmpc_for_static_fini(ptr @[[$wsloop_loc_struct]],
}) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0]> : vector<7xi32>} : (i64, i64, i64) -> ()
omp.terminator
}
%0 = llvm.mlir.constant(42 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mlir.constant(1 : index) : i64
- // CHECK: store i64 31, i64* %{{.*}}upperbound
+ // CHECK: store i64 31, ptr %{{.*}}upperbound
"omp.wsloop"(%1, %0, %2) ({
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%0 = llvm.mlir.constant(42 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mlir.constant(1 : index) : i64
- // CHECK: store i64 32, i64* %{{.*}}upperbound
+ // CHECK: store i64 32, ptr %{{.*}}upperbound
"omp.wsloop"(%1, %0, %2) ({
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
llvm.func @test_omp_wsloop_static_defchunk(%lb : i32, %ub : i32, %step : i32) -> () {
omp.wsloop schedule(static)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 34, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 0)
+ // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 34, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 0)
// CHECK: call void @__kmpc_for_static_fini
llvm.call @body(%iv) : (i32) -> ()
omp.yield
%static_chunk_size = llvm.mlir.constant(1 : i32) : i32
omp.wsloop schedule(static = %static_chunk_size : i32)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 33, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 1)
+ // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 1)
// CHECK: call void @__kmpc_for_static_fini
llvm.call @body(%iv) : (i32) -> ()
omp.yield
%static_chunk_size = llvm.mlir.constant(2 : i32) : i32
omp.wsloop schedule(static = %static_chunk_size : i32)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 33, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 2)
+ // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 2)
// CHECK: call void @__kmpc_for_static_fini
llvm.call @body(%iv) : (i32) -> ()
omp.yield
%chunk_size_const = llvm.mlir.constant(2 : i16) : i16
omp.wsloop schedule(dynamic = %chunk_size_const : i16)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2)
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
omp.wsloop schedule(dynamic = %chunk_size_var : i16)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32
- // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
+ // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
omp.wsloop schedule(dynamic = %chunk_size_var : i64)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32
- // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
+ // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_dynamic_chunk_var3(%lb : i32, %ub : i32, %step : i32, %chunk_size : i32) -> () {
omp.wsloop schedule(dynamic = %chunk_size : i32)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}})
+ // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}})
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_dynamic_nonmonotonic(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(dynamic, nonmonotonic)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_dynamic_monotonic(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(dynamic, monotonic)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 536870947
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870947
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_runtime_simd(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(runtime, simd)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741871
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741871
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_guided_simd(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(guided, simd)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741870
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741870
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
  // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
llvm.func @test_omp_wsloop_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_static_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(static) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
%static_chunk_size = llvm.mlir.constant(1 : i32) : i32
omp.wsloop schedule(static = %static_chunk_size : i32) ordered(0)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
+ // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
// CHECK: call void @__kmpc_dispatch_fini_4u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_dynamic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(dynamic) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_auto_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(auto) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_runtime_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(runtime) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_guided_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(guided) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_dynamic_nonmonotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(dynamic, nonmonotonic) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
llvm.func @test_omp_wsloop_dynamic_monotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
omp.wsloop schedule(dynamic, monotonic) ordered(0)
for (%iv) : i64 = (%lb) to (%ub) step (%step) {
- // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
+ // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
// CHECK: call void @__kmpc_dispatch_fini_8u
// CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
// CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
// is done by the OpenMPIRBuilder.
// CHECK-LABEL: @collapse_wsloop
-// CHECK: i32* noalias %[[TIDADDR:[0-9A-Za-z.]*]]
-// CHECK: load i32, i32* %[[TIDADDR]]
+// CHECK: ptr noalias %[[TIDADDR:[0-9A-Za-z.]*]]
+// CHECK: load i32, ptr %[[TIDADDR]]
// CHECK: store
// CHECK: load
// CHECK: %[[LB0:.*]] = load i32
// CHECK: br label %[[COLLAPSED_PREHEADER:.*]]
//
// CHECK: [[COLLAPSED_PREHEADER]]:
- // CHECK: store i32 0, i32*
+ // CHECK: store i32 0, ptr
// CHECK: %[[TOTAL_SUB_1:.*]] = sub i32 %[[TOTAL]], 1
- // CHECK: store i32 %[[TOTAL_SUB_1]], i32*
+ // CHECK: store i32 %[[TOTAL_SUB_1]], ptr
// CHECK: call void @__kmpc_for_static_init_4u
omp.wsloop collapse(3)
for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
// detailed checking is done by the OpenMPIRBuilder.
// CHECK-LABEL: @collapse_wsloop_dynamic
-// CHECK: i32* noalias %[[TIDADDR:[0-9A-Za-z.]*]]
-// CHECK: load i32, i32* %[[TIDADDR]]
+// CHECK: ptr noalias %[[TIDADDR:[0-9A-Za-z.]*]]
+// CHECK: load i32, ptr %[[TIDADDR]]
// CHECK: store
// CHECK: load
// CHECK: %[[LB0:.*]] = load i32
// CHECK: br label %[[COLLAPSED_PREHEADER:.*]]
//
// CHECK: [[COLLAPSED_PREHEADER]]:
- // CHECK: store i32 1, i32*
- // CHECK: store i32 %[[TOTAL]], i32*
+ // CHECK: store i32 1, ptr
+ // CHECK: store i32 %[[TOTAL]], ptr
// CHECK: call void @__kmpc_dispatch_init_4u
omp.wsloop collapse(3) schedule(dynamic)
for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
// CHECK: [[ADDR3:%.*]] = alloca [1 x i64], align 8
// CHECK: [[ADDR:%.*]] = alloca [1 x i64], align 8
- // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
- // CHECK-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[OMP_THREAD]])
+ // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+ // CHECK-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_THREAD]])
omp.ordered_region {
omp.terminator
- // CHECK: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[OMP_THREAD]])
+ // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_THREAD]])
}
omp.wsloop ordered(0)
for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
- // CHECK: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
+ // CHECK: call void @__kmpc_ordered(ptr @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
omp.ordered_region {
omp.terminator
- // CHECK: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB3]], i32 [[OMP_THREAD2]])
+ // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB3]], i32 [[OMP_THREAD2]])
}
omp.yield
}
omp.wsloop ordered(1)
for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
- // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR]], i64 0, i64 0
- // CHECK: store i64 [[ARG0:%.*]], i64* [[TMP]], align 8
- // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR]], i64 0, i64 0
- // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
- // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB3]], i32 [[OMP_THREAD2]], i64* [[TMP2]])
+ // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+ // CHECK: store i64 [[ARG0:%.*]], ptr [[TMP]], align 8
+ // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+ // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
+ // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB3]], i32 [[OMP_THREAD2]], ptr [[TMP2]])
omp.ordered depend_type(dependsink) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
- // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR3]], i64 0, i64 0
- // CHECK: store i64 [[ARG0]], i64* [[TMP3]], align 8
- // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR3]], i64 0, i64 0
- // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]])
- // CHECK: call void @__kmpc_doacross_post(%struct.ident_t* @[[GLOB5]], i32 [[OMP_THREAD4]], i64* [[TMP4]])
+ // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+ // CHECK: store i64 [[ARG0]], ptr [[TMP3]], align 8
+ // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+ // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+ // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB5]], i32 [[OMP_THREAD4]], ptr [[TMP4]])
omp.ordered depend_type(dependsource) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
omp.yield
omp.wsloop ordered(2)
for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
- // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 0
- // CHECK: store i64 [[ARG0]], i64* [[TMP5]], align 8
- // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 1
- // CHECK: store i64 [[ARG1:%.*]], i64* [[TMP6]], align 8
- // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 0
- // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB7:[0-9]+]])
- // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB7]], i32 [[OMP_THREAD6]], i64* [[TMP7]])
- // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 0
- // CHECK: store i64 [[ARG2:%.*]], i64* [[TMP8]], align 8
- // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 1
- // CHECK: store i64 [[ARG3:%.*]], i64* [[TMP9]], align 8
- // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 0
- // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB7]])
- // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB7]], i32 [[OMP_THREAD8]], i64* [[TMP10]])
+ // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+ // CHECK: store i64 [[ARG0]], ptr [[TMP5]], align 8
+ // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 1
+ // CHECK: store i64 [[ARG1:%.*]], ptr [[TMP6]], align 8
+ // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+ // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
+ // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD6]], ptr [[TMP7]])
+ // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+ // CHECK: store i64 [[ARG2:%.*]], ptr [[TMP8]], align 8
+ // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 1
+ // CHECK: store i64 [[ARG3:%.*]], ptr [[TMP9]], align 8
+ // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+ // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]])
+ // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD8]], ptr [[TMP10]])
omp.ordered depend_type(dependsink) depend_vec(%arg3, %arg4, %arg5, %arg6 : i64, i64, i64, i64) {num_loops_val = 2 : i64}
- // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 0
- // CHECK: store i64 [[ARG0]], i64* [[TMP11]], align 8
- // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 1
- // CHECK: store i64 [[ARG1]], i64* [[TMP12]], align 8
- // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 0
- // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB9:[0-9]+]])
- // CHECK: call void @__kmpc_doacross_post(%struct.ident_t* @[[GLOB9]], i32 [[OMP_THREAD10]], i64* [[TMP13]])
+ // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+ // CHECK: store i64 [[ARG0]], ptr [[TMP11]], align 8
+ // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 1
+ // CHECK: store i64 [[ARG1]], ptr [[TMP12]], align 8
+ // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+ // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+ // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB9]], i32 [[OMP_THREAD10]], ptr [[TMP13]])
omp.ordered depend_type(dependsource) depend_vec(%arg3, %arg4 : i64, i64) {num_loops_val = 2 : i64}
omp.yield
// -----
// CHECK-LABEL: @omp_atomic_read
-// CHECK-SAME: (i32* %[[ARG0:.*]], i32* %[[ARG1:.*]])
+// CHECK-SAME: (ptr %[[ARG0:.*]], ptr %[[ARG1:.*]])
llvm.func @omp_atomic_read(%arg0 : !llvm.ptr<i32>, %arg1 : !llvm.ptr<i32>) -> () {
- // CHECK: %[[X1:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
- // CHECK: store i32 %[[X1]], i32* %[[ARG1]], align 4
+ // CHECK: %[[X1:.*]] = load atomic i32, ptr %[[ARG0]] monotonic, align 4
+ // CHECK: store i32 %[[X1]], ptr %[[ARG1]], align 4
omp.atomic.read %arg1 = %arg0 : !llvm.ptr<i32>
- // CHECK: %[[X2:.*]] = load atomic i32, i32* %[[ARG0]] seq_cst, align 4
- // CHECK: call void @__kmpc_flush(%{{.*}})
- // CHECK: store i32 %[[X2]], i32* %[[ARG1]], align 4
+ // CHECK: %[[X2:.*]] = load atomic i32, ptr %[[ARG0]] seq_cst, align 4
+ // CHECK: call void @__kmpc_flush(ptr @{{.*}})
+ // CHECK: store i32 %[[X2]], ptr %[[ARG1]], align 4
omp.atomic.read %arg1 = %arg0 memory_order(seq_cst) : !llvm.ptr<i32>
- // CHECK: %[[X3:.*]] = load atomic i32, i32* %[[ARG0]] acquire, align 4
- // CHECK: call void @__kmpc_flush(%{{.*}})
- // CHECK: store i32 %[[X3]], i32* %[[ARG1]], align 4
+ // CHECK: %[[X3:.*]] = load atomic i32, ptr %[[ARG0]] acquire, align 4
+ // CHECK: call void @__kmpc_flush(ptr @{{.*}})
+ // CHECK: store i32 %[[X3]], ptr %[[ARG1]], align 4
omp.atomic.read %arg1 = %arg0 memory_order(acquire) : !llvm.ptr<i32>
- // CHECK: %[[X4:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
- // CHECK: store i32 %[[X4]], i32* %[[ARG1]], align 4
+ // CHECK: %[[X4:.*]] = load atomic i32, ptr %[[ARG0]] monotonic, align 4
+ // CHECK: store i32 %[[X4]], ptr %[[ARG1]], align 4
omp.atomic.read %arg1 = %arg0 memory_order(relaxed) : !llvm.ptr<i32>
llvm.return
}
// -----
// CHECK-LABEL: @omp_atomic_write
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
llvm.func @omp_atomic_write(%x: !llvm.ptr<i32>, %expr: i32) -> () {
- // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+ // CHECK: store atomic i32 %[[expr]], ptr %[[x]] monotonic, align 4
omp.atomic.write %x = %expr : !llvm.ptr<i32>, i32
- // CHECK: store atomic i32 %[[expr]], i32* %[[x]] seq_cst, align 4
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+ // CHECK: store atomic i32 %[[expr]], ptr %[[x]] seq_cst, align 4
+ // CHECK: call void @__kmpc_flush(ptr @{{.*}})
omp.atomic.write %x = %expr memory_order(seq_cst) : !llvm.ptr<i32>, i32
- // CHECK: store atomic i32 %[[expr]], i32* %[[x]] release, align 4
- // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+ // CHECK: store atomic i32 %[[expr]], ptr %[[x]] release, align 4
+ // CHECK: call void @__kmpc_flush(ptr @{{.*}})
omp.atomic.write %x = %expr memory_order(release) : !llvm.ptr<i32>, i32
- // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+ // CHECK: store atomic i32 %[[expr]], ptr %[[x]] monotonic, align 4
omp.atomic.write %x = %expr memory_order(relaxed) : !llvm.ptr<i32>, i32
llvm.return
}
// Checking simple atomicrmw and cmpxchg based translation. This also checks for
// ambiguous alloca insert point by putting llvm.mul as the first update operation.
// CHECK-LABEL: @omp_atomic_update
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]], i1* %[[xbool:.*]], i1 %[[exprbool:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]], ptr %[[xbool:.*]], i1 %[[exprbool:.*]])
llvm.func @omp_atomic_update(%x:!llvm.ptr<i32>, %expr: i32, %xbool: !llvm.ptr<i1>, %exprbool: i1) {
// CHECK: %[[t1:.*]] = mul i32 %[[x_old:.*]], %[[expr]]
- // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
- // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
- // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+ // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+ // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+ // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = llvm.mul %xval, %expr : i32
omp.yield(%newval : i32)
}
- // CHECK: atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = llvm.add %xval, %expr : i32
// Checking an order-dependent operation when the order is `expr binop x`
// CHECK-LABEL: @omp_atomic_update_ordering
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
// CHECK: %[[t1:.*]] = shl i32 %[[expr]], %[[x_old:[^ ,]*]]
- // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
- // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
- // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+ // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+ // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+ // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = llvm.shl %expr, %xval : i32
// Checking an order-dependent operation when the order is `x binop expr`
// CHECK-LABEL: @omp_atomic_update_ordering
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
// CHECK: %[[t1:.*]] = shl i32 %[[x_old:.*]], %[[expr]]
- // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
- // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
- // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]] monotonic
+ // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+ // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+ // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]] monotonic
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = llvm.shl %xval, %expr : i32
// Checking intrinsic translation.
// CHECK-LABEL: @omp_atomic_update_intrinsic
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
llvm.func @omp_atomic_update_intrinsic(%x:!llvm.ptr<i32>, %expr: i32) {
// CHECK: %[[t1:.*]] = call i32 @llvm.smax.i32(i32 %[[x_old:.*]], i32 %[[expr]])
- // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
- // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
- // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+ // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+ // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+ // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = "llvm.intr.smax"(%xval, %expr) : (i32, i32) -> i32
omp.yield(%newval : i32)
}
// CHECK: %[[t1:.*]] = call i32 @llvm.umax.i32(i32 %[[x_old:.*]], i32 %[[expr]])
- // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
- // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
- // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+ // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+ // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+ // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
%newval = "llvm.intr.umax"(%xval, %expr) : (i32, i32) -> i32
// -----
// CHECK-LABEL: @omp_atomic_capture_prefix_update
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
llvm.func @omp_atomic_capture_prefix_update(
%x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
%xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
// CHECK-NEXT: %[[newval:.*]] = add i32 %[[res]], %[[expr]]
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
omp.atomic.read %v = %x : !llvm.ptr<i32>
}
- // CHECK: %[[res:.*]] = atomicrmw sub i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: %[[res:.*]] = atomicrmw sub ptr %[[x]], i32 %[[expr]] monotonic
// CHECK-NEXT: %[[newval:.*]] = sub i32 %[[res]], %[[expr]]
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
omp.atomic.read %v = %x : !llvm.ptr<i32>
}
- // CHECK: %[[res:.*]] = atomicrmw and i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: %[[res:.*]] = atomicrmw and ptr %[[x]], i32 %[[expr]] monotonic
// CHECK-NEXT: %[[newval:.*]] = and i32 %[[res]], %[[expr]]
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
omp.atomic.read %v = %x : !llvm.ptr<i32>
}
- // CHECK: %[[res:.*]] = atomicrmw or i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: %[[res:.*]] = atomicrmw or ptr %[[x]], i32 %[[expr]] monotonic
// CHECK-NEXT: %[[newval:.*]] = or i32 %[[res]], %[[expr]]
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
omp.atomic.read %v = %x : !llvm.ptr<i32>
}
- // CHECK: %[[res:.*]] = atomicrmw xor i32* %[[x]], i32 %[[expr]] monotonic
+ // CHECK: %[[res:.*]] = atomicrmw xor ptr %[[x]], i32 %[[expr]] monotonic
// CHECK-NEXT: %[[newval:.*]] = xor i32 %[[res]], %[[expr]]
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = mul i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = sdiv i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = udiv i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = shl i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = lshr i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = ashr i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smax.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smin.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umax.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umin.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[newval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[newval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.update %x : !llvm.ptr<i32> {
^bb0(%xval: i32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK: %[[newval:.*]] = fadd float %{{.*}}, %[[exprf]]
- // CHECK: store float %[[newval]], float* %{{.*}}
- // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
- // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store float %[[newval]], float* %[[vf]]
+ // CHECK: store float %[[newval]], ptr %{{.*}}
+ // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store float %[[newval]], ptr %[[vf]]
omp.atomic.capture {
omp.atomic.update %xf : !llvm.ptr<f32> {
^bb0(%xval: f32):
// CHECK: %[[xval:.*]] = phi i32
// CHECK: %[[newval:.*]] = fsub float %{{.*}}, %[[exprf]]
- // CHECK: store float %[[newval]], float* %{{.*}}
- // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
- // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store float %[[newval]], float* %[[vf]]
+ // CHECK: store float %[[newval]], ptr %{{.*}}
+ // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store float %[[newval]], ptr %[[vf]]
omp.atomic.capture {
omp.atomic.update %xf : !llvm.ptr<f32> {
^bb0(%xval: f32):
// -----
// CHECK-LABEL: @omp_atomic_capture_postfix_update
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
llvm.func @omp_atomic_capture_postfix_update(
%x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
%xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw sub i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw sub ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw and i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw and ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw or i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw or ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw xor i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw xor ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = mul i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = sdiv i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = udiv i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = shl i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = lshr i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = ashr i32 %[[xval]], %[[expr]]
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smax.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smin.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umax.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umin.i32(i32 %[[xval]], i32 %[[expr]])
- // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
- // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+ // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
// CHECK: %[[newval:.*]] = fadd float %{{.*}}, %[[exprf]]
- // CHECK: store float %[[newval]], float* %{{.*}}
- // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
- // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store float %[[xvalf]], float* %[[vf]]
+ // CHECK: store float %[[newval]], ptr %{{.*}}
+ // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store float %[[xvalf]], ptr %[[vf]]
omp.atomic.capture {
omp.atomic.read %vf = %xf : !llvm.ptr<f32>
omp.atomic.update %xf : !llvm.ptr<f32> {
// CHECK: %[[xval:.*]] = phi i32
// CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
// CHECK: %[[newval:.*]] = fsub float %{{.*}}, %[[exprf]]
- // CHECK: store float %[[newval]], float* %{{.*}}
- // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
- // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store float %[[xvalf]], float* %[[vf]]
+ // CHECK: store float %[[newval]], ptr %{{.*}}
+ // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store float %[[xvalf]], ptr %[[vf]]
omp.atomic.capture {
omp.atomic.read %vf = %xf : !llvm.ptr<f32>
omp.atomic.update %xf : !llvm.ptr<f32> {
// -----
// CHECK-LABEL: @omp_atomic_capture_misc
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
llvm.func @omp_atomic_capture_misc(
%x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
%xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
- // CHECK: %[[xval:.*]] = atomicrmw xchg i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[xval]], i32* %[[v]]
+ // CHECK: %[[xval:.*]] = atomicrmw xchg ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[xval]], ptr %[[v]]
omp.atomic.capture{
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.write %x = %expr : !llvm.ptr<i32>, i32
// CHECK: %[[xval:.*]] = phi i32
// CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
- // CHECK: store float %[[exprf]], float* %{{.*}}
- // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
- // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
- // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
- // CHECK: store float %[[xvalf]], float* %[[vf]]
+ // CHECK: store float %[[exprf]], ptr %{{.*}}
+ // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+ // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+ // CHECK: store float %[[xvalf]], ptr %[[vf]]
omp.atomic.capture{
omp.atomic.read %vf = %xf : !llvm.ptr<f32>
omp.atomic.write %xf = %exprf : !llvm.ptr<f32>, f32
}
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] seq_cst
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] seq_cst
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture memory_order(seq_cst) {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] acquire
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] acquire
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture memory_order(acquire) {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] release
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] release
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture memory_order(release) {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture memory_order(relaxed) {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
}
}
- // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] acq_rel
- // CHECK: store i32 %[[res]], i32* %[[v]]
+ // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] acq_rel
+ // CHECK: store i32 %[[res]], ptr %[[v]]
omp.atomic.capture memory_order(acq_rel) {
omp.atomic.read %v = %x : !llvm.ptr<i32>
omp.atomic.update %x : !llvm.ptr<i32> {
// CHECK: [[REGION3]]:
// CHECK: %11 = add i32 %{{.*}}, %{{.*}}
%add = llvm.add %arg0, %arg1 : i32
- // CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4
+ // CHECK: store i32 %{{.*}}, ptr %{{.*}}, align 4
// CHECK: br label %{{.*}}
llvm.store %add, %arg2 : !llvm.ptr<i32>
omp.terminator
// -----
// CHECK-LABEL: @single
-// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], i32* %[[zaddr:.*]])
+// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], ptr %[[zaddr:.*]])
llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
// CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
%a = llvm.sub %x, %y : i32
- // CHECK: store i32 %[[a]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[a]], ptr %[[zaddr]]
llvm.store %a, %zaddr : !llvm.ptr<i32>
// CHECK: call i32 @__kmpc_single
omp.single {
// CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
%z = llvm.add %x, %y : i32
- // CHECK: store i32 %[[z]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[z]], ptr %[[zaddr]]
llvm.store %z, %zaddr : !llvm.ptr<i32>
// CHECK: call void @__kmpc_end_single
// CHECK: call void @__kmpc_barrier
}
// CHECK: %[[b:.*]] = mul i32 %[[x]], %[[y]]
%b = llvm.mul %x, %y : i32
- // CHECK: store i32 %[[b]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[b]], ptr %[[zaddr]]
llvm.store %b, %zaddr : !llvm.ptr<i32>
// CHECK: ret void
llvm.return
// -----
// CHECK-LABEL: @single_nowait
-// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], i32* %[[zaddr:.*]])
+// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], ptr %[[zaddr:.*]])
llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
// CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
%a = llvm.sub %x, %y : i32
- // CHECK: store i32 %[[a]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[a]], ptr %[[zaddr]]
llvm.store %a, %zaddr : !llvm.ptr<i32>
// CHECK: call i32 @__kmpc_single
omp.single nowait {
// CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
%z = llvm.add %x, %y : i32
- // CHECK: store i32 %[[z]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[z]], ptr %[[zaddr]]
llvm.store %z, %zaddr : !llvm.ptr<i32>
// CHECK: call void @__kmpc_end_single
// CHECK-NOT: call void @__kmpc_barrier
}
// CHECK: %[[t:.*]] = mul i32 %[[x]], %[[y]]
%t = llvm.mul %x, %y : i32
- // CHECK: store i32 %[[t]], i32* %[[zaddr]]
+ // CHECK: store i32 %[[t]], ptr %[[zaddr]]
llvm.store %t, %zaddr : !llvm.ptr<i32>
// CHECK: ret void
llvm.return
// -----
// CHECK: @_QFsubEx = internal global i32 undef
-// CHECK: @_QFsubEx.cache = common global i8** null
+// CHECK: @_QFsubEx.cache = common global ptr null
// CHECK-LABEL: @omp_threadprivate
llvm.func @omp_threadprivate() {
-// CHECK: [[THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB:[0-9]+]])
-// CHECK: [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB]], i32 [[THREAD]], i8* bitcast (i32* @_QFsubEx to i8*), i64 4, i8*** @_QFsubEx.cache)
-// CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
-// CHECK: store i32 1, i32* [[TMP2]], align 4
-// CHECK: store i32 3, i32* [[TMP2]], align 4
+// CHECK: [[THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB:[0-9]+]])
+// CHECK: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB]], i32 [[THREAD]], ptr @_QFsubEx, i64 4, ptr @_QFsubEx.cache)
+// CHECK: store i32 1, ptr [[TMP1]], align 4
+// CHECK: store i32 3, ptr [[TMP1]], align 4
// CHECK-LABEL: omp.par.region{{.*}}
-// CHECK: [[THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
-// CHECK: [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2]], i32 [[THREAD2]], i8* bitcast (i32* @_QFsubEx to i8*), i64 4, i8*** @_QFsubEx.cache)
-// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32*
-// CHECK: store i32 2, i32* [[TMP4]], align 4
+// CHECK: [[THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// CHECK: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB2]], i32 [[THREAD2]], ptr @_QFsubEx, i64 4, ptr @_QFsubEx.cache)
+// CHECK: store i32 2, ptr [[TMP3]], align 4
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.mlir.constant(2 : i32) : i32
}
-// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @1, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @[[inner1:.+]] to void (i32*, i32*, ...)*))
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 0, ptr @[[inner1:.+]])
// CHECK: define internal void @[[inner1]]
-// CHECK: %[[structArg:.+]] = alloca { i64* }
-// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @3, i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i64* }*)* @[[inner2:.+]] to void (i32*, i32*, ...)*), { i64* }* %[[structArg]])
+// CHECK: %[[structArg:.+]] = alloca { ptr }
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @3, i32 1, ptr @[[inner2:.+]], ptr %[[structArg]])
// Private reduction variable and its initialization.
// CHECK: %[[PRIVATE:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE]]
// Call to the reduction function.
// CHECK: call i32 @__kmpc_reduce
// CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
// Atomic reduction.
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL]]
// Non-atomic reduction:
// CHECK: fadd float
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
// CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
// Reduction function.
// CHECK: define internal void @[[REDFUNC]]
// Private reduction variable and its initialization.
// CHECK: %[[PRIVATE1:.+]] = alloca float
// CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE2]]
// Call to the reduction function.
// CHECK: call i32 @__kmpc_reduce
// CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
// Atomic reduction.
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL2]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL2]]
// Non-atomic reduction:
// CHECK: fadd float
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
// CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
// CHECK: %[[UPDATED2:.+]] = fadd float %[[PARTIAL2]], 2.000000e+00
-// CHECK: store float %[[UPDATED2]], float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED2]], ptr %[[PRIVATE2]]
// Reduction function.
// CHECK: define internal void @[[REDFUNC]]
// Private reduction variable and its initialization.
// CHECK: %[[PRIVATE1:.+]] = alloca float
// CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE2]]
// Call to the reduction function.
// CHECK: call i32 @__kmpc_reduce
// CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
// Atomic reduction.
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL2]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL2]]
// Non-atomic reduction:
// CHECK: fadd float
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
// CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK-NOT: %{{.*}} = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK-NOT: %{{.*}} = load float, ptr %[[PRIVATE2]]
// CHECK-NOT: %{{.*}} = fadd float %[[PARTIAL2]], 2.000000e+00
// Reduction function.
// Private reduction variable and its initialization.
// CHECK: %[[PRIVATE:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE]]
// Call to the reduction function.
// CHECK: call i32 @__kmpc_reduce
// CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
// Atomic reduction.
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL]]
// Non-atomic reduction:
// CHECK: fadd float
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
// CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
// CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
// Reduction function.
// CHECK: define internal void @[[REDFUNC]]
// Private reduction variable and its initialization.
// CHECK: %[[PRIVATE1:.+]] = alloca float
// CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 1.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 1.000000e+00, ptr %[[PRIVATE2]]
// Call to the reduction function.
// CHECK: call i32 @__kmpc_reduce
// Update of the private variable using the reduction region
// (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
// CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
// CHECK: %[[UPDATED2:.+]] = fmul float %[[PARTIAL2]], 2.000000e+00
-// CHECK: store float %[[UPDATED2]], float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED2]], ptr %[[PRIVATE2]]
// Reduction function.
// CHECK: define internal void @[[REDFUNC]]
; CHECK: %p_escaping = select i1 undef, i32 undef, i32 undef
;
; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK: store i32 %p_escaping, i32* %escaping.s2a
+; CHECK: store i32 %p_escaping, ptr %escaping.s2a
define i32 @func() {
entry:
; This checks that the stored value is indeed from the generated code.
;
; CHECK-LABEL: polly.stmt.do.body.entry:
-; CHECK: a.phiops.reload = load i32, i32* %a.phiops
+; CHECK: a.phiops.reload = load i32, ptr %a.phiops
;
; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK: store i32 %polly.a, i32* %a.s2a
+; CHECK: store i32 %polly.a, ptr %a.s2a
define void @func() {
entry:
; CHECK-NEXT: %_s.sroa.343.0.ph5161118 = phi i32 [ undef, %for.cond ], [ %_s.sroa.343.0.ph5161118.ph.merge, %polly.merge_new_and_old ]
; CHECK-LABEL: polly.exiting:
-; CHECK-NEXT: %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, i32* %_s.sroa.343.0.ph5161118.s2a
+; CHECK-NEXT: %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, ptr %_s.sroa.343.0.ph5161118.s2a
; Function Attrs: nounwind uwtable
define void @lzmaDecode() #0 {
; The first is currently generated by Polly and tested here.
; CHECK: polly.stmt.next:
-; CHECK-NEXT: store i32 2, i32* %phi.phiops
+; CHECK-NEXT: store i32 2, ptr %phi.phiops
; CHECK-NEXT: br label %polly.stmt.join
define i32 @func() {
; CHECK: %newval.merge = phi float [ %newval.final_reload, %polly.exiting ], [ %newval, %subregion_exit.region_exiting ]
;
; CHECK-LABEL: polly.start:
-; CHECK: store float %loop_carried.ph, float* %loop_carried.phiops
+; CHECK: store float %loop_carried.ph, ptr %loop_carried.phiops
;
; CHECK-LABEL: polly.stmt.subregion_entry.entry:
-; CHECK: %loop_carried.phiops.reload = load float, float* %loop_carried.phiops
+; CHECK: %loop_carried.phiops.reload = load float, ptr %loop_carried.phiops
;
; CHECK-LABEL: polly.stmt.subregion_entry:
; CHECK: %polly.loop_carried = phi float [ %loop_carried.phiops.reload, %polly.stmt.subregion_entry.entry ]
; CHECK: %p_newval = fadd float %polly.loop_carried, 1.000000e+00
;
; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK: %newval.final_reload = load float, float* %newval.s2a
+; CHECK: %newval.final_reload = load float, ptr %newval.s2a
define void @func() {
entry: