; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep .comm.*X,0
-@X = linkonce global { } zeroinitializer ; <{ }*> [#uses=0]
+@X = linkonce global { } zeroinitializer ; <ptr> [#uses=0]
target datalayout = "E-p:32:32"
target triple = "powerpc-unknown-linux-gnu"
-define void @bar(i32 %G, i32 %E, i32 %F, i32 %A, i32 %B, i32 %C, i32 %D, i8* %fmt, ...) {
- %ap = alloca i8* ; <i8**> [#uses=2]
- %va.upgrd.1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %va.upgrd.1 )
- %tmp.1 = load i8*, i8** %ap ; <i8*> [#uses=1]
- %tmp.0 = call double @foo( i8* %tmp.1 ) ; <double> [#uses=0]
+define void @bar(i32 %G, i32 %E, i32 %F, i32 %A, i32 %B, i32 %C, i32 %D, ptr %fmt, ...) {
+ %ap = alloca ptr ; <ptr> [#uses=2]
+ call void @llvm.va_start( ptr %ap )
+ %tmp.1 = load ptr, ptr %ap ; <ptr> [#uses=1]
+ %tmp.0 = call double @foo( ptr %tmp.1 ) ; <double> [#uses=0]
ret void
}
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
-declare double @foo(i8*)
+declare double @foo(ptr)
; REQUIRES: default_triple
define void @iterative_hash_host_wide_int() {
- %zero = alloca i32 ; <i32*> [#uses=2]
- %b = alloca i32 ; <i32*> [#uses=1]
- store i32 0, i32* %zero
- %tmp = load i32, i32* %zero ; <i32> [#uses=1]
+ %zero = alloca i32 ; <ptr> [#uses=2]
+ %b = alloca i32 ; <ptr> [#uses=1]
+ store i32 0, ptr %zero
+ %tmp = load i32, ptr %zero ; <i32> [#uses=1]
%tmp5 = bitcast i32 %tmp to i32 ; <i32> [#uses=1]
%tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
%tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
- %tmp7 = load i64, i64* null ; <i64> [#uses=1]
+ %tmp7 = load i64, ptr null ; <i64> [#uses=1]
%tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
%shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
%tmp8 = ashr i64 %tmp7, %shift.upgrd.2 ; <i64> [#uses=1]
%tmp8.upgrd.3 = trunc i64 %tmp8 to i32 ; <i32> [#uses=1]
- store i32 %tmp8.upgrd.3, i32* %b
+ store i32 %tmp8.upgrd.3, ptr %b
unreachable
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
-define void @test(<8 x i16>* %P) {
+define void @test(ptr %P) {
; CHECK: vspltish {{[0-9]+}}, 10
- %tmp = load <8 x i16>, <8 x i16>* %P ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, ptr %P ; <<8 x i16>> [#uses=1]
%tmp1 = add <8 x i16> %tmp, < i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10 > ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp1, <8 x i16>* %P
+ store <8 x i16> %tmp1, ptr %P
ret void
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5
; END.
-define void @test(i8* %stack) {
+define void @test(ptr %stack) {
entry:
%tmp9 = icmp eq i32 0, 0 ; <i1> [#uses=1]
%tmp30 = icmp eq i32 0, 0 ; <i1> [#uses=1]
cond_true389: ; preds = %cond_false385
ret void
cond_next463: ; preds = %cond_false385
- %tmp1208107 = icmp ugt i8* null, %stack ; <i1> [#uses=1]
+ %tmp1208107 = icmp ugt ptr null, %stack ; <i1> [#uses=1]
br i1 %tmp1208107, label %cond_true1209.preheader, label %bb1212
cond_true498: ; preds = %cond_true1209.preheader
ret void
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
; END.
- %struct.attr_desc = type { i8*, %struct.attr_desc*, %struct.attr_value*, %struct.attr_value*, i32 }
- %struct.attr_value = type { %struct.rtx_def*, %struct.attr_value*, %struct.insn_ent*, i32, i32 }
- %struct.insn_def = type { %struct.insn_def*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.insn_ent = type { %struct.insn_ent*, %struct.insn_def* }
+ %struct.attr_desc = type { ptr, ptr, ptr, ptr, i32 }
+ %struct.attr_value = type { ptr, ptr, ptr, i32, i32 }
+ %struct.insn_def = type { ptr, ptr, i32, i32, i32, i32, i32 }
+ %struct.insn_ent = type { ptr, ptr }
%struct.rtx_def = type { i16, i8, i8, %struct.u }
%struct.u = type { [1 x i64] }
define void @find_attr() {
entry:
- %tmp26 = icmp eq %struct.attr_desc* null, null ; <i1> [#uses=1]
+ %tmp26 = icmp eq ptr null, null ; <i1> [#uses=1]
br i1 %tmp26, label %bb30, label %cond_true27
cond_true27: ; preds = %entry
ret void
bb30: ; preds = %entry
- %tmp67 = icmp eq %struct.attr_desc* null, null ; <i1> [#uses=1]
+ %tmp67 = icmp eq ptr null, null ; <i1> [#uses=1]
br i1 %tmp67, label %cond_next92, label %cond_true68
cond_true68: ; preds = %bb30
ret void
cond_next92: ; preds = %bb30
- %tmp173 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp174 = load i32, i32* %tmp173 ; <i32> [#uses=1]
+ %tmp173 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=2]
+ %tmp174 = load i32, ptr %tmp173 ; <i32> [#uses=1]
%tmp177 = and i32 %tmp174, -9 ; <i32> [#uses=1]
- store i32 %tmp177, i32* %tmp173
- %tmp180 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp181 = load i32, i32* %tmp180 ; <i32> [#uses=1]
- %tmp185 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp186 = load i32, i32* %tmp185 ; <i32> [#uses=1]
+ store i32 %tmp177, ptr %tmp173
+ %tmp180 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=1]
+ %tmp181 = load i32, ptr %tmp180 ; <i32> [#uses=1]
+ %tmp185 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=2]
+ %tmp186 = load i32, ptr %tmp185 ; <i32> [#uses=1]
%tmp183187 = shl i32 %tmp181, 1 ; <i32> [#uses=1]
%tmp188 = and i32 %tmp183187, 16 ; <i32> [#uses=1]
%tmp190 = and i32 %tmp186, -17 ; <i32> [#uses=1]
%tmp191 = or i32 %tmp190, %tmp188 ; <i32> [#uses=1]
- store i32 %tmp191, i32* %tmp185
- %tmp193 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp194 = load i32, i32* %tmp193 ; <i32> [#uses=1]
- %tmp198 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp199 = load i32, i32* %tmp198 ; <i32> [#uses=1]
+ store i32 %tmp191, ptr %tmp185
+ %tmp193 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=1]
+ %tmp194 = load i32, ptr %tmp193 ; <i32> [#uses=1]
+ %tmp198 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=2]
+ %tmp199 = load i32, ptr %tmp198 ; <i32> [#uses=1]
%tmp196200 = shl i32 %tmp194, 2 ; <i32> [#uses=1]
%tmp201 = and i32 %tmp196200, 64 ; <i32> [#uses=1]
%tmp203 = and i32 %tmp199, -65 ; <i32> [#uses=1]
%tmp204 = or i32 %tmp203, %tmp201 ; <i32> [#uses=1]
- store i32 %tmp204, i32* %tmp198
- %tmp206 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp207 = load i32, i32* %tmp206 ; <i32> [#uses=1]
- %tmp211 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp212 = load i32, i32* %tmp211 ; <i32> [#uses=1]
+ store i32 %tmp204, ptr %tmp198
+ %tmp206 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=1]
+ %tmp207 = load i32, ptr %tmp206 ; <i32> [#uses=1]
+ %tmp211 = getelementptr %struct.attr_desc, ptr null, i32 0, i32 4 ; <ptr> [#uses=2]
+ %tmp212 = load i32, ptr %tmp211 ; <i32> [#uses=1]
%tmp209213 = shl i32 %tmp207, 1 ; <i32> [#uses=1]
%tmp214 = and i32 %tmp209213, 128 ; <i32> [#uses=1]
%tmp216 = and i32 %tmp212, -129 ; <i32> [#uses=1]
%tmp217 = or i32 %tmp216, %tmp214 ; <i32> [#uses=1]
- store i32 %tmp217, i32* %tmp211
+ store i32 %tmp217, ptr %tmp211
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
-@lens = external global i8* ; <i8**> [#uses=1]
-@vals = external global i32* ; <i32**> [#uses=1]
+@lens = external global ptr ; <ptr> [#uses=1]
+@vals = external global ptr ; <ptr> [#uses=1]
define i32 @test(i32 %i) {
; CHECK-LABEL: test:
; CHECK-NEXT: sldi 3, 3, 2
; CHECK-NEXT: lwzx 3, 4, 3
; CHECK-NEXT: blr
- %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
- %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
+ %tmp = load ptr, ptr @lens ; <ptr> [#uses=1]
+ %tmp1 = getelementptr i8, ptr %tmp, i32 %i ; <ptr> [#uses=1]
+ %tmp.upgrd.1 = load i8, ptr %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
+ %tmp3 = load ptr, ptr @vals ; <ptr> [#uses=1]
%tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
- %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
+ %tmp6 = getelementptr i32, ptr %tmp3, i32 %tmp5 ; <ptr> [#uses=1]
+ %tmp7 = load i32, ptr %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
-define void @img2buf(i32 %symbol_size_in_bytes, i16* %ui16) nounwind {
- %tmp93 = load i16, i16* null ; <i16> [#uses=1]
+define void @img2buf(i32 %symbol_size_in_bytes, ptr %ui16) nounwind {
+ %tmp93 = load i16, ptr null ; <i16> [#uses=1]
%tmp99 = call i16 @llvm.bswap.i16( i16 %tmp93 ) ; <i16> [#uses=1]
- store i16 %tmp99, i16* %ui16
+ store i16 %tmp99, ptr %ui16
ret void
}
define fastcc void @immed_double_const(i32 %i0, i32 %i1) {
entry:
- %tmp1 = load i32, i32* null ; <i32> [#uses=1]
+ %tmp1 = load i32, ptr null ; <i32> [#uses=1]
switch i32 %tmp1, label %bb103 [
i32 1, label %bb
i32 3, label %bb
; RUN: llc < %s -mtriple=ppc64--
-define i32* @foo(i32 %n) {
- %A = alloca i32, i32 %n ; <i32*> [#uses=1]
- ret i32* %A
+define ptr @foo(i32 %n) {
+ %A = alloca i32, i32 %n ; <ptr> [#uses=1]
+ ret ptr %A
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | grep rlwimi
-define void @test(i16 %div.0.i.i.i.i, i32 %L_num.0.i.i.i.i, i32 %tmp1.i.i206.i.i, i16* %P) {
+define void @test(i16 %div.0.i.i.i.i, i32 %L_num.0.i.i.i.i, i32 %tmp1.i.i206.i.i, ptr %P) {
%X = shl i16 %div.0.i.i.i.i, 1 ; <i16> [#uses=1]
%tmp28.i.i.i.i = shl i32 %L_num.0.i.i.i.i, 1 ; <i32> [#uses=1]
%tmp31.i.i.i.i = icmp slt i32 %tmp28.i.i.i.i, %tmp1.i.i206.i.i ; <i1> [#uses=1]
%tmp31.i.i.i.i.upgrd.1 = zext i1 %tmp31.i.i.i.i to i16 ; <i16> [#uses=1]
%tmp371.i.i.i.i1 = or i16 %tmp31.i.i.i.i.upgrd.1, %X ; <i16> [#uses=1]
%div.0.be.i.i.i.i = xor i16 %tmp371.i.i.i.i1, 1 ; <i16> [#uses=1]
- store i16 %div.0.be.i.i.i.i, i16* %P
+ store i16 %div.0.be.i.i.i.i, ptr %P
ret void
}
%tmp3030030304.i = bitcast <4 x float> %tmp26355.i to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp30305.i = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp3030030304.i, <8 x i32> < i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15 > ; <<8 x i16>> [#uses=1]
%tmp30305.i.upgrd.1 = bitcast <8 x i16> %tmp30305.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp30305.i.upgrd.1, <4 x i32>* null
+ store <4 x i32> %tmp30305.i.upgrd.1, ptr null
ret void
}
define void @bitap() {
entry:
- %RMask.i = alloca [256 x i32], align 16 ; <[256 x i32]*> [#uses=1]
- %buffer = alloca [147456 x i8], align 16 ; <[147456 x i8]*> [#uses=0]
+ %RMask.i = alloca [256 x i32], align 16 ; <ptr> [#uses=1]
+ %buffer = alloca [147456 x i8], align 16 ; <ptr> [#uses=0]
br i1 false, label %bb19, label %bb.preheader
bb.preheader: ; preds = %entry
ret void
bb12.i: ; preds = %bb12.i, %bb19
%i.0.i = phi i32 [ %tmp11.i, %bb12.i ], [ 0, %bb19 ] ; <i32> [#uses=2]
%gep.upgrd.1 = zext i32 %i.0.i to i64 ; <i64> [#uses=1]
- %tmp9.i = getelementptr [256 x i32], [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp9.i
+ %tmp9.i = getelementptr [256 x i32], ptr %RMask.i, i32 0, i64 %gep.upgrd.1 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp9.i
%tmp11.i = add i32 %i.0.i, 1 ; <i32> [#uses=1]
br label %bb12.i
cond_next39: ; preds = %bb19
; RUN: llc -verify-machineinstrs < %s
; REQUIRES: default_triple
-@qsz.b = external global i1 ; <i1*> [#uses=1]
+@qsz.b = external global i1 ; <ptr> [#uses=1]
define fastcc void @qst() {
entry:
cond_true: ; preds = %entry
ret void
cond_next71: ; preds = %entry
- %tmp73.b = load i1, i1* @qsz.b ; <i1> [#uses=1]
+ %tmp73.b = load i1, ptr @qsz.b ; <i1> [#uses=1]
%ii.4.ph = select i1 %tmp73.b, i64 4, i64 0 ; <i64> [#uses=1]
br label %bb139
bb82: ; preds = %bb139
define i32 @foo() nounwind {
entry:
; CHECK: cntlzw 3, 3
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %temp = alloca i32, align 4 ; <i32*> [#uses=2]
- %ctz_x = alloca i32, align 4 ; <i32*> [#uses=3]
- %ctz_c = alloca i32, align 4 ; <i32*> [#uses=2]
- store i32 61440, i32* %ctz_x
- %tmp = load i32, i32* %ctz_x ; <i32> [#uses=1]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=2]
+ %temp = alloca i32, align 4 ; <ptr> [#uses=2]
+ %ctz_x = alloca i32, align 4 ; <ptr> [#uses=3]
+ %ctz_c = alloca i32, align 4 ; <ptr> [#uses=2]
+ store i32 61440, ptr %ctz_x
+ %tmp = load i32, ptr %ctz_x ; <i32> [#uses=1]
%tmp1 = sub i32 0, %tmp ; <i32> [#uses=1]
- %tmp2 = load i32, i32* %ctz_x ; <i32> [#uses=1]
+ %tmp2 = load i32, ptr %ctz_x ; <i32> [#uses=1]
%tmp3 = and i32 %tmp1, %tmp2 ; <i32> [#uses=1]
%tmp4 = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"( i32 %tmp3 ) ; <i32> [#uses=1]
- store i32 %tmp4, i32* %ctz_c
- %tmp5 = load i32, i32* %ctz_c ; <i32> [#uses=1]
- store i32 %tmp5, i32* %temp
- %tmp6 = load i32, i32* %temp ; <i32> [#uses=1]
- store i32 %tmp6, i32* %retval
+ store i32 %tmp4, ptr %ctz_c
+ %tmp5 = load i32, ptr %ctz_c ; <i32> [#uses=1]
+ store i32 %tmp5, ptr %temp
+ %tmp6 = load i32, ptr %temp ; <i32> [#uses=1]
+ store i32 %tmp6, ptr %retval
br label %return
return: ; preds = %entry
- %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval2
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64--
-define i16 @test(i8* %d1, i16* %d2) {
- %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( i8* %d1, i32 0, i16* %d2 ) ; <i16> [#uses=1]
+define i16 @test(ptr %d1, ptr %d2) {
+ %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( ptr %d1, i32 0, ptr %d2 ) ; <i16> [#uses=1]
ret i16 %tmp237
}
define void @test1() {
entry:
- %Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 1
- %tmp5 = call i32 asm "lwbrx $0, $1", "=r,m"(i32* %tmp2 )
+ %Out = alloca %struct.A, align 4 ; <ptr> [#uses=1]
+ %tmp2 = getelementptr %struct.A, ptr %Out, i32 0, i32 1
+ %tmp5 = call i32 asm "lwbrx $0, $1", "=r,m"(ptr %tmp2 )
ret void
}
define void @test2() {
entry:
- %Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp5 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,m"( i8* null, i32 0, i32* %tmp2 ) ; <i32> [#uses=0]
+ %Out = alloca %struct.A, align 4 ; <ptr> [#uses=1]
+ %tmp2 = getelementptr %struct.A, ptr %Out, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp5 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,m"( ptr null, i32 0, ptr %tmp2 ) ; <i32> [#uses=0]
ret void
}
; RUN: llc -verify-machineinstrs < %s | FileCheck %s
target triple = "powerpc-unknown-linux-gnu"
-@str = internal constant [18 x i8] c"hello world!, %d\0A\00" ; <[18 x i8]*> [#uses=1]
+@str = internal constant [18 x i8] c"hello world!, %d\0A\00" ; <ptr> [#uses=1]
define i32 @main() {
; CHECK: mflr
; CHECK-NOT: mflr
; CHECK: mtlr
- %tmp = tail call i32 (i8*, ...) @printf( i8* getelementptr ([18 x i8], [18 x i8]* @str, i32 0, i32 0) ) ; <i32> [#uses=0]
+ %tmp = tail call i32 (ptr, ...) @printf( ptr @str ) ; <i32> [#uses=0]
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
- %tmp19 = load i64, i64* %t
+define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(ptr %t) nounwind {
+ %tmp19 = load i64, ptr %t
%tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19, i1 true ) ; <i64> [#uses=1]
%tmp23 = trunc i64 %tmp22 to i32
%tmp89 = add i32 %tmp23, -64 ; <i32> [#uses=1]
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5
-define void @test(<4 x float>*, { { i16, i16, i32 } }*) {
+define void @test(ptr, ptr) {
xOperationInitMasks.exit:
- %.sub7896 = getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:551 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
- load <4 x float>, <4 x float>* %553 ; <<4 x float>>:554 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
+ %.sub7896 = getelementptr [4 x <4 x i32>], ptr null, i32 0, i32 0 ; <ptr> [#uses=24]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 175, i32 3 ; <ptr>:2 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 174, i32 2 ; <ptr>:3 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 174, i32 3 ; <ptr>:4 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 173, i32 1 ; <ptr>:5 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 173, i32 2 ; <ptr>:6 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 173, i32 3 ; <ptr>:7 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 172, i32 1 ; <ptr>:8 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 172, i32 2 ; <ptr>:9 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 172, i32 3 ; <ptr>:10 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 171, i32 1 ; <ptr>:11 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 171, i32 2 ; <ptr>:12 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 171, i32 3 ; <ptr>:13 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 170, i32 1 ; <ptr>:14 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 170, i32 2 ; <ptr>:15 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 170, i32 3 ; <ptr>:16 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 169, i32 1 ; <ptr>:17 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 169, i32 2 ; <ptr>:18 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 169, i32 3 ; <ptr>:19 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 168, i32 1 ; <ptr>:20 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 168, i32 2 ; <ptr>:21 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 168, i32 3 ; <ptr>:22 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 167, i32 1 ; <ptr>:23 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 167, i32 2 ; <ptr>:24 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 167, i32 3 ; <ptr>:25 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 166, i32 1 ; <ptr>:26 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 166, i32 2 ; <ptr>:27 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 166, i32 3 ; <ptr>:28 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 165, i32 1 ; <ptr>:29 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 165, i32 2 ; <ptr>:30 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 165, i32 3 ; <ptr>:31 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 164, i32 1 ; <ptr>:32 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 164, i32 2 ; <ptr>:33 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 164, i32 3 ; <ptr>:34 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 163, i32 1 ; <ptr>:35 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 163, i32 2 ; <ptr>:36 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 163, i32 3 ; <ptr>:37 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 162, i32 1 ; <ptr>:38 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 162, i32 2 ; <ptr>:39 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 162, i32 3 ; <ptr>:40 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 161, i32 1 ; <ptr>:41 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 161, i32 2 ; <ptr>:42 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 161, i32 3 ; <ptr>:43 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 160, i32 1 ; <ptr>:44 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 160, i32 2 ; <ptr>:45 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 160, i32 3 ; <ptr>:46 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 159, i32 1 ; <ptr>:47 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 159, i32 2 ; <ptr>:48 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 159, i32 3 ; <ptr>:49 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 158, i32 1 ; <ptr>:50 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 158, i32 2 ; <ptr>:51 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 158, i32 3 ; <ptr>:52 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 157, i32 1 ; <ptr>:53 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 157, i32 2 ; <ptr>:54 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 157, i32 3 ; <ptr>:55 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 156, i32 1 ; <ptr>:56 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 156, i32 2 ; <ptr>:57 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 156, i32 3 ; <ptr>:58 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 155, i32 1 ; <ptr>:59 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 155, i32 2 ; <ptr>:60 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 155, i32 3 ; <ptr>:61 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 154, i32 1 ; <ptr>:62 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 154, i32 2 ; <ptr>:63 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 154, i32 3 ; <ptr>:64 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 153, i32 1 ; <ptr>:65 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 153, i32 2 ; <ptr>:66 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 153, i32 3 ; <ptr>:67 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 152, i32 1 ; <ptr>:68 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 152, i32 2 ; <ptr>:69 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 152, i32 3 ; <ptr>:70 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 151, i32 1 ; <ptr>:71 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 151, i32 2 ; <ptr>:72 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 151, i32 3 ; <ptr>:73 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 150, i32 1 ; <ptr>:74 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 150, i32 2 ; <ptr>:75 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 150, i32 3 ; <ptr>:76 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 149, i32 1 ; <ptr>:77 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 149, i32 2 ; <ptr>:78 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 149, i32 3 ; <ptr>:79 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 148, i32 1 ; <ptr>:80 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 148, i32 2 ; <ptr>:81 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 148, i32 3 ; <ptr>:82 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 147, i32 1 ; <ptr>:83 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 147, i32 2 ; <ptr>:84 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 147, i32 3 ; <ptr>:85 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 146, i32 1 ; <ptr>:86 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 146, i32 2 ; <ptr>:87 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 146, i32 3 ; <ptr>:88 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 145, i32 1 ; <ptr>:89 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 145, i32 2 ; <ptr>:90 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 145, i32 3 ; <ptr>:91 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 144, i32 1 ; <ptr>:92 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 144, i32 2 ; <ptr>:93 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 144, i32 3 ; <ptr>:94 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 143, i32 1 ; <ptr>:95 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 143, i32 2 ; <ptr>:96 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 143, i32 3 ; <ptr>:97 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 142, i32 1 ; <ptr>:98 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 142, i32 2 ; <ptr>:99 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 142, i32 3 ; <ptr>:100 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 141, i32 1 ; <ptr>:101 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 141, i32 2 ; <ptr>:102 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 141, i32 3 ; <ptr>:103 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 140, i32 1 ; <ptr>:104 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 140, i32 2 ; <ptr>:105 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 140, i32 3 ; <ptr>:106 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 139, i32 1 ; <ptr>:107 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 139, i32 2 ; <ptr>:108 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 139, i32 3 ; <ptr>:109 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 138, i32 1 ; <ptr>:110 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 138, i32 2 ; <ptr>:111 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 138, i32 3 ; <ptr>:112 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 137, i32 1 ; <ptr>:113 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 137, i32 2 ; <ptr>:114 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 137, i32 3 ; <ptr>:115 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 136, i32 1 ; <ptr>:116 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 136, i32 2 ; <ptr>:117 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 136, i32 3 ; <ptr>:118 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 135, i32 1 ; <ptr>:119 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 135, i32 2 ; <ptr>:120 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 135, i32 3 ; <ptr>:121 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 134, i32 1 ; <ptr>:122 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 134, i32 2 ; <ptr>:123 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 134, i32 3 ; <ptr>:124 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 133, i32 1 ; <ptr>:125 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 133, i32 2 ; <ptr>:126 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 133, i32 3 ; <ptr>:127 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 132, i32 1 ; <ptr>:128 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 132, i32 2 ; <ptr>:129 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 132, i32 3 ; <ptr>:130 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 131, i32 1 ; <ptr>:131 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 131, i32 2 ; <ptr>:132 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 131, i32 3 ; <ptr>:133 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 130, i32 1 ; <ptr>:134 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 130, i32 2 ; <ptr>:135 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 130, i32 3 ; <ptr>:136 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 129, i32 1 ; <ptr>:137 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 129, i32 2 ; <ptr>:138 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 129, i32 3 ; <ptr>:139 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 128, i32 1 ; <ptr>:140 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 128, i32 2 ; <ptr>:141 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 128, i32 3 ; <ptr>:142 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 127, i32 1 ; <ptr>:143 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 127, i32 2 ; <ptr>:144 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 127, i32 3 ; <ptr>:145 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 126, i32 1 ; <ptr>:146 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 126, i32 2 ; <ptr>:147 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 126, i32 3 ; <ptr>:148 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 125, i32 1 ; <ptr>:149 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 125, i32 2 ; <ptr>:150 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 125, i32 3 ; <ptr>:151 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 124, i32 1 ; <ptr>:152 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 124, i32 2 ; <ptr>:153 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 124, i32 3 ; <ptr>:154 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 123, i32 1 ; <ptr>:155 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 123, i32 2 ; <ptr>:156 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 123, i32 3 ; <ptr>:157 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 122, i32 1 ; <ptr>:158 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 122, i32 2 ; <ptr>:159 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 122, i32 3 ; <ptr>:160 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 121, i32 1 ; <ptr>:161 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 121, i32 2 ; <ptr>:162 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 121, i32 3 ; <ptr>:163 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 120, i32 1 ; <ptr>:164 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 120, i32 2 ; <ptr>:165 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 120, i32 3 ; <ptr>:166 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 119, i32 1 ; <ptr>:167 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 119, i32 2 ; <ptr>:168 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 119, i32 3 ; <ptr>:169 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 118, i32 1 ; <ptr>:170 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 118, i32 2 ; <ptr>:171 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 118, i32 3 ; <ptr>:172 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 117, i32 1 ; <ptr>:173 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 117, i32 2 ; <ptr>:174 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 117, i32 3 ; <ptr>:175 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 116, i32 1 ; <ptr>:176 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 116, i32 2 ; <ptr>:177 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 116, i32 3 ; <ptr>:178 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 115, i32 1 ; <ptr>:179 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 115, i32 2 ; <ptr>:180 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 115, i32 3 ; <ptr>:181 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 114, i32 1 ; <ptr>:182 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 114, i32 2 ; <ptr>:183 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 114, i32 3 ; <ptr>:184 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 113, i32 1 ; <ptr>:185 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 113, i32 2 ; <ptr>:186 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 113, i32 3 ; <ptr>:187 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 112, i32 1 ; <ptr>:188 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 112, i32 2 ; <ptr>:189 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 112, i32 3 ; <ptr>:190 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 111, i32 1 ; <ptr>:191 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 111, i32 2 ; <ptr>:192 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 111, i32 3 ; <ptr>:193 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 110, i32 1 ; <ptr>:194 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 110, i32 2 ; <ptr>:195 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 110, i32 3 ; <ptr>:196 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 109, i32 1 ; <ptr>:197 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 109, i32 2 ; <ptr>:198 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 109, i32 3 ; <ptr>:199 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 108, i32 1 ; <ptr>:200 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 108, i32 2 ; <ptr>:201 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 108, i32 3 ; <ptr>:202 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 107, i32 1 ; <ptr>:203 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 107, i32 2 ; <ptr>:204 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 107, i32 3 ; <ptr>:205 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 106, i32 1 ; <ptr>:206 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 106, i32 2 ; <ptr>:207 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 106, i32 3 ; <ptr>:208 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 105, i32 1 ; <ptr>:209 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 105, i32 2 ; <ptr>:210 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 105, i32 3 ; <ptr>:211 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 104, i32 1 ; <ptr>:212 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 104, i32 2 ; <ptr>:213 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 104, i32 3 ; <ptr>:214 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 103, i32 1 ; <ptr>:215 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 103, i32 2 ; <ptr>:216 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 103, i32 3 ; <ptr>:217 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 102, i32 1 ; <ptr>:218 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 102, i32 2 ; <ptr>:219 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 102, i32 3 ; <ptr>:220 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 101, i32 1 ; <ptr>:221 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 101, i32 2 ; <ptr>:222 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 101, i32 3 ; <ptr>:223 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 100, i32 1 ; <ptr>:224 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 100, i32 2 ; <ptr>:225 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 100, i32 3 ; <ptr>:226 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 99, i32 1 ; <ptr>:227 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 99, i32 2 ; <ptr>:228 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 99, i32 3 ; <ptr>:229 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 98, i32 1 ; <ptr>:230 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 98, i32 2 ; <ptr>:231 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 98, i32 3 ; <ptr>:232 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 97, i32 1 ; <ptr>:233 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 97, i32 2 ; <ptr>:234 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 97, i32 3 ; <ptr>:235 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 96, i32 1 ; <ptr>:236 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 96, i32 2 ; <ptr>:237 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 96, i32 3 ; <ptr>:238 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 95, i32 1 ; <ptr>:239 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 95, i32 2 ; <ptr>:240 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 95, i32 3 ; <ptr>:241 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 94, i32 1 ; <ptr>:242 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 94, i32 2 ; <ptr>:243 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 94, i32 3 ; <ptr>:244 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 93, i32 1 ; <ptr>:245 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 93, i32 2 ; <ptr>:246 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 93, i32 3 ; <ptr>:247 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 92, i32 1 ; <ptr>:248 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 92, i32 2 ; <ptr>:249 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 92, i32 3 ; <ptr>:250 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 91, i32 1 ; <ptr>:251 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 91, i32 2 ; <ptr>:252 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 91, i32 3 ; <ptr>:253 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 90, i32 1 ; <ptr>:254 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 90, i32 2 ; <ptr>:255 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 90, i32 3 ; <ptr>:256 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 89, i32 1 ; <ptr>:257 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 89, i32 2 ; <ptr>:258 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 89, i32 3 ; <ptr>:259 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 88, i32 1 ; <ptr>:260 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 88, i32 2 ; <ptr>:261 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 88, i32 3 ; <ptr>:262 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 87, i32 1 ; <ptr>:263 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 87, i32 2 ; <ptr>:264 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 87, i32 3 ; <ptr>:265 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 86, i32 1 ; <ptr>:266 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 86, i32 2 ; <ptr>:267 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 86, i32 3 ; <ptr>:268 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 85, i32 1 ; <ptr>:269 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 85, i32 2 ; <ptr>:270 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 85, i32 3 ; <ptr>:271 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 84, i32 1 ; <ptr>:272 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 84, i32 2 ; <ptr>:273 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 84, i32 3 ; <ptr>:274 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 83, i32 1 ; <ptr>:275 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 83, i32 2 ; <ptr>:276 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 83, i32 3 ; <ptr>:277 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 82, i32 1 ; <ptr>:278 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 82, i32 2 ; <ptr>:279 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 82, i32 3 ; <ptr>:280 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 81, i32 1 ; <ptr>:281 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 81, i32 2 ; <ptr>:282 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 81, i32 3 ; <ptr>:283 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 80, i32 1 ; <ptr>:284 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 80, i32 2 ; <ptr>:285 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 80, i32 3 ; <ptr>:286 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 79, i32 1 ; <ptr>:287 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 79, i32 2 ; <ptr>:288 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 79, i32 3 ; <ptr>:289 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 78, i32 1 ; <ptr>:290 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 78, i32 2 ; <ptr>:291 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 78, i32 3 ; <ptr>:292 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 77, i32 1 ; <ptr>:293 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 77, i32 2 ; <ptr>:294 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 77, i32 3 ; <ptr>:295 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 76, i32 1 ; <ptr>:296 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 76, i32 2 ; <ptr>:297 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 76, i32 3 ; <ptr>:298 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 75, i32 1 ; <ptr>:299 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 75, i32 2 ; <ptr>:300 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 75, i32 3 ; <ptr>:301 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 74, i32 1 ; <ptr>:302 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 74, i32 2 ; <ptr>:303 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 74, i32 3 ; <ptr>:304 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 73, i32 1 ; <ptr>:305 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 73, i32 2 ; <ptr>:306 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 73, i32 3 ; <ptr>:307 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 72, i32 1 ; <ptr>:308 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 72, i32 2 ; <ptr>:309 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 72, i32 3 ; <ptr>:310 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 71, i32 1 ; <ptr>:311 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 71, i32 2 ; <ptr>:312 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 71, i32 3 ; <ptr>:313 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 70, i32 1 ; <ptr>:314 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 70, i32 2 ; <ptr>:315 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 70, i32 3 ; <ptr>:316 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 69, i32 1 ; <ptr>:317 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 69, i32 2 ; <ptr>:318 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 69, i32 3 ; <ptr>:319 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 68, i32 1 ; <ptr>:320 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 68, i32 2 ; <ptr>:321 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 68, i32 3 ; <ptr>:322 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 67, i32 1 ; <ptr>:323 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 67, i32 2 ; <ptr>:324 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 67, i32 3 ; <ptr>:325 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 66, i32 1 ; <ptr>:326 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 66, i32 2 ; <ptr>:327 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 66, i32 3 ; <ptr>:328 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 65, i32 1 ; <ptr>:329 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 65, i32 2 ; <ptr>:330 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 65, i32 3 ; <ptr>:331 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 64, i32 1 ; <ptr>:332 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 64, i32 2 ; <ptr>:333 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 64, i32 3 ; <ptr>:334 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 63, i32 1 ; <ptr>:335 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 63, i32 2 ; <ptr>:336 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 63, i32 3 ; <ptr>:337 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 62, i32 1 ; <ptr>:338 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 62, i32 2 ; <ptr>:339 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 62, i32 3 ; <ptr>:340 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 61, i32 1 ; <ptr>:341 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 61, i32 2 ; <ptr>:342 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 61, i32 3 ; <ptr>:343 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 60, i32 1 ; <ptr>:344 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 60, i32 2 ; <ptr>:345 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 60, i32 3 ; <ptr>:346 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 59, i32 1 ; <ptr>:347 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 59, i32 2 ; <ptr>:348 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 59, i32 3 ; <ptr>:349 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 58, i32 1 ; <ptr>:350 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 58, i32 2 ; <ptr>:351 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 58, i32 3 ; <ptr>:352 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 57, i32 1 ; <ptr>:353 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 57, i32 2 ; <ptr>:354 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 57, i32 3 ; <ptr>:355 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 56, i32 1 ; <ptr>:356 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 56, i32 2 ; <ptr>:357 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 56, i32 3 ; <ptr>:358 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 55, i32 1 ; <ptr>:359 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 55, i32 2 ; <ptr>:360 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 55, i32 3 ; <ptr>:361 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 54, i32 1 ; <ptr>:362 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 54, i32 2 ; <ptr>:363 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 54, i32 3 ; <ptr>:364 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 53, i32 1 ; <ptr>:365 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 53, i32 2 ; <ptr>:366 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 53, i32 3 ; <ptr>:367 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 52, i32 1 ; <ptr>:368 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 52, i32 2 ; <ptr>:369 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 52, i32 3 ; <ptr>:370 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 51, i32 1 ; <ptr>:371 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 51, i32 2 ; <ptr>:372 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 51, i32 3 ; <ptr>:373 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 50, i32 1 ; <ptr>:374 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 50, i32 2 ; <ptr>:375 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 50, i32 3 ; <ptr>:376 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 49, i32 1 ; <ptr>:377 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 49, i32 2 ; <ptr>:378 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 49, i32 3 ; <ptr>:379 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 48, i32 1 ; <ptr>:380 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 48, i32 2 ; <ptr>:381 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 48, i32 3 ; <ptr>:382 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 47, i32 1 ; <ptr>:383 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 47, i32 2 ; <ptr>:384 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 47, i32 3 ; <ptr>:385 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 46, i32 1 ; <ptr>:386 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 46, i32 2 ; <ptr>:387 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 46, i32 3 ; <ptr>:388 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 45, i32 1 ; <ptr>:389 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 45, i32 2 ; <ptr>:390 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 45, i32 3 ; <ptr>:391 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 44, i32 1 ; <ptr>:392 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 44, i32 2 ; <ptr>:393 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 44, i32 3 ; <ptr>:394 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 43, i32 1 ; <ptr>:395 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 43, i32 2 ; <ptr>:396 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 43, i32 3 ; <ptr>:397 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 42, i32 1 ; <ptr>:398 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 42, i32 2 ; <ptr>:399 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 42, i32 3 ; <ptr>:400 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 41, i32 1 ; <ptr>:401 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 41, i32 2 ; <ptr>:402 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 41, i32 3 ; <ptr>:403 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 40, i32 1 ; <ptr>:404 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 40, i32 2 ; <ptr>:405 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 40, i32 3 ; <ptr>:406 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 39, i32 1 ; <ptr>:407 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 39, i32 2 ; <ptr>:408 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 39, i32 3 ; <ptr>:409 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 38, i32 1 ; <ptr>:410 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 38, i32 2 ; <ptr>:411 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 38, i32 3 ; <ptr>:412 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 37, i32 1 ; <ptr>:413 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 37, i32 2 ; <ptr>:414 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 37, i32 3 ; <ptr>:415 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 36, i32 1 ; <ptr>:416 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 36, i32 2 ; <ptr>:417 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 36, i32 3 ; <ptr>:418 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 35, i32 1 ; <ptr>:419 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 35, i32 2 ; <ptr>:420 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 35, i32 3 ; <ptr>:421 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 34, i32 1 ; <ptr>:422 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 34, i32 2 ; <ptr>:423 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 34, i32 3 ; <ptr>:424 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 33, i32 1 ; <ptr>:425 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 33, i32 2 ; <ptr>:426 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 33, i32 3 ; <ptr>:427 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 32, i32 1 ; <ptr>:428 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 32, i32 2 ; <ptr>:429 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 32, i32 3 ; <ptr>:430 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 31, i32 1 ; <ptr>:431 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 31, i32 2 ; <ptr>:432 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 31, i32 3 ; <ptr>:433 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 30, i32 1 ; <ptr>:434 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 30, i32 2 ; <ptr>:435 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 30, i32 3 ; <ptr>:436 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 29, i32 1 ; <ptr>:437 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 29, i32 2 ; <ptr>:438 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 29, i32 3 ; <ptr>:439 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 28, i32 1 ; <ptr>:440 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 28, i32 2 ; <ptr>:441 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 28, i32 3 ; <ptr>:442 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 27, i32 1 ; <ptr>:443 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 27, i32 2 ; <ptr>:444 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 27, i32 3 ; <ptr>:445 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 26, i32 1 ; <ptr>:446 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 26, i32 2 ; <ptr>:447 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 26, i32 3 ; <ptr>:448 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 25, i32 1 ; <ptr>:449 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 25, i32 2 ; <ptr>:450 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 25, i32 3 ; <ptr>:451 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 24, i32 1 ; <ptr>:452 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 24, i32 2 ; <ptr>:453 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 24, i32 3 ; <ptr>:454 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 23, i32 1 ; <ptr>:455 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 23, i32 2 ; <ptr>:456 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 23, i32 3 ; <ptr>:457 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 22, i32 1 ; <ptr>:458 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 22, i32 2 ; <ptr>:459 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 22, i32 3 ; <ptr>:460 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 21, i32 1 ; <ptr>:461 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 21, i32 2 ; <ptr>:462 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 21, i32 3 ; <ptr>:463 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 20, i32 1 ; <ptr>:464 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 20, i32 2 ; <ptr>:465 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 20, i32 3 ; <ptr>:466 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 19, i32 1 ; <ptr>:467 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 19, i32 2 ; <ptr>:468 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 19, i32 3 ; <ptr>:469 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 18, i32 1 ; <ptr>:470 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 18, i32 2 ; <ptr>:471 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 18, i32 3 ; <ptr>:472 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 17, i32 1 ; <ptr>:473 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 17, i32 2 ; <ptr>:474 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 17, i32 3 ; <ptr>:475 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 16, i32 1 ; <ptr>:476 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 16, i32 2 ; <ptr>:477 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 16, i32 3 ; <ptr>:478 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 15, i32 1 ; <ptr>:479 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 15, i32 2 ; <ptr>:480 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 15, i32 3 ; <ptr>:481 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 14, i32 1 ; <ptr>:482 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 14, i32 2 ; <ptr>:483 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 14, i32 3 ; <ptr>:484 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:485 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:486 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:487 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 1 ; <ptr>:488 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 2 ; <ptr>:489 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 3 ; <ptr>:490 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 180, i32 1 ; <ptr>:491 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 180, i32 2 ; <ptr>:492 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 180, i32 3 ; <ptr>:493 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 181, i32 1 ; <ptr>:494 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 181, i32 2 ; <ptr>:495 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 181, i32 3 ; <ptr>:496 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 182, i32 1 ; <ptr>:497 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 182, i32 2 ; <ptr>:498 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 182, i32 3 ; <ptr>:499 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 183, i32 1 ; <ptr>:500 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 183, i32 2 ; <ptr>:501 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 183, i32 3 ; <ptr>:502 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 184, i32 1 ; <ptr>:503 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 184, i32 2 ; <ptr>:504 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 184, i32 3 ; <ptr>:505 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 185, i32 1 ; <ptr>:506 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 185, i32 2 ; <ptr>:507 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 185, i32 3 ; <ptr>:508 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 190, i32 1 ; <ptr>:509 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 190, i32 2 ; <ptr>:510 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 190, i32 3 ; <ptr>:511 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 9, i32 1 ; <ptr>:512 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 9, i32 2 ; <ptr>:513 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 9, i32 3 ; <ptr>:514 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 10, i32 1 ; <ptr>:515 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 10, i32 2 ; <ptr>:516 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 10, i32 3 ; <ptr>:517 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 11, i32 1 ; <ptr>:518 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 11, i32 2 ; <ptr>:519 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 11, i32 3 ; <ptr>:520 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 12, i32 1 ; <ptr>:521 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 12, i32 2 ; <ptr>:522 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 12, i32 3 ; <ptr>:523 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 13, i32 1 ; <ptr>:524 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 13, i32 2 ; <ptr>:525 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 13, i32 3 ; <ptr>:526 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 1 ; <ptr>:527 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 2 ; <ptr>:528 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 3 ; <ptr>:529 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 1 ; <ptr>:530 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:531 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:532 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 1 ; <ptr>:533 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 2 ; <ptr>:534 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 3 ; <ptr>:535 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 6, i32 1 ; <ptr>:536 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 6, i32 2 ; <ptr>:537 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 6, i32 3 ; <ptr>:538 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 7, i32 1 ; <ptr>:539 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 7, i32 2 ; <ptr>:540 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 7, i32 3 ; <ptr>:541 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 1 ; <ptr>:542 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 2 ; <ptr>:543 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 3 ; <ptr>:544 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 187, i32 1 ; <ptr>:545 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 187, i32 2 ; <ptr>:546 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 187, i32 3 ; <ptr>:547 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 8, i32 1 ; <ptr>:548 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 8, i32 2 ; <ptr>:549 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 8, i32 3 ; <ptr>:550 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:551 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 188, i32 1 ; <ptr>:552 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 188, i32 2 ; <ptr>:553 [#uses=1]
+ load <4 x float>, ptr %553 ; <<4 x float>>:554 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 188, i32 3 ; <ptr>:555 [#uses=0]
shufflevector <4 x float> %554, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:556 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> zeroinitializer, <4 x float> %556 ) ; <<4 x i32>>:557 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:558 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %560
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:564 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 0 ; <ptr>:559 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 2 ; <ptr>:560 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %560
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 3 ; <ptr>:561 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:562 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 2 ; <ptr>:563 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:564 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:565 [#uses=1]
- store <4 x float> %565, <4 x float>* null
+ store <4 x float> %565, ptr null
icmp eq i32 0, 0 ; <i1>:566 [#uses=1]
br i1 %566, label %.critedge, label %xPIF.exit
.critedge: ; preds = %xOperationInitMasks.exit
- getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
+ getelementptr [4 x <4 x i32>], ptr null, i32 0, i32 3 ; <ptr>:567 [#uses=0]
and <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:568 [#uses=0]
or <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:569 [#uses=0]
icmp eq i32 0, 0 ; <i1>:570 [#uses=1]
br label %xPIF.exit
xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:572 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 188, i32 1 ; <ptr>:571 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:572 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:573 [#uses=0]
icmp eq i32 0, 0 ; <i1>:574 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
- load <4 x float>, <4 x float>* %0 ; <<4 x float>>:576 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 1 ; <ptr>:575 [#uses=0]
+ load <4 x float>, ptr %0 ; <<4 x float>>:576 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:577 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:583 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
- load <4 x float>, <4 x float>* %584 ; <<4 x float>>:585 [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:586 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
- load <4 x float>, <4 x float>* %587 ; <<4 x float>>:588 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 0 ; <ptr>:578 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 1 ; <ptr>:579 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 2 ; <ptr>:580 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 3 ; <ptr>:581 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:582 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:583 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:584 [#uses=1]
+ load <4 x float>, ptr %584 ; <<4 x float>>:585 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:586 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:587 [#uses=1]
+ load <4 x float>, ptr %587 ; <<4 x float>>:588 [#uses=1]
shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1]
shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1]
shufflevector <4 x float> %588, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:591 [#uses=1]
fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
- load <4 x float>, <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %596
- load <4 x float>, <4 x float>* null ; <<4 x float>>:598 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 0 ; <ptr>:596 [#uses=2]
+ load <4 x float>, ptr %596 ; <<4 x float>>:597 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %596
+ load <4 x float>, ptr null ; <<4 x float>>:598 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:599 [#uses=0]
shufflevector <4 x float> %594, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:600 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
- load <4 x float>, <4 x float>* %601 ; <<4 x float>>:602 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %601
- load <4 x float>, <4 x float>* null ; <<4 x float>>:603 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:604 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
- load <4 x float>, <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:601 [#uses=2]
+ load <4 x float>, ptr %601 ; <<4 x float>>:602 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %601
+ load <4 x float>, ptr null ; <<4 x float>>:603 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:604 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:605 [#uses=1]
+ load <4 x float>, ptr %605 ; <<4 x float>>:606 [#uses=1]
fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:609 [#uses=0]
br i1 false, label %617, label %610
; <label>:610 ; preds = %xPIF.exit
- load <4 x float>, <4 x float>* null ; <<4 x float>>:611 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
- load <4 x float>, <4 x float>* %612 ; <<4 x float>>:613 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:611 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:612 [#uses=2]
+ load <4 x float>, ptr %612 ; <<4 x float>>:613 [#uses=1]
shufflevector <4 x float> %607, <4 x float> %613, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:614 [#uses=1]
- store <4 x float> %614, <4 x float>* %612
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
- load <4 x float>, <4 x float>* %615 ; <<4 x float>>:616 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %615
+ store <4 x float> %614, ptr %612
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:615 [#uses=2]
+ load <4 x float>, ptr %615 ; <<4 x float>>:616 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %615
br label %xST.exit400
; <label>:617 ; preds = %xPIF.exit
br i1 %621, label %625, label %622
; <label>:622 ; preds = %617
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:623 [#uses=0]
shufflevector <4 x float> %607, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:624 [#uses=0]
br label %625
; <label>:625 ; preds = %622, %617
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:626 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:626 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:627 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
- load <4 x float>, <4 x float>* %628 ; <<4 x float>>:629 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:630 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:628 [#uses=1]
+ load <4 x float>, ptr %628 ; <<4 x float>>:629 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:630 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:631 [#uses=1]
icmp eq i32 %631, 0 ; <i1>:632 [#uses=1]
br i1 %632, label %xST.exit400, label %633
; <label>:633 ; preds = %625
- load <4 x float>, <4 x float>* null ; <<4 x float>>:634 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:634 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %634, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:635 [#uses=1]
- store <4 x float> %635, <4 x float>* null
+ store <4 x float> %635, ptr null
br label %xST.exit400
xST.exit400: ; preds = %633, %625, %610
%.17218 = phi <4 x float> [ zeroinitializer, %610 ], [ %608, %633 ], [ %608, %625 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
- load <4 x float>, <4 x float>* %636 ; <<4 x float>>:637 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:638 [#uses=2]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:640 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 0 ; <ptr>:636 [#uses=1]
+ load <4 x float>, ptr %636 ; <<4 x float>>:637 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:638 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:639 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:640 [#uses=2]
fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
fmul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2]
br i1 %656, label %665, label %657
; <label>:657 ; preds = %xST.exit400
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 0 ; <ptr>:658 [#uses=0]
shufflevector <4 x float> %653, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:659 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
- load <4 x float>, <4 x float>* %660 ; <<4 x float>>:661 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:660 [#uses=1]
+ load <4 x float>, ptr %660 ; <<4 x float>>:661 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:662 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:663 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:664 [#uses=0]
br label %xST.exit402
br i1 false, label %669, label %667
; <label>:667 ; preds = %665
- load <4 x float>, <4 x float>* null ; <<4 x float>>:668 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:668 [#uses=0]
br label %669
; <label>:669 ; preds = %667, %665
br label %xST.exit402
xST.exit402: ; preds = %669, %657
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:672 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
- load <4 x float>, <4 x float>* %674 ; <<4 x float>>:675 [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:676 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 0 ; <ptr>:671 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:672 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:673 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:674 [#uses=1]
+ load <4 x float>, ptr %674 ; <<4 x float>>:675 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:676 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1]
shufflevector <4 x float> %675, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:678 [#uses=1]
fmul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0]
br i1 %682, label %689, label %683
; <label>:683 ; preds = %xST.exit402
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
- load <4 x float>, <4 x float>* %684 ; <<4 x float>>:685 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 1 ; <ptr>:684 [#uses=1]
+ load <4 x float>, ptr %684 ; <<4 x float>>:685 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 2 ; <ptr>:686 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 3 ; <ptr>:687 [#uses=0]
shufflevector <4 x float> %681, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:688 [#uses=0]
br label %xST.exit405
; <label>:689 ; preds = %xST.exit402
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:690 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:691 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:691 [#uses=1]
shufflevector <4 x i32> %691, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:692 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %692, <4 x i32> zeroinitializer ) ; <i32>:693 [#uses=1]
icmp eq i32 %693, 0 ; <i1>:694 [#uses=0]
br label %xST.exit405
xST.exit405: ; preds = %689, %683
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:695 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:696 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:697 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:698 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:698 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 2 ; <ptr>:699 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1]
fadd <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:702 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
- load <4 x float>, <4 x float>* %704 ; <<4 x float>>:705 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %704
- load <4 x float>, <4 x float>* null ; <<4 x float>>:706 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
- load <4 x float>, <4 x float>* %707 ; <<4 x float>>:708 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %707
- load <4 x float>, <4 x float>* null ; <<4 x float>>:709 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:710 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:711 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 1 ; <ptr>:704 [#uses=2]
+ load <4 x float>, ptr %704 ; <<4 x float>>:705 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %704
+ load <4 x float>, ptr null ; <<4 x float>>:706 [#uses=0]
+ store <4 x float> zeroinitializer, ptr null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:707 [#uses=2]
+ load <4 x float>, ptr %707 ; <<4 x float>>:708 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %707
+ load <4 x float>, ptr null ; <<4 x float>>:709 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:710 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:711 [#uses=1]
shufflevector <4 x float> %711, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:712 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
- load <4 x float>, <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:713 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:714 [#uses=1]
+ load <4 x float>, ptr %714 ; <<4 x float>>:715 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %719
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:718 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 0 ; <ptr>:719 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %719
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 1 ; <ptr>:720 [#uses=1]
shufflevector <4 x float> %717, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:721 [#uses=1]
- store <4 x float> %721, <4 x float>* %720
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
- load <4 x float>, <4 x float>* %722 ; <<4 x float>>:723 [#uses=1]
+ store <4 x float> %721, ptr %720
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 2 ; <ptr>:722 [#uses=1]
+ load <4 x float>, ptr %722 ; <<4 x float>>:723 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %723, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:724 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %725
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
- load <4 x float>, <4 x float>* %726 ; <<4 x float>>:727 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
- load <4 x float>, <4 x float>* %728 ; <<4 x float>>:729 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
- load <4 x float>, <4 x float>* %730 ; <<4 x float>>:731 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
- load <4 x float>, <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 3 ; <ptr>:725 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %725
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 2 ; <ptr>:726 [#uses=1]
+ load <4 x float>, ptr %726 ; <<4 x float>>:727 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 5, i32 3 ; <ptr>:728 [#uses=1]
+ load <4 x float>, ptr %728 ; <<4 x float>>:729 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 0 ; <ptr>:730 [#uses=1]
+ load <4 x float>, ptr %730 ; <<4 x float>>:731 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:732 [#uses=1]
+ load <4 x float>, ptr %732 ; <<4 x float>>:733 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:734 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1]
icmp eq i32 %740, 0 ; <i1>:741 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
- load <4 x float>, <4 x float>* %742 ; <<4 x float>>:743 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 0 ; <ptr>:742 [#uses=2]
+ load <4 x float>, ptr %742 ; <<4 x float>>:743 [#uses=1]
shufflevector <4 x float> %736, <4 x float> %743, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:744 [#uses=1]
- store <4 x float> %744, <4 x float>* %742
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
- load <4 x float>, <4 x float>* %745 ; <<4 x float>>:746 [#uses=1]
+ store <4 x float> %744, ptr %742
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:745 [#uses=1]
+ load <4 x float>, ptr %745 ; <<4 x float>>:746 [#uses=1]
shufflevector <4 x float> %737, <4 x float> %746, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:747 [#uses=0]
shufflevector <4 x float> %738, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:748 [#uses=1]
- store <4 x float> %748, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
- load <4 x float>, <4 x float>* %749 ; <<4 x float>>:750 [#uses=1]
+ store <4 x float> %748, ptr null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:749 [#uses=1]
+ load <4 x float>, ptr %749 ; <<4 x float>>:750 [#uses=1]
shufflevector <4 x float> %739, <4 x float> %750, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:751 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
- load <4 x float>, <4 x float>* %753 ; <<4 x float>>:754 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:756 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 0 ; <ptr>:752 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 1 ; <ptr>:753 [#uses=1]
+ load <4 x float>, ptr %753 ; <<4 x float>>:754 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:755 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:756 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:757 [#uses=1]
shufflevector <4 x float> %756, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:758 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
- load <4 x float>, <4 x float>* %759 ; <<4 x float>>:760 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:759 [#uses=1]
+ load <4 x float>, ptr %759 ; <<4 x float>>:760 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:761 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1]
fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
br i1 false, label %773, label %767
; <label>:767 ; preds = %xST.exit405
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:769 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:768 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:769 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %769, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:770 [#uses=1]
- store <4 x float> %770, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
- load <4 x float>, <4 x float>* %771 ; <<4 x float>>:772 [#uses=0]
+ store <4 x float> %770, ptr null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:771 [#uses=1]
+ load <4 x float>, ptr %771 ; <<4 x float>>:772 [#uses=0]
br label %xST.exit422
; <label>:773 ; preds = %xST.exit405
xST.exit422: ; preds = %773, %767
%.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:774 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
icmp eq i32 0, 0 ; <i1>:776 [#uses=1]
br i1 %776, label %780, label %777
; <label>:777 ; preds = %xST.exit422
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:778 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:779 [#uses=0]
br label %xST.exit431
; <label>:780 ; preds = %xST.exit422
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:781 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
- load <4 x float>, <4 x float>* %782 ; <<4 x float>>:783 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %782
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:784 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:781 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:782 [#uses=2]
+ load <4 x float>, ptr %782 ; <<4 x float>>:783 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %782
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:784 [#uses=1]
shufflevector <4 x i32> %784, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:785 [#uses=0]
icmp eq i32 0, 0 ; <i1>:786 [#uses=0]
br label %xST.exit431
xST.exit431: ; preds = %780, %777
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:788 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:789 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:787 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:788 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:789 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %789, <4 x i32> zeroinitializer ) ; <i32>:790 [#uses=1]
icmp eq i32 %790, 0 ; <i1>:791 [#uses=0]
shufflevector <4 x i32> %789, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:792 [#uses=1]
br i1 %794, label %797, label %795
; <label>:795 ; preds = %xST.exit431
- load <4 x float>, <4 x float>* null ; <<4 x float>>:796 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
+ load <4 x float>, ptr null ; <<4 x float>>:796 [#uses=0]
+ store <4 x float> zeroinitializer, ptr null
br label %797
; <label>:797 ; preds = %795, %xST.exit431
br i1 false, label %xST.exit434, label %799
; <label>:799 ; preds = %797
- load <4 x float>, <4 x float>* null ; <<4 x float>>:800 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
+ load <4 x float>, ptr null ; <<4 x float>>:800 [#uses=0]
+ store <4 x float> zeroinitializer, ptr null
br label %xST.exit434
xST.exit434: ; preds = %799, %797
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:801 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:801 [#uses=1]
shufflevector <4 x i32> %801, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:802 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:803 [#uses=0]
icmp eq i32 0, 0 ; <i1>:804 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
- load <4 x float>, <4 x float>* %805 ; <<4 x float>>:806 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
- load <4 x float>, <4 x float>* %807 ; <<4 x float>>:808 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:809 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:810 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
- load <4 x float>, <4 x float>* %812 ; <<4 x float>>:813 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
- load <4 x float>, <4 x float>* %814 ; <<4 x float>>:815 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 0 ; <ptr>:805 [#uses=1]
+ load <4 x float>, ptr %805 ; <<4 x float>>:806 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:807 [#uses=1]
+ load <4 x float>, ptr %807 ; <<4 x float>>:808 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:809 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:810 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 0 ; <ptr>:811 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:812 [#uses=1]
+ load <4 x float>, ptr %812 ; <<4 x float>>:813 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:814 [#uses=1]
+ load <4 x float>, ptr %814 ; <<4 x float>>:815 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:816 [#uses=0]
unreachable
xPBRK.exit: ; preds = %.critedge
- store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* %.sub7896
- store <4 x i32> zeroinitializer, <4 x i32>* null
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
- load <4 x float>, <4 x float>* %817 ; <<4 x float>>:818 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
- load <4 x float>, <4 x float>* %819 ; <<4 x float>>:820 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
- load <4 x float>, <4 x float>* %821 ; <<4 x float>>:822 [#uses=1]
+ store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, ptr %.sub7896
+ store <4 x i32> zeroinitializer, ptr null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 1 ; <ptr>:817 [#uses=1]
+ load <4 x float>, ptr %817 ; <<4 x float>>:818 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 2 ; <ptr>:819 [#uses=1]
+ load <4 x float>, ptr %819 ; <<4 x float>>:820 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 3 ; <ptr>:821 [#uses=1]
+ load <4 x float>, ptr %821 ; <<4 x float>>:822 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:823 [#uses=1]
shufflevector <4 x float> %818, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:824 [#uses=1]
shufflevector <4 x float> %820, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:825 [#uses=1]
shufflevector <4 x float> %822, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:826 [#uses=1]
shufflevector <4 x float> %823, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:827 [#uses=0]
shufflevector <4 x float> %824, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:828 [#uses=1]
- store <4 x float> %828, <4 x float>* null
- load <4 x float>, <4 x float>* null ; <<4 x float>>:829 [#uses=1]
+ store <4 x float> %828, ptr null
+ load <4 x float>, ptr null ; <<4 x float>>:829 [#uses=1]
shufflevector <4 x float> %825, <4 x float> %829, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:830 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
- load <4 x float>, <4 x float>* %831 ; <<4 x float>>:832 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 3 ; <ptr>:831 [#uses=2]
+ load <4 x float>, ptr %831 ; <<4 x float>>:832 [#uses=1]
shufflevector <4 x float> %826, <4 x float> %832, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:833 [#uses=1]
- store <4 x float> %833, <4 x float>* %831
+ store <4 x float> %833, ptr %831
br label %xLS.exit449
xLS.exit449: ; preds = %1215, %xPBRK.exit
%.17731 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07730, %1215 ] ; <<4 x float>> [#uses=2]
%.17735 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07734, %1215 ] ; <<4 x float>> [#uses=2]
%.17770 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07769, %1215 ] ; <<4 x float>> [#uses=2]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:835 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 0 ; <ptr>:834 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:835 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 2 ; <ptr>:836 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 3 ; <ptr>:837 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:838 [#uses=0]
shufflevector <4 x float> %835, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:839 [#uses=1]
- getelementptr <4 x float>, <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
- load <4 x float>, <4 x float>* %840 ; <<4 x float>>:841 [#uses=0]
+ getelementptr <4 x float>, ptr null, i32 878 ; <ptr>:840 [#uses=1]
+ load <4 x float>, ptr %840 ; <<4 x float>>:841 [#uses=0]
call <4 x float> @llvm.ppc.altivec.vcfsx( <4 x i32> zeroinitializer, i32 0 ) ; <<4 x float>>:842 [#uses=1]
shufflevector <4 x float> %842, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:843 [#uses=2]
call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> %843, <4 x float> %839 ) ; <<4 x i32>>:844 [#uses=1]
; <label>:849 ; preds = %xLS.exit449
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:850 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %851
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:851 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %851
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:852 [#uses=1]
- store <4 x float> %852, <4 x float>* null
+ store <4 x float> %852, ptr null
shufflevector <4 x float> %847, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:853 [#uses=0]
br label %xST.exit451
br i1 false, label %859, label %856
; <label>:856 ; preds = %854
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
- load <4 x float>, <4 x float>* %857 ; <<4 x float>>:858 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %857
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 0 ; <ptr>:857 [#uses=2]
+ load <4 x float>, ptr %857 ; <<4 x float>>:858 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %857
br label %859
; <label>:859 ; preds = %856, %854
br i1 false, label %864, label %861
; <label>:861 ; preds = %859
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:862 [#uses=1]
shufflevector <4 x float> %845, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:863 [#uses=1]
- store <4 x float> %863, <4 x float>* %862
+ store <4 x float> %863, ptr %862
br label %864
; <label>:864 ; preds = %861, %859
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:865 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:865 [#uses=1]
shufflevector <4 x i32> %865, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:866 [#uses=0]
br i1 false, label %868, label %867
; <label>:867 ; preds = %864
- store <4 x float> zeroinitializer, <4 x float>* null
+ store <4 x float> zeroinitializer, ptr null
br label %868
; <label>:868 ; preds = %867, %864
br label %xST.exit451
xST.exit451: ; preds = %868, %849
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:872 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 0 ; <ptr>:870 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:871 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:872 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:873 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:874 [#uses=1]
xor <4 x i32> %874, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:875 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:878 [#uses=1]
xor <4 x i32> %878, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:879 [#uses=1]
bitcast <4 x i32> %879 to <4 x float> ; <<4 x float>>:880 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:881 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:881 [#uses=1]
icmp eq i32 0, 0 ; <i1>:882 [#uses=1]
br i1 %882, label %888, label %883
; <label>:883 ; preds = %xST.exit451
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %884
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 0 ; <ptr>:884 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %884
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:885 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:886 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 3 ; <ptr>:887 [#uses=0]
br label %xST.exit453
; <label>:888 ; preds = %xST.exit451
br i1 false, label %894, label %891
; <label>:891 ; preds = %888
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:892 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:893 [#uses=1]
- store <4 x float> %893, <4 x float>* %892
+ store <4 x float> %893, ptr %892
br label %894
; <label>:894 ; preds = %891, %888
br label %898
; <label>:898 ; preds = %897, %894
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:899 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:899 [#uses=0]
br i1 false, label %xST.exit453, label %900
; <label>:900 ; preds = %898
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
- load <4 x float>, <4 x float>* %901 ; <<4 x float>>:902 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 3 ; <ptr>:901 [#uses=1]
+ load <4 x float>, ptr %901 ; <<4 x float>>:902 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %902, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:903 [#uses=0]
br label %xST.exit453
xST.exit453: ; preds = %900, %898, %883
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:905 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 1 ; <ptr>:904 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:905 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 189, i32 3 ; <ptr>:906 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:907 [#uses=1]
shufflevector <4 x float> %905, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:908 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:909 [#uses=0]
bitcast <4 x float> %908 to <4 x i32> ; <<4 x i32>>:910 [#uses=0]
bitcast <4 x float> %907 to <4 x i32> ; <<4 x i32>>:911 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:912 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:913 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:913 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 2, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:914 [#uses=0]
br i1 false, label %915, label %xPIF.exit455
; <label>:915 ; preds = %xST.exit453
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:916 [#uses=0]
- getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
- store <4 x i32> zeroinitializer, <4 x i32>* %917
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:918 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:916 [#uses=0]
+ getelementptr [4 x <4 x i32>], ptr null, i32 0, i32 3 ; <ptr>:917 [#uses=1]
+ store <4 x i32> zeroinitializer, ptr %917
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:918 [#uses=1]
and <4 x i32> %918, zeroinitializer ; <<4 x i32>>:919 [#uses=0]
br label %.critedge7899
unreachable
xPIF.exit455: ; preds = %xST.exit453
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
- load <4 x float>, <4 x float>* %922 ; <<4 x float>>:923 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
- load <4 x float>, <4 x float>* %924 ; <<4 x float>>:925 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 0 ; <ptr>:922 [#uses=1]
+ load <4 x float>, ptr %922 ; <<4 x float>>:923 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 1 ; <ptr>:924 [#uses=1]
+ load <4 x float>, ptr %924 ; <<4 x float>>:925 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 2 ; <ptr>:926 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 186, i32 3 ; <ptr>:927 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:928 [#uses=0]
- bitcast { { i16, i16, i32 } }* %1 to <4 x float>* ; <<4 x float>*>:929 [#uses=0]
+ bitcast ptr %1 to ptr ; <ptr>:929 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:930 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:931 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:931 [#uses=0]
icmp eq i32 0, 0 ; <i1>:932 [#uses=1]
br i1 %932, label %934, label %933
; <label>:933 ; preds = %xPIF.exit455
- store <4 x float> zeroinitializer, <4 x float>* null
+ store <4 x float> zeroinitializer, ptr null
br label %934
; <label>:934 ; preds = %933, %xPIF.exit455
xST.exit459: ; preds = %937, %934
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:938 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %938, <4 x i32> zeroinitializer ) ; <i32>:939 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %940
- load <4 x float>, <4 x float>* null ; <<4 x float>>:941 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 188, i32 2 ; <ptr>:940 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %940
+ load <4 x float>, ptr null ; <<4 x float>>:941 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %941, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:942 [#uses=1]
- store <4 x float> %942, <4 x float>* null
+ store <4 x float> %942, ptr null
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:943 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:944 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:944 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:945 [#uses=0]
br i1 false, label %947, label %946
; <label>:950 ; preds = %947
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:951 [#uses=1]
- call void @llvm.ppc.altivec.stvewx( <4 x i32> %951, i8* null )
+ call void @llvm.ppc.altivec.stvewx( <4 x i32> %951, ptr null )
br label %952
; <label>:952 ; preds = %950, %947
br i1 false, label %955, label %953
; <label>:953 ; preds = %952
- getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
+ getelementptr [4 x <4 x i32>], ptr null, i32 0, i32 2 ; <ptr>:954 [#uses=0]
br label %955
; <label>:955 ; preds = %953, %952
; <label>:958 ; preds = %955
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:959 [#uses=1]
- call void @llvm.ppc.altivec.stvewx( <4 x i32> %959, i8* null )
+ call void @llvm.ppc.altivec.stvewx( <4 x i32> %959, ptr null )
br label %xStoreDestAddressWithMask.exit461
xStoreDestAddressWithMask.exit461: ; preds = %958, %955
- load <4 x float>, <4 x float>* %0 ; <<4 x float>>:960 [#uses=0]
+ load <4 x float>, ptr %0 ; <<4 x float>>:960 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:961 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 0 ; <ptr>:962 [#uses=0]
br i1 false, label %968, label %xST.exit463
xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
- load <4 x float>, <4 x float>* %0 ; <<4 x float>>:966 [#uses=3]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 1 ; <ptr>:963 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 2 ; <ptr>:964 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 3, i32 3 ; <ptr>:965 [#uses=0]
+ load <4 x float>, ptr %0 ; <<4 x float>>:966 [#uses=3]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:967 [#uses=0]
br i1 false, label %972, label %969
unreachable
; <label>:969 ; preds = %xST.exit463
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
- store <4 x float> %966, <4 x float>* %971
- store <4 x float> %966, <4 x float>* null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 1 ; <ptr>:970 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 2 ; <ptr>:971 [#uses=1]
+ store <4 x float> %966, ptr %971
+ store <4 x float> %966, ptr null
br label %xST.exit465
; <label>:972 ; preds = %xST.exit463
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:973 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- store <4 x float> zeroinitializer, <4 x float>* null
- load <4 x float>, <4 x float>* null ; <<4 x float>>:974 [#uses=0]
+ store <4 x float> zeroinitializer, ptr null
+ store <4 x float> zeroinitializer, ptr null
+ load <4 x float>, ptr null ; <<4 x float>>:974 [#uses=0]
bitcast <4 x float> %966 to <4 x i32> ; <<4 x i32>>:975 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> %975, <4 x i32> zeroinitializer ) ; <<4 x i32>>:976 [#uses=1]
bitcast <4 x i32> %976 to <4 x float> ; <<4 x float>>:977 [#uses=1]
- store <4 x float> %977, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
+ store <4 x float> %977, ptr null
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 2, i32 3 ; <ptr>:978 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:979 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> %979, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:980 [#uses=1]
bitcast <4 x i32> %980 to <4 x float> ; <<4 x float>>:981 [#uses=0]
br label %xST.exit465
xST.exit465: ; preds = %972, %969
- load <4 x float>, <4 x float>* %0 ; <<4 x float>>:982 [#uses=3]
+ load <4 x float>, ptr %0 ; <<4 x float>>:982 [#uses=3]
icmp eq i32 0, 0 ; <i1>:983 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 0 ; <ptr>:984 [#uses=1]
br i1 %983, label %989, label %985
; <label>:985 ; preds = %xST.exit465
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
- store <4 x float> %982, <4 x float>* %987
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 1 ; <ptr>:986 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:987 [#uses=1]
+ store <4 x float> %982, ptr %987
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:988 [#uses=0]
br label %xST.exit467
; <label>:989 ; preds = %xST.exit465
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:990 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:991 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %984
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:993 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
+ store <4 x float> zeroinitializer, ptr %984
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 1 ; <ptr>:992 [#uses=0]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:993 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 2 ; <ptr>:994 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:995 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 1, i32 3 ; <ptr>:996 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:997 [#uses=1]
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:998 [#uses=1]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:999 [#uses=1]
br label %xST.exit467
xST.exit467: ; preds = %989, %985
- load <4 x float>, <4 x float>* %0 ; <<4 x float>>:1002 [#uses=5]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1003 [#uses=2]
+ load <4 x float>, ptr %0 ; <<4 x float>>:1002 [#uses=5]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:1003 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1003, <4 x i32> zeroinitializer ) ; <i32>:1004 [#uses=0]
br i1 false, label %1011, label %1005
; <label>:1005 ; preds = %xST.exit467
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1006 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
- load <4 x float>, <4 x float>* %1007 ; <<4 x float>>:1008 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1009 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:1006 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:1007 [#uses=1]
+ load <4 x float>, ptr %1007 ; <<4 x float>>:1008 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:1009 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:1010 [#uses=0]
br label %xST.exit469
; <label>:1011 ; preds = %xST.exit467
br i1 %1017, label %1021, label %1018
; <label>:1018 ; preds = %1015
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 1 ; <ptr>:1019 [#uses=0]
shufflevector <4 x float> %1002, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1020 [#uses=0]
br label %1021
br i1 %1022, label %1025, label %1023
; <label>:1023 ; preds = %1021
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %1024
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:1024 [#uses=1]
+ store <4 x float> zeroinitializer, ptr %1024
br label %1025
; <label>:1025 ; preds = %1023, %1021
br i1 %1026, label %xST.exit469, label %1027
; <label>:1027 ; preds = %1025
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:1028 [#uses=0]
br label %xST.exit469
xST.exit469: ; preds = %1027, %1025, %1005
%.17463 = phi <4 x float> [ %.27464, %1005 ], [ %.07462, %1027 ], [ %.07462, %1025 ] ; <<4 x float>> [#uses=1]
%.17468 = phi <4 x float> [ %.27469, %1005 ], [ %.07467, %1027 ], [ %.07467, %1025 ] ; <<4 x float>> [#uses=1]
%.07489 = phi <4 x float> [ %1002, %1005 ], [ %.17490, %1027 ], [ %.17490, %1025 ] ; <<4 x float>> [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1029 [#uses=0]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1030 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:1029 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:1030 [#uses=0]
fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1]
br i1 false, label %1037, label %1032
; <label>:1032 ; preds = %xST.exit469
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1033 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
- load <4 x float>, <4 x float>* %1034 ; <<4 x float>>:1035 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
+ load <4 x float>, ptr null ; <<4 x float>>:1033 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 2 ; <ptr>:1034 [#uses=1]
+ load <4 x float>, ptr %1034 ; <<4 x float>>:1035 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 3 ; <ptr>:1036 [#uses=0]
br label %xST.exit472
; <label>:1037 ; preds = %xST.exit469
br i1 %1041, label %1045, label %1042
; <label>:1042 ; preds = %1040
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
- load <4 x float>, <4 x float>* %1043 ; <<4 x float>>:1044 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 4, i32 1 ; <ptr>:1043 [#uses=1]
+ load <4 x float>, ptr %1043 ; <<4 x float>>:1044 [#uses=0]
br label %1045
; <label>:1045 ; preds = %1042, %1040
br label %xST.exit474
xST.exit474: ; preds = %1059, %1058, %1051
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1060 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1060 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1]
fmul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2]
br i1 false, label %1065, label %1063
; <label>:1063 ; preds = %xST.exit474
shufflevector <4 x float> %1062, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1064 [#uses=1]
- store <4 x float> %1064, <4 x float>* null
+ store <4 x float> %1064, ptr null
br label %xST.exit476
; <label>:1065 ; preds = %xST.exit474
br label %xST.exit492
xST.exit492: ; preds = %1118, %1117, %1110
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1119 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1119 [#uses=1]
fmul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1]
br i1 false, label %1123, label %1122
xST.exit495: ; preds = %1130, %1129, %1122
%.07582 = phi <4 x float> [ %1121, %1122 ], [ %.17583, %1130 ], [ %.17583, %1129 ] ; <<4 x float>> [#uses=1]
%.07590 = phi <4 x float> [ %1120, %1122 ], [ %.17591, %1130 ], [ %.17591, %1129 ] ; <<4 x float>> [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1131 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1131 [#uses=1]
fadd <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1]
fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1]
br i1 false, label %1135, label %1134
xST.exit498: ; preds = %1142, %1141, %1134
%.07617 = phi <4 x float> [ %1133, %1134 ], [ %.17618, %1142 ], [ %.17618, %1141 ] ; <<4 x float>> [#uses=1]
%.07621 = phi <4 x float> [ %1132, %1134 ], [ %.17622, %1142 ], [ %.17622, %1141 ] ; <<4 x float>> [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1143 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
- load <4 x float>, <4 x float>* %1144 ; <<4 x float>>:1145 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
- load <4 x float>, <4 x float>* %1146 ; <<4 x float>>:1147 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1143 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:1144 [#uses=1]
+ load <4 x float>, ptr %1144 ; <<4 x float>>:1145 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:1146 [#uses=1]
+ load <4 x float>, ptr %1146 ; <<4 x float>>:1147 [#uses=1]
shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1]
shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1]
shufflevector <4 x float> %1147, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1150 [#uses=1]
%.07656 = phi <4 x float> [ %1153, %1155 ], [ %.17657, %1163 ], [ %.17657, %1162 ] ; <<4 x float>> [#uses=1]
%.07660 = phi <4 x float> [ %1152, %1155 ], [ %.17661, %1163 ], [ %.17661, %1162 ] ; <<4 x float>> [#uses=1]
%.07664 = phi <4 x float> [ %1151, %1155 ], [ %.17665, %1163 ], [ %.17665, %1162 ] ; <<4 x float>> [#uses=1]
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1164 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
- load <4 x float>, <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
- load <4 x float>, <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1164 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 2 ; <ptr>:1165 [#uses=1]
+ load <4 x float>, ptr %1165 ; <<4 x float>>:1166 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], ptr null, i32 0, i32 0, i32 3 ; <ptr>:1167 [#uses=1]
+ load <4 x float>, ptr %1167 ; <<4 x float>>:1168 [#uses=1]
fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
fadd <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1]
br i1 false, label %1188, label %1187
; <label>:1187 ; preds = %1186
- store <4 x float> zeroinitializer, <4 x float>* null
+ store <4 x float> zeroinitializer, ptr null
br label %1188
; <label>:1188 ; preds = %1187, %1186
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1189 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:1189 [#uses=1]
shufflevector <4 x i32> %1189, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:1190 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1190, <4 x i32> zeroinitializer ) ; <i32>:1191 [#uses=1]
icmp eq i32 %1191, 0 ; <i1>:1192 [#uses=1]
br i1 %1192, label %1196, label %1193
; <label>:1193 ; preds = %1188
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1194 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1194 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1194, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1195 [#uses=1]
- store <4 x float> %1195, <4 x float>* null
+ store <4 x float> %1195, ptr null
br label %1196
; <label>:1196 ; preds = %1193, %1188
%.07742 = phi <4 x float> [ zeroinitializer, %1193 ], [ zeroinitializer, %1188 ] ; <<4 x float>> [#uses=0]
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1197 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:1197 [#uses=1]
shufflevector <4 x i32> %1197, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:1198 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1198, <4 x i32> zeroinitializer ) ; <i32>:1199 [#uses=1]
icmp eq i32 %1199, 0 ; <i1>:1200 [#uses=1]
br i1 %1200, label %xST.exit507, label %1201
; <label>:1201 ; preds = %1196
- store <4 x float> zeroinitializer, <4 x float>* null
+ store <4 x float> zeroinitializer, ptr null
br label %xST.exit507
xST.exit507: ; preds = %1201, %1196, %1183
br i1 %1203, label %1207, label %1204
; <label>:1204 ; preds = %xST.exit507
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1205 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1205 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1205, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1206 [#uses=1]
- store <4 x float> %1206, <4 x float>* null
+ store <4 x float> %1206, ptr null
br label %1207
; <label>:1207 ; preds = %1204, %xST.exit507
- load <4 x i32>, <4 x i32>* %.sub7896 ; <<4 x i32>>:1208 [#uses=1]
+ load <4 x i32>, ptr %.sub7896 ; <<4 x i32>>:1208 [#uses=1]
shufflevector <4 x i32> %1208, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:1209 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1209, <4 x i32> zeroinitializer ) ; <i32>:1210 [#uses=1]
icmp eq i32 %1210, 0 ; <i1>:1211 [#uses=1]
br i1 %1211, label %1215, label %1212
; <label>:1212 ; preds = %1207
- load <4 x float>, <4 x float>* null ; <<4 x float>>:1213 [#uses=1]
+ load <4 x float>, ptr null ; <<4 x float>>:1213 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %1213, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1214 [#uses=1]
- store <4 x float> %1214, <4 x float>* null
+ store <4 x float> %1214, ptr null
br label %1215
; <label>:1215 ; preds = %1212, %1207
- store <4 x float> zeroinitializer, <4 x float>* null
+ store <4 x float> zeroinitializer, ptr null
br label %xLS.exit449
}
declare <4 x i32> @llvm.ppc.altivec.vsel(<4 x i32>, <4 x i32>, <4 x i32>)
-declare void @llvm.ppc.altivec.stvewx(<4 x i32>, i8*)
+declare void @llvm.ppc.altivec.stvewx(<4 x i32>, ptr)
declare <4 x float> @llvm.ppc.altivec.vrsqrtefp(<4 x float>)
define i64 @test(i32 %A, i32 %B, i32 %C) nounwind {
entry:
- %Y = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* elementtype( i32) %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
- %tmp5 = load i32, i32* %Y ; <i32> [#uses=1]
+ %Y = alloca i32, align 4 ; <ptr> [#uses=2]
+ %tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( ptr elementtype( i32) %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
+ %tmp5 = load i32, ptr %Y ; <i32> [#uses=1]
%tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp7 = shl i64 %tmp56, 32 ; <i64> [#uses=1]
%tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
;target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "powerpc-unknown-linux-gnu"
-@x = global [2 x i32] [ i32 1, i32 2 ] ; <[2 x i32]*> [#uses=1]
+@x = global [2 x i32] [ i32 1, i32 2 ] ; <ptr> [#uses=1]
define void @foo() {
entry:
- tail call void asm sideeffect "$0 $1", "s,i"( i8* bitcast (i32* getelementptr ([2 x i32], [2 x i32]* @x, i32 0, i32 1) to i8*), i8* bitcast (i32* getelementptr ([2 x i32], [2 x i32]* @x, i32 0, i32 1) to i8*) )
+ tail call void asm sideeffect "$0 $1", "s,i"( ptr getelementptr ([2 x i32], ptr @x, i32 0, i32 1), ptr getelementptr ([2 x i32], ptr @x, i32 0, i32 1) )
ret void
}
%struct.A = type { %struct.anon }
%struct.anon = type <{ }>
-define void @bork(%struct.A* %In0P) {
+define void @bork(ptr %In0P) {
entry:
- %tmp56 = bitcast %struct.A* %In0P to float* ; <float*> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
%i.035.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp8 = getelementptr float, float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
- %tmp101112 = bitcast float* %tmp8 to i8* ; <i8*> [#uses=1]
- %tmp1617 = bitcast float* %tmp8 to i32* ; <i32*> [#uses=1]
- %tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"(i8* %tmp101112, i32 0, i32* elementtype(i32) %tmp1617 ) ; <i32> [#uses=0]
+ %tmp8 = getelementptr float, ptr %In0P, i32 %i.035.0 ; <ptr> [#uses=2]
+ %tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"(ptr %tmp8, i32 0, ptr elementtype(i32) %tmp8 ) ; <i32> [#uses=0]
%indvar.next = add i32 %i.035.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 4 ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb
define i32 @f(i32 %i, i32 %q) {
entry:
- %i_addr = alloca i32 ; <i32*> [#uses=2]
- %q_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- store i32 %i, i32* %i_addr
- store i32 %q, i32* %q_addr
- %tmp = load i32, i32* %i_addr ; <i32> [#uses=1]
+ %i_addr = alloca i32 ; <ptr> [#uses=2]
+ %q_addr = alloca i32 ; <ptr> [#uses=2]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=1]
+ store i32 %i, ptr %i_addr
+ store i32 %q, ptr %q_addr
+ %tmp = load i32, ptr %i_addr ; <i32> [#uses=1]
%tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
%tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
cond_true: ; preds = %entry
%tmp3 = call i32 (...) @bar( ) ; <i32> [#uses=0]
%tmp4 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp7 = load i32, i32* %q_addr ; <i32> [#uses=1]
+ %tmp7 = load i32, ptr %q_addr ; <i32> [#uses=1]
%tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
%tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
%toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
cond_false: ; preds = %entry
%tmp5 = call i32 (...) @foo( ) ; <i32> [#uses=0]
%tmp6 = call i32 (...) @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp27 = load i32, i32* %q_addr ; <i32> [#uses=1]
+ %tmp27 = load i32, ptr %q_addr ; <i32> [#uses=1]
%tmp28 = icmp ne i32 %tmp27, 0 ; <i1> [#uses=1]
%tmp289 = zext i1 %tmp28 to i8 ; <i8> [#uses=1]
%toBool210 = icmp ne i8 %tmp289, 0 ; <i1> [#uses=1]
br label %return
return: ; preds = %cond_next18
- %retval20 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval20 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval20
}
%struct.XClearC = type { double, %struct.GIC4, %struct.GIC4, float, i32 }
%struct.XClipPlane = type { i32, [6 x %struct.GIC4] }
%struct.XCBuffer = type { i16, i16, [8 x i16] }
- %struct.XCMatrix = type { [16 x float]*, %struct.XICSS }
- %struct.XConvolution = type { %struct.GIC4, %struct.XICSS, i16, i16, float*, i32, i32 }
+ %struct.XCMatrix = type { ptr, %struct.XICSS }
+ %struct.XConvolution = type { %struct.GIC4, %struct.XICSS, i16, i16, ptr, i32, i32 }
%struct.XDepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
- %struct.XFixedFunctionProgram = type { %struct.PPSToken* }
+ %struct.XFixedFunctionProgram = type { ptr }
%struct.XFogMode = type { %struct.GIC4, float, float, float, float, float, i16, i16, i16, i8, i8 }
%struct.XFramebufferAttachment = type { i32, i32, i32, i32 }
%struct.XHintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
- %struct.XHistogram = type { %struct.XFramebufferAttachment*, i32, i16, i8, i8 }
+ %struct.XHistogram = type { ptr, i32, i16, i8, i8 }
%struct.XICSS = type { %struct.GTCoord2, %struct.GTCoord2, %struct.GTCoord2, %struct.GTCoord2 }
%struct.XISubset = type { %struct.XConvolution, %struct.XConvolution, %struct.XConvolution, %struct.XCMatrix, %struct.XMinmax, %struct.XHistogram, %struct.XICSS, %struct.XICSS, %struct.XICSS, %struct.XICSS, i32 }
%struct.XLight = type { %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.XPointLineLimits, float, float, float, float, float, %struct.XPointLineLimits, float, float, float, float, float }
%struct.XLogicOp = type { i16, i8, i8 }
%struct.XMaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
%struct.XMaterial = type { %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.GIC4, float, float, float, float, [8 x %struct.XLightProduct], %struct.GIC4, [6 x i32], [2 x i32] }
- %struct.XMinmax = type { %struct.XMinmaxTable*, i16, i8, i8 }
+ %struct.XMinmax = type { ptr, i16, i8, i8 }
%struct.XMinmaxTable = type { %struct.GIC4, %struct.GIC4 }
- %struct.XMipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, float*, i8*, i16, i16, i16, i16, [2 x float] }
+ %struct.XMipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, ptr, ptr, i16, i16, i16, i16, [2 x float] }
%struct.XMultisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XPipelineProgramState = type { i8, i8, i8, i8, %struct.GIC4* }
- %struct.XPMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+ %struct.XPipelineProgramState = type { i8, i8, i8, i8, ptr }
+ %struct.XPMap = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
%struct.XPMode = type { float, float, %struct.XPStore, %struct.XPTransfer, %struct.XPMap, %struct.XISubset, i32, i32 }
%struct.XPPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
%struct.XPStore = type { %struct.XPPack, %struct.XPPack }
%struct.XRegisterCCsPerStageState = type { [2 x %struct.XRegisterCCsPerPortionState], [2 x %struct.GIC4] }
%struct.XRegisterCCsPerVariableState = type { i16, i16, i16, i16 }
%struct.XScissorTest = type { %struct.XFramebufferAttachment, i8, i8, i8, i8 }
- %struct.XState = type { i16, i16, i16, i16, i32, i32, [256 x %struct.GIC4], [128 x %struct.GIC4], %struct.XViewport, %struct.XXF, %struct.XLightModel, %struct.XATest, %struct.XBlendMode, %struct.XClearC, %struct.XCBuffer, %struct.XDepthTest, %struct.XArrayRange, %struct.XFogMode, %struct.XHintMode, %struct.XLineMode, %struct.XLogicOp, %struct.XMaskMode, %struct.XPMode, %struct.XPointMode, %struct.XPGMode, %struct.XScissorTest, i32, %struct.XStencilTest, [16 x %struct.XTMode], %struct.XArrayRange, [8 x %struct.XTCoordGen], %struct.XClipPlane, %struct.XMultisample, %struct.XRegisterCCs, %struct.XArrayRange, %struct.XArrayRange, [3 x %struct.XPipelineProgramState], %struct.XXFFeedback, i32*, %struct.XFixedFunctionProgram, [3 x i32] }
+ %struct.XState = type { i16, i16, i16, i16, i32, i32, [256 x %struct.GIC4], [128 x %struct.GIC4], %struct.XViewport, %struct.XXF, %struct.XLightModel, %struct.XATest, %struct.XBlendMode, %struct.XClearC, %struct.XCBuffer, %struct.XDepthTest, %struct.XArrayRange, %struct.XFogMode, %struct.XHintMode, %struct.XLineMode, %struct.XLogicOp, %struct.XMaskMode, %struct.XPMode, %struct.XPointMode, %struct.XPGMode, %struct.XScissorTest, i32, %struct.XStencilTest, [16 x %struct.XTMode], %struct.XArrayRange, [8 x %struct.XTCoordGen], %struct.XClipPlane, %struct.XMultisample, %struct.XRegisterCCs, %struct.XArrayRange, %struct.XArrayRange, [3 x %struct.XPipelineProgramState], %struct.XXFFeedback, ptr, %struct.XFixedFunctionProgram, [3 x i32] }
%struct.XStencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
%struct.XTCoordGen = type { { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, i8, i8, i8, i8 }
%struct.XTGeomState = type { i16, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [6 x i16], [6 x i16] }
- %struct.XTLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, i8* }
+ %struct.XTLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, ptr }
%struct.XTMode = type { %struct.GIC4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
- %struct.XTParamState = type { i16, i16, i16, i16, i16, i16, %struct.GIC4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, i8* }
- %struct.XTRec = type { %struct.XTState*, float, float, float, float, %struct.XMipmaplevel*, %struct.XMipmaplevel*, i32, i32, i32, i32, i32, i32, i32, [2 x %struct.PPSToken] }
- %struct.XTState = type { i16, i8, i8, i16, i16, float, i32, %struct.GISWRSurface*, %struct.XTParamState, %struct.XTGeomState, %struct.XTLevel, [6 x [15 x %struct.XTLevel]] }
+ %struct.XTParamState = type { i16, i16, i16, i16, i16, i16, %struct.GIC4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, ptr }
+ %struct.XTRec = type { ptr, float, float, float, float, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, [2 x %struct.PPSToken] }
+ %struct.XTState = type { i16, i8, i8, i16, i16, float, i32, ptr, %struct.XTParamState, %struct.XTGeomState, %struct.XTLevel, [6 x [15 x %struct.XTLevel]] }
%struct.XXF = type { [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }
%struct.XXFFeedback = type { i8, i8, i8, i8, [16 x i32], [16 x i32] }
%struct.XViewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
%struct.GIC4 = type { float, float, float, float }
- %struct.GISWRSurface = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, [4 x i8*], i32 }
+ %struct.GISWRSurface = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, [4 x ptr], i32 }
%struct.GTCoord2 = type { float, float }
%struct.GVMFPContext = type { float, i32, i32, i32, float, [3 x float] }
- %struct.GVMFPStack = type { [8 x i8*], i8*, i8*, i32, i32, { <4 x float> }, { <4 x float> }, <4 x i32> }
+ %struct.GVMFPStack = type { [8 x ptr], ptr, ptr, i32, i32, { <4 x float> }, { <4 x float> }, <4 x i32> }
%struct.GVMFGAttrib = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, [8 x <4 x float>] }
- %struct.GVMTs = type { [16 x %struct.XTRec*] }
+ %struct.GVMTs = type { [16 x ptr] }
%struct.PPSToken = type { { i16, i16, i32 } }
%struct._GVMConstants = type { <4 x i32>, <4 x i32>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [528 x i8] }
-declare <4 x i32> @llvm.ppc.altivec.lvewx(i8*)
+declare <4 x i32> @llvm.ppc.altivec.lvewx(ptr)
declare i32 @llvm.ppc.altivec.vcmpequw.p(i32, <4 x i32>, <4 x i32>)
-define void @test(%struct.XState* %gldst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._GVMConstants* %cnstn, %struct.PPSToken* %pstrm, %struct.GVMFPContext* %vmctx, %struct.GVMTs* %txtrs, %struct.GVMFPStack* %fpstk, %struct.GVMFGAttrib* %start, %struct.GVMFGAttrib* %deriv, i32 %fragx, i32 %fragy) {
+define void @test(ptr %gldst, ptr %prgrm, ptr %buffs, ptr %cnstn, ptr %pstrm, ptr %vmctx, ptr %txtrs, ptr %fpstk, ptr %start, ptr %deriv, i32 %fragx, i32 %fragy) {
bb58.i:
- %tmp3405.i = getelementptr %struct.XTRec, %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp34053406.i = bitcast float* %tmp3405.i to i8* ; <i8*> [#uses=1]
- %tmp3407.i = call <4 x i32> @llvm.ppc.altivec.lvewx( i8* %tmp34053406.i ) ; <<4 x i32>> [#uses=0]
+ %tmp3405.i = getelementptr %struct.XTRec, ptr null, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp3407.i = call <4 x i32> @llvm.ppc.altivec.lvewx( ptr %tmp3405.i ) ; <<4 x i32>> [#uses=0]
%tmp4146.i = call i32 @llvm.ppc.altivec.vcmpequw.p( i32 3, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32> [#uses=1]
%tmp4147.i = icmp eq i32 %tmp4146.i, 0 ; <i1> [#uses=1]
br i1 %tmp4147.i, label %bb8799.i, label %bb4150.i
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64--
; PR1596
- %struct._obstack_chunk = type { i8* }
- %struct.obstack = type { i8*, %struct._obstack_chunk* (i8*, i64)*, i8*, i8 }
+ %struct._obstack_chunk = type { ptr }
+ %struct.obstack = type { ptr, ptr, ptr, i8 }
-define i32 @_obstack_newchunk(%struct.obstack* %h, i32 %length) {
+define i32 @_obstack_newchunk(ptr %h, i32 %length) {
entry:
br i1 false, label %cond_false, label %cond_true
br i1 false, label %cond_true28, label %cond_next30
cond_false: ; preds = %entry
- %tmp22 = tail call %struct._obstack_chunk* null( i64 undef ) ; <%struct._obstack_chunk*> [#uses=2]
+ %tmp22 = tail call ptr null( i64 undef ) ; <ptr> [#uses=2]
br i1 false, label %cond_true28, label %cond_next30
cond_true28: ; preds = %cond_false, %cond_true
- %iftmp.0.043.0 = phi %struct._obstack_chunk* [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
+ %iftmp.0.043.0 = phi ptr [ null, %cond_true ], [ %tmp22, %cond_false ] ; <ptr> [#uses=1]
tail call void null( )
br label %cond_next30
cond_next30: ; preds = %cond_true28, %cond_false, %cond_true
- %iftmp.0.043.1 = phi %struct._obstack_chunk* [ %iftmp.0.043.0, %cond_true28 ], [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
- %tmp41 = getelementptr %struct._obstack_chunk, %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
- store i8* null, i8** %tmp41, align 8
+ %iftmp.0.043.1 = phi ptr [ %iftmp.0.043.0, %cond_true28 ], [ null, %cond_true ], [ %tmp22, %cond_false ] ; <ptr> [#uses=1]
+ %tmp41 = getelementptr %struct._obstack_chunk, ptr %iftmp.0.043.1, i32 0, i32 0 ; <ptr> [#uses=1]
+ store ptr null, ptr %tmp41, align 8
ret i32 undef
}
; RUN: FileCheck %s --check-prefix=AIX32
-define hidden void @_Z4borkPc(i8* %image) {
+define hidden void @_Z4borkPc(ptr %image) {
; CHECK-LABEL: _Z4borkPc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 8
; AIX32-NEXT: nop
; AIX32-NEXT: blr
entry:
- tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
+ tail call void @llvm.ppc.altivec.dst( ptr %image, i32 8, i32 0 )
+ tail call void @llvm.ppc.altivec.dstt( ptr %image, i32 8, i32 0 )
+ tail call void @llvm.ppc.altivec.dstst( ptr %image, i32 8, i32 0 )
+ tail call void @llvm.ppc.altivec.dststt( ptr %image, i32 8, i32 0 )
ret void
}
-declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
+declare void @llvm.ppc.altivec.dst(ptr, i32, i32)
+declare void @llvm.ppc.altivec.dstt(ptr, i32, i32)
+declare void @llvm.ppc.altivec.dstst(ptr, i32, i32)
+declare void @llvm.ppc.altivec.dststt(ptr, i32, i32)
define void @foo() {
entry:
- %ttype = alloca i32, align 4 ; <i32*> [#uses=1]
- %regs = alloca [1024 x %struct.__db_region], align 16 ; <[1024 x %struct.__db_region]*> [#uses=0]
- %tmp = load i32, i32* %ttype, align 4 ; <i32> [#uses=1]
+ %ttype = alloca i32, align 4 ; <ptr> [#uses=1]
+ %regs = alloca [1024 x %struct.__db_region], align 16 ; <ptr> [#uses=0]
+ %tmp = load i32, ptr %ttype, align 4 ; <i32> [#uses=1]
%tmp1 = call i32 (...) @bork( i32 %tmp ) ; <i32> [#uses=0]
ret void
; CHECK-OPT: blr
}
-define signext i32 @test(i32* noalias nocapture readonly %b, i32 signext %n) {
+define signext i32 @test(ptr noalias nocapture readonly %b, i32 signext %n) {
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%mul = mul nsw i32 %0, 7
ret i32 %mul
target triple = "powerpc-unknown-linux-gnu"
%struct.anon = type <{ i8, float }>
-@s = global %struct.anon <{ i8 3, float 0x4014666660000000 }> ; <%struct.anon*> [#uses=1]
-@u = global <{ i8, double }> <{ i8 3, double 5.100000e+00 }> ; <<{ i8, double }>*> [#uses=1]
-@t = weak global %struct.anon zeroinitializer ; <%struct.anon*> [#uses=2]
-@v = weak global <{ i8, double }> zeroinitializer ; <<{ i8, double }>*> [#uses=2]
-@.str = internal constant [8 x i8] c"%f %lf\0A\00" ; <[8 x i8]*> [#uses=1]
+@s = global %struct.anon <{ i8 3, float 0x4014666660000000 }> ; <ptr> [#uses=1]
+@u = global <{ i8, double }> <{ i8 3, double 5.100000e+00 }> ; <ptr> [#uses=1]
+@t = weak global %struct.anon zeroinitializer ; <ptr> [#uses=2]
+@v = weak global <{ i8, double }> zeroinitializer ; <ptr> [#uses=2]
+@.str = internal constant [8 x i8] c"%f %lf\0A\00" ; <ptr> [#uses=1]
define i32 @foo() {
; CHECK-LABEL: foo:
; CHECK-NEXT: addi 1, 1, 16
; CHECK-NEXT: blr
entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr %struct.anon, %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp1 = load float, float* %tmp, align 1 ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
- store float %tmp1, float* %tmp2, align 1
- %tmp3 = getelementptr <{ i8, double }>, <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp4 = load double, double* %tmp3, align 1 ; <double> [#uses=1]
- %tmp5 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
- store double %tmp4, double* %tmp5, align 1
+ %tmp = getelementptr %struct.anon, ptr @s, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp1 = load float, ptr %tmp, align 1 ; <float> [#uses=1]
+ %tmp2 = getelementptr %struct.anon, ptr @t, i32 0, i32 1 ; <ptr> [#uses=1]
+ store float %tmp1, ptr %tmp2, align 1
+ %tmp3 = getelementptr <{ i8, double }>, ptr @u, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp4 = load double, ptr %tmp3, align 1 ; <double> [#uses=1]
+ %tmp5 = getelementptr <{ i8, double }>, ptr @v, i32 0, i32 1 ; <ptr> [#uses=1]
+ store double %tmp4, ptr %tmp5, align 1
br label %return
return: ; preds = %entry
- %retval6 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval6 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval6
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ %retval = alloca i32, align 4 ; <ptr> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = call i32 @foo( ) ; <i32> [#uses=0]
- %tmp1 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp2 = load float, float* %tmp1, align 1 ; <float> [#uses=1]
+ %tmp1 = getelementptr %struct.anon, ptr @t, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp2 = load float, ptr %tmp1, align 1 ; <float> [#uses=1]
%tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
- %tmp4 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp5 = load double, double* %tmp4, align 1 ; <double> [#uses=1]
- %tmp6 = getelementptr [8 x i8], [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp7 = call i32 (i8*, ...) @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
+ %tmp4 = getelementptr <{ i8, double }>, ptr @v, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp5 = load double, ptr %tmp4, align 1 ; <double> [#uses=1]
+ %tmp6 = getelementptr [8 x i8], ptr @.str, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp7 = call i32 (ptr, ...) @printf( ptr %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
- %retval8 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval8 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval8
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
%struct.TCMalloc_SpinLock = type { i32 }
-define void @_ZN17TCMalloc_SpinLock4LockEv(%struct.TCMalloc_SpinLock* %this) {
+define void @_ZN17TCMalloc_SpinLock4LockEv(ptr %this) {
entry:
- %tmp3 = call i32 asm sideeffect "1: lwarx $0, 0, $1\0A\09stwcx. $2, 0, $1\0A\09bne- 1b\0A\09isync", "=&r,=*r,r,1,~{dirflag},~{fpsr},~{flags},~{memory}"(i32** elementtype(i32*) null, i32 1, i32* null) ; <i32> [#uses=0]
+ %tmp3 = call i32 asm sideeffect "1: lwarx $0, 0, $1\0A\09stwcx. $2, 0, $1\0A\09bne- 1b\0A\09isync", "=&r,=*r,r,1,~{dirflag},~{fpsr},~{flags},~{memory}"(ptr elementtype(ptr) null, i32 1, ptr null) ; <i32> [#uses=0]
unreachable
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
-declare void @cxa_atexit_check_1(i8*)
+declare void @cxa_atexit_check_1(ptr)
; TODO: KB: Original test case only checked that this compiles; is it worth keeping?
; CHECK: check_cxa_atexit:
; CHECK: blr
-define i32 @check_cxa_atexit(i32 (void (i8*)*, i8*, i8*)* %cxa_atexit, void (i8*)* %cxa_finalize) {
+define i32 @check_cxa_atexit(ptr %cxa_atexit, ptr %cxa_finalize) {
entry:
- %tmp7 = call i32 null( void (i8*)* @cxa_atexit_check_1, i8* null, i8* null ) ; <i32> [#uses=0]
+ %tmp7 = call i32 null( ptr @cxa_atexit_check_1, ptr null, ptr null ) ; <i32> [#uses=0]
br i1 false, label %cond_true, label %cond_next
cond_true: ; preds = %entry
define i32 @test() {
entry:
- %data = alloca i32 ; <i32*> [#uses=1]
- %compressedPage = alloca %struct._StorePageMax ; <%struct._StorePageMax*> [#uses=0]
- %tmp107 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* null, i32 0, i32* elementtype(i32) %data ) ; <i32> [#uses=0]
+ %data = alloca i32 ; <ptr> [#uses=1]
+ %compressedPage = alloca %struct._StorePageMax ; <ptr> [#uses=0]
+ %tmp107 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( ptr null, i32 0, ptr elementtype(i32) %data ) ; <i32> [#uses=0]
unreachable
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -mattr=+altivec
%struct.inoutprops = type <{ i8, [3 x i8] }>
-define void @bork(float* %argA, float* %argB, float* %res, i8 %inoutspec.0) {
+define void @bork(ptr %argA, ptr %argB, ptr %res, i8 %inoutspec.0) {
entry:
%.mask = and i8 %inoutspec.0, -16 ; <i8> [#uses=1]
%tmp6 = icmp eq i8 %.mask, 16 ; <i1> [#uses=1]
br i1 %tmp6, label %cond_true, label %UnifiedReturnBlock
cond_true: ; preds = %entry
- %tmp89 = bitcast float* %res to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp1011 = bitcast float* %argA to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp14 = load <4 x i32>, <4 x i32>* %tmp1011, align 16 ; <<4 x i32>> [#uses=1]
- %tmp1516 = bitcast float* %argB to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp18 = load <4 x i32>, <4 x i32>* %tmp1516, align 16 ; <<4 x i32>> [#uses=1]
+ %tmp14 = load <4 x i32>, ptr %argA, align 16 ; <<4 x i32>> [#uses=1]
+ %tmp18 = load <4 x i32>, ptr %argB, align 16 ; <<4 x i32>> [#uses=1]
%tmp19 = sdiv <4 x i32> %tmp14, %tmp18 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp19, <4 x i32>* %tmp89, align 16
+ store <4 x i32> %tmp19, ptr %res, align 16
ret void
UnifiedReturnBlock: ; preds = %entry
%struct.NSPersistentStoreCoordinator = type opaque
%struct.NSString = type opaque
%struct.NSURL = type opaque
- %struct._message_ref_t = type { %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
+ %struct._message_ref_t = type { ptr, ptr }
%struct.objc_object = type { }
%struct.objc_selector = type opaque
-@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
-@NSXMLStoreType = external constant %struct.NSString* ; <%struct.NSString**> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_5" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
+@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <ptr> [#uses=1]
+@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <ptr> [#uses=1]
+@NSXMLStoreType = external constant ptr ; <ptr> [#uses=1]
+@"\01L_OBJC_MESSAGE_REF_5" = external global %struct._message_ref_t ; <ptr> [#uses=2]
+@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <ptr> [#uses=1]
; TODO: KB: Original test case only checked that this compiles; is it worth keeping?
; CHECK: managedObjectContextWithModelURL
; CHECK-NOT: blr
; CHECK: .cfi_endproc
-define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
+define ptr @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(ptr %self, ptr %_cmd, ptr %modelURL, ptr %storeURL) {
entry:
- %storeCoordinator = alloca %struct.NSPersistentStoreCoordinator* ; <%struct.NSPersistentStoreCoordinator**> [#uses=0]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp34 = load %struct.NSString*, %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp37 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=1]
- %tmp45 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp37( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", %struct.objc_object* %tmp42, %struct.NSString* null ) ; <%struct.objc_object*> [#uses=1]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* %tmp45, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
+ %storeCoordinator = alloca ptr ; <ptr> [#uses=0]
+ %tmp29 = call ptr (ptr, ptr, ...) null( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_2" ) ; <ptr> [#uses=0]
+ %tmp34 = load ptr, ptr @NSXMLStoreType, align 8 ; <ptr> [#uses=1]
+ %tmp37 = load ptr, ptr @"\01L_OBJC_MESSAGE_REF_5", align 8 ; <ptr> [#uses=1]
+ %tmp42 = call ptr (ptr, ptr, ...) null( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <ptr> [#uses=1]
+ %tmp45 = call ptr (ptr, ptr, ...) %tmp37( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_5", ptr %tmp42, ptr null ) ; <ptr> [#uses=1]
+ %tmp48 = call ptr (ptr, ptr, ...) null( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_6", ptr %tmp34, ptr null, ptr null, ptr %tmp45, ptr null ) ; <ptr> [#uses=0]
unreachable
}
%struct.NSManagedObjectContext = type opaque
%struct.NSString = type opaque
%struct.NSURL = type opaque
- %struct._message_ref_t = type { %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
+ %struct._message_ref_t = type { ptr, ptr }
%struct.objc_object = type { }
%struct.objc_selector = type opaque
-@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@NSXMLStoreType = external constant %struct.NSString* ; <%struct.NSString**> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
+@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <ptr> [#uses=2]
+@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <ptr> [#uses=2]
+@NSXMLStoreType = external constant ptr ; <ptr> [#uses=1]
+@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <ptr> [#uses=2]
; TODO: KB: Original test case only checked that this compiles; is it worth keeping?
; CHECK: managedObjectContextWithModelURL
; CHECK-NOT: blr
; CHECK: .cfi_endproc
-define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
+define ptr @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(ptr %self, ptr %_cmd, ptr %modelURL, ptr %storeURL) {
entry:
- %tmp27 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp27( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp33 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp34 = load %struct.NSString*, %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp40 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp40( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=0]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...) %tmp33( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* null, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
+ %tmp27 = load ptr, ptr @"\01L_OBJC_MESSAGE_REF_2", align 8 ; <ptr> [#uses=1]
+ %tmp29 = call ptr (ptr, ptr, ...) %tmp27( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_2" ) ; <ptr> [#uses=0]
+ %tmp33 = load ptr, ptr @"\01L_OBJC_MESSAGE_REF_6", align 8 ; <ptr> [#uses=1]
+ %tmp34 = load ptr, ptr @NSXMLStoreType, align 8 ; <ptr> [#uses=1]
+ %tmp40 = load ptr, ptr @"\01L_OBJC_MESSAGE_REF_4", align 8 ; <ptr> [#uses=1]
+ %tmp42 = call ptr (ptr, ptr, ...) %tmp40( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <ptr> [#uses=0]
+ %tmp48 = call ptr (ptr, ptr, ...) %tmp33( ptr null, ptr @"\01L_OBJC_MESSAGE_REF_6", ptr %tmp34, ptr null, ptr null, ptr null, ptr null ) ; <ptr> [#uses=0]
unreachable
}
target triple = "powerpc64-unknown-linux-gnu"
%struct.Range = type { i64, i64 }
-define void @Bork(i64 %range.0.0, i64 %range.0.1, i64 %size) personality i32 (...)* @__gxx_personality_v0 {
+define void @Bork(i64 %range.0.0, i64 %range.0.1, i64 %size) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: Bork:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: bl _Unwind_Resume
; CHECK-NEXT: nop
entry:
- %effectiveRange = alloca %struct.Range, align 8 ; <%struct.Range*> [#uses=2]
- %tmp4 = call i8* @llvm.stacksave() ; <i8*> [#uses=1]
+ %effectiveRange = alloca %struct.Range, align 8 ; <ptr> [#uses=2]
+ %tmp4 = call ptr @llvm.stacksave() ; <ptr> [#uses=1]
%size1 = trunc i64 %size to i32 ; <i32> [#uses=1]
- %tmp17 = alloca i8*, i32 %size1 ; <i8**> [#uses=1]
- invoke void @Foo(i8** %tmp17)
+ %tmp17 = alloca ptr, i32 %size1 ; <ptr> [#uses=1]
+ invoke void @Foo(ptr %tmp17)
to label %bb30.preheader unwind label %unwind
bb30.preheader: ; preds = %entry
- %tmp26 = getelementptr %struct.Range, %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
+ %tmp26 = getelementptr %struct.Range, ptr %effectiveRange, i64 0, i32 1 ; <ptr> [#uses=1]
br label %bb30
unwind: ; preds = %cond_true, %entry
- %exn = landingpad {i8*, i32}
+ %exn = landingpad {ptr, i32}
cleanup
- call void @llvm.stackrestore(i8* %tmp4)
- resume { i8*, i32 } %exn
+ call void @llvm.stackrestore(ptr %tmp4)
+ resume { ptr, i32 } %exn
invcont23: ; preds = %cond_true
- %tmp27 = load i64, i64* %tmp26, align 8 ; <i64> [#uses=1]
+ %tmp27 = load i64, ptr %tmp26, align 8 ; <i64> [#uses=1]
%tmp28 = sub i64 %range_addr.1.0, %tmp27 ; <i64> [#uses=1]
br label %bb30
br i1 %tmp33, label %cleanup, label %cond_true
cond_true: ; preds = %bb30
- invoke void @Bar(i64 %range.0.0, %struct.Range* %effectiveRange)
+ invoke void @Bar(i64 %range.0.0, ptr %effectiveRange)
to label %invcont23 unwind label %unwind
cleanup: ; preds = %bb30
ret void
}
-declare i8* @llvm.stacksave() nounwind
+declare ptr @llvm.stacksave() nounwind
-declare void @Foo(i8**)
+declare void @Foo(ptr)
-declare void @Bar(i64, %struct.Range*)
+declare void @Bar(i64, ptr)
-declare void @llvm.stackrestore(i8*) nounwind
+declare void @llvm.stackrestore(ptr) nounwind
declare i32 @__gxx_personality_v0(...)
; PR1811
; REQUIRES: default_triple
-define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>*
+define void @execute_shader(ptr %OUT, ptr %IN, ptr
%CONST) {
entry:
- %input2 = load <4 x float>, <4 x float>* null, align 16 ; <<4 x float>>
+ %input2 = load <4 x float>, ptr null, align 16 ; <<4 x float>>
%shuffle7 = shufflevector <4 x float> %input2, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
%mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x
%add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>>
- store <4 x float> %add2, <4 x float>* null, align 16
+ store <4 x float> %add2, ptr null, align 16
ret void
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
- %struct.Handle = type { %struct.oopDesc** }
- %struct.JNI_ArgumentPusher = type { %struct.SignatureIterator, %struct.JavaCallArguments* }
- %struct.JNI_ArgumentPusherArray = type { %struct.JNI_ArgumentPusher, %struct.JvmtiEventEnabled* }
- %struct.JavaCallArguments = type { [9 x i32], [9 x i32], i32*, i32*, i32, i32, i32 }
+ %struct.Handle = type { ptr }
+ %struct.JNI_ArgumentPusher = type { %struct.SignatureIterator, ptr }
+ %struct.JNI_ArgumentPusherArray = type { %struct.JNI_ArgumentPusher, ptr }
+ %struct.JavaCallArguments = type { [9 x i32], [9 x i32], ptr, ptr, i32, i32, i32 }
%struct.JvmtiEventEnabled = type { i64 }
%struct.KlassHandle = type { %struct.Handle }
- %struct.SignatureIterator = type { i32 (...)**, %struct.KlassHandle, i32, i32, i32 }
+ %struct.SignatureIterator = type { ptr, %struct.KlassHandle, i32, i32, i32 }
%struct.instanceOopDesc = type { %struct.oopDesc }
- %struct.oopDesc = type { %struct.instanceOopDesc*, %struct.instanceOopDesc* }
-@.str = external constant [44 x i8] ; <[44 x i8]*> [#uses=1]
+ %struct.oopDesc = type { ptr, ptr }
+@.str = external constant [44 x i8] ; <ptr> [#uses=1]
; CHECK: _ZN23JNI_ArgumentPusherArray7iterateEy
; CHECK: blr
-define void @_ZN23JNI_ArgumentPusherArray7iterateEy(%struct.JNI_ArgumentPusherArray* %this, i64 %fingerprint) nounwind {
+define void @_ZN23JNI_ArgumentPusherArray7iterateEy(ptr %this, i64 %fingerprint) nounwind {
entry:
br label %bb113
bb113: ; preds = %bb113, %bb93, %bb82, %bb52, %entry
%fingerprint_addr.0.reg2mem.9 = phi i64 [ 0, %entry ], [ 0, %bb52 ], [ 0, %bb82 ], [ 0, %bb93 ], [ %tmp118, %bb113 ] ; <i64> [#uses=1]
- tail call void @_Z28report_should_not_reach_herePKci( i8* getelementptr ([44 x i8], [44 x i8]* @.str, i32 0, i32 0), i32 817 ) nounwind
+ tail call void @_Z28report_should_not_reach_herePKci( ptr @.str, i32 817 ) nounwind
%tmp118 = lshr i64 %fingerprint_addr.0.reg2mem.9, 4 ; <i64> [#uses=2]
%tmp21158 = and i64 %tmp118, 15 ; <i64> [#uses=1]
switch i64 %tmp21158, label %bb113 [
ret void
}
-declare void @_Z28report_should_not_reach_herePKci(i8*, i32)
+declare void @_Z28report_should_not_reach_herePKci(ptr, i32)
; CHECK: blr
define i32 @bork(i64 %foo, i64 %bar) {
entry:
- %tmp = load i64, i64* null, align 8 ; <i64> [#uses=2]
+ %tmp = load i64, ptr null, align 8 ; <i64> [#uses=2]
%tmp2 = icmp ule i64 %tmp, 0 ; <i1> [#uses=1]
%min = select i1 %tmp2, i64 %tmp, i64 0 ; <i64> [#uses=1]
- store i64 %min, i64* null, align 8
+ store i64 %min, ptr null, align 8
ret i32 0
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
-declare i8* @bar(i32)
+declare ptr @bar(i32)
; CHECK: @foo
; CHECK: blr
-define void @foo(i8* %pp) nounwind {
+define void @foo(ptr %pp) nounwind {
entry:
- %tmp2 = tail call i8* @bar( i32 14 ) nounwind ; <i8*> [#uses=0]
- %tmp28 = bitcast i8* %pp to void ()** ; <void ()**> [#uses=1]
- %tmp38 = load void ()*, void ()** %tmp28, align 4 ; <void ()*> [#uses=2]
+ %tmp2 = tail call ptr @bar( i32 14 ) nounwind ; <ptr> [#uses=0]
+ %tmp38 = load ptr, ptr %pp, align 4 ; <ptr> [#uses=2]
br i1 false, label %bb34, label %bb25
bb25: ; preds = %entry
- %tmp30 = bitcast void ()* %tmp38 to void (i8*)* ; <void (i8*)*> [#uses=1]
- tail call void %tmp30( i8* null ) nounwind
+ tail call void %tmp38( ptr null ) nounwind
ret void
bb34: ; preds = %entry
tail call void %tmp38( ) nounwind
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
- %struct._cpp_strbuf = type { i8*, i32, i32 }
- %struct.cpp_string = type { i32, i8* }
+ %struct._cpp_strbuf = type { ptr, i32, i32 }
+ %struct.cpp_string = type { i32, ptr }
-declare fastcc void @emit_numeric_escape(i32, i32, %struct._cpp_strbuf*, i32) nounwind
+declare fastcc void @emit_numeric_escape(i32, i32, ptr, i32) nounwind
-define i32 @cpp_interpret_string(i32 %pfile, %struct.cpp_string* %from, i32 %wide) nounwind {
+define i32 @cpp_interpret_string(i32 %pfile, ptr %from, i32 %wide) nounwind {
entry:
- %tmp61 = load i32, i32* null, align 4 ; <i32> [#uses=1]
+ %tmp61 = load i32, ptr null, align 4 ; <i32> [#uses=1]
%toBool = icmp eq i32 %wide, 0 ; <i1> [#uses=2]
%iftmp.87.0 = select i1 %toBool, i32 %tmp61, i32 0 ; <i32> [#uses=2]
%tmp69 = icmp ult i32 %iftmp.87.0, 33 ; <i1> [#uses=1]
bb103: ; preds = %bb79
ret i32 0
bb130.preheader: ; preds = %bb94
- %tmp134 = getelementptr %struct.cpp_string, %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
+ %tmp134 = getelementptr %struct.cpp_string, ptr %from, i32 0, i32 1 ; <ptr> [#uses=0]
ret i32 0
bb729: ; preds = %bb94
- call fastcc void @emit_numeric_escape( i32 %pfile, i32 0, %struct._cpp_strbuf* null, i32 %wide ) nounwind
+ call fastcc void @emit_numeric_escape( i32 %pfile, i32 0, ptr null, i32 %wide ) nounwind
ret i32 1
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64--
-define i16 @test(i8* %d1, i16* %d2) {
- %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( i8* %d1, i32 0, i16* %d2 )
+define i16 @test(ptr %d1, ptr %d2) {
+ %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( ptr %d1, i32 0, ptr %d2 )
ret i16 %tmp237
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64--
-define fastcc i8* @page_rec_get_next(i8* %rec) nounwind {
+define fastcc ptr @page_rec_get_next(ptr %rec) nounwind {
entry:
- %tmp2627 = ptrtoint i8* %rec to i64 ; <i64> [#uses=2]
+ %tmp2627 = ptrtoint ptr %rec to i64 ; <i64> [#uses=2]
%tmp28 = and i64 %tmp2627, -16384 ; <i64> [#uses=2]
- %tmp2829 = inttoptr i64 %tmp28 to i8* ; <i8*> [#uses=1]
- %tmp37 = getelementptr i8, i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
- %tmp40 = load i8, i8* %tmp37, align 1 ; <i8> [#uses=1]
+ %tmp2829 = inttoptr i64 %tmp28 to ptr ; <ptr> [#uses=1]
+ %tmp37 = getelementptr i8, ptr %tmp2829, i64 42 ; <ptr> [#uses=1]
+ %tmp40 = load i8, ptr %tmp37, align 1 ; <i8> [#uses=1]
%tmp4041 = zext i8 %tmp40 to i64 ; <i64> [#uses=1]
%tmp42 = shl i64 %tmp4041, 8 ; <i64> [#uses=1]
%tmp47 = add i64 %tmp42, 0 ; <i64> [#uses=1]
%tmp72 = icmp eq i64 %tmp52, 0 ; <i1> [#uses=1]
br i1 %tmp72, label %bb91, label %bb
bb: ; preds = %entry
- ret i8* null
+ ret ptr null
bb91: ; preds = %entry
br i1 false, label %bb100, label %bb185
bb100: ; preds = %bb91
%tmp106 = sub i64 %tmp2627, %tmp28 ; <i64> [#uses=0]
- ret i8* null
+ ret ptr null
bb185: ; preds = %bb91
- ret i8* null
+ ret ptr null
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
- %struct..0objc_object = type { %struct.objc_class* }
+ %struct..0objc_object = type { ptr }
%struct.NSArray = type { %struct..0objc_object }
%struct.NSMutableArray = type { %struct.NSArray }
- %struct.PFTPersistentSymbols = type { %struct..0objc_object, %struct.VMUSymbolicator*, %struct.NSMutableArray*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.NSMutableArray*, i8, %struct.pthread_mutex_t, %struct.NSMutableArray*, %struct.pthread_rwlock_t }
+ %struct.PFTPersistentSymbols = type { %struct..0objc_object, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8, %struct.pthread_mutex_t, ptr, %struct.pthread_rwlock_t }
%struct.VMUMachTaskContainer = type { %struct..0objc_object, i32, i32 }
- %struct.VMUSymbolicator = type { %struct..0objc_object, %struct.NSMutableArray*, %struct.NSArray*, %struct.NSArray*, %struct.VMUMachTaskContainer*, i8 }
+ %struct.VMUSymbolicator = type { %struct..0objc_object, ptr, ptr, ptr, ptr, i8 }
%struct.__CFDictionary = type opaque
- %struct.__builtin_CFString = type { i32*, i32, i8*, i32 }
+ %struct.__builtin_CFString = type { ptr, i32, ptr, i32 }
%struct.objc_class = type opaque
%struct.objc_selector = type opaque
%struct.pthread_mutex_t = type { i32, [40 x i8] }
%struct.pthread_rwlock_t = type { i32, [124 x i8] }
-@0 = external constant %struct.__builtin_CFString ; <%struct.__builtin_CFString*>:0 [#uses=1]
+@0 = external constant %struct.__builtin_CFString ; <ptr>:0 [#uses=1]
-define void @"-[PFTPersistentSymbols saveSymbolWithName:address:path:lineNumber:flags:owner:]"(%struct.PFTPersistentSymbols* %self, %struct.objc_selector* %_cmd, %struct.NSArray* %name, i64 %address, %struct.NSArray* %path, i32 %lineNumber, i64 %flags, %struct..0objc_object* %owner) nounwind {
+define void @"-[PFTPersistentSymbols saveSymbolWithName:address:path:lineNumber:flags:owner:]"(ptr %self, ptr %_cmd, ptr %name, i64 %address, ptr %path, i32 %lineNumber, i64 %flags, ptr %owner) nounwind {
entry:
br i1 false, label %bb12, label %bb21
bb12: ; preds = %entry
- %tmp17 = tail call signext i8 inttoptr (i64 4294901504 to i8 (%struct..0objc_object*, %struct.objc_selector*, %struct.NSArray*)*)( %struct..0objc_object* null, %struct.objc_selector* null, %struct.NSArray* bitcast (%struct.__builtin_CFString* @0 to %struct.NSArray*) ) nounwind ; <i8> [#uses=0]
+ %tmp17 = tail call signext i8 inttoptr (i64 4294901504 to ptr)( ptr null, ptr null, ptr @0 ) nounwind ; <i8> [#uses=0]
br i1 false, label %bb25, label %bb21
bb21: ; preds = %bb12, %entry
%tmp24 = or i64 %flags, 4 ; <i64> [#uses=1]
; CHECK: blr
define i32 @t(i64 %byteStart, i32 %activeIndex) nounwind {
entry:
- %tmp50 = load i32, i32* null, align 4 ; <i32> [#uses=1]
+ %tmp50 = load i32, ptr null, align 4 ; <i32> [#uses=1]
%tmp5051 = zext i32 %tmp50 to i64 ; <i64> [#uses=3]
%tmp53 = udiv i64 %byteStart, %tmp5051 ; <i64> [#uses=1]
%tmp5354 = trunc i64 %tmp53 to i32 ; <i32> [#uses=1]
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
; Avoid reading memory that's already freed.
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 (i64)* @_Z13GetSectorSizey to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
+@llvm.used = appending global [1 x ptr] [ ptr @_Z13GetSectorSizey ], section "llvm.metadata" ; <ptr> [#uses=0]
; CHECK: @_Z13GetSectorSizey
; CHECK: blr
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
-@_ZL10DeviceCode = internal global i16 0 ; <i16*> [#uses=1]
-@.str19 = internal constant [64 x i8] c"unlock_then_erase_sector: failed to erase block (status= 0x%x)\0A\00" ; <[64 x i8]*> [#uses=1]
-@.str34 = internal constant [68 x i8] c"ProgramByWords - Erasing sector 0x%llx to 0x%llx (size 0x%x bytes)\0A\00" ; <[68 x i8]*> [#uses=1]
-@.str35 = internal constant [37 x i8] c"ProgramByWords - Done erasing flash\0A\00" ; <[37 x i8]*> [#uses=1]
-@.str36 = internal constant [48 x i8] c"ProgramByWords - Starting to write to FLASH...\0A\00" ; <[48 x i8]*> [#uses=1]
+@_ZL10DeviceCode = internal global i16 0 ; <ptr> [#uses=1]
+@.str19 = internal constant [64 x i8] c"unlock_then_erase_sector: failed to erase block (status= 0x%x)\0A\00" ; <ptr> [#uses=1]
+@.str34 = internal constant [68 x i8] c"ProgramByWords - Erasing sector 0x%llx to 0x%llx (size 0x%x bytes)\0A\00" ; <ptr> [#uses=1]
+@.str35 = internal constant [37 x i8] c"ProgramByWords - Done erasing flash\0A\00" ; <ptr> [#uses=1]
+@.str36 = internal constant [48 x i8] c"ProgramByWords - Starting to write to FLASH...\0A\00" ; <ptr> [#uses=1]
-declare void @IOLog(i8*, ...)
+declare void @IOLog(ptr, ...)
declare void @IODelay(i32)
; CHECK: @_Z14ProgramByWordsPvyy
; CHECK: blr
; Reduced PowerPC codegen regression test: flash-programming loop
; (erase sectors, then program by 16-bit words), driven by the RUN/CHECK
; lines earlier in this file ("Avoid reading memory that's already freed").
; NOTE(review): this chunk is a typed-pointer -> opaque-pointer migration
; diff; leading '-'/'+' are diff markers, not IR. Several values/blocks
; (%tmp34, %bb, %bb65.i, %tmp67.i) are referenced but not defined in the
; visible text -- bugpoint-reduced IR; keep tokens byte-for-byte or the
; test no longer reproduces the original codegen pattern.
-define i32 @_Z14ProgramByWordsPvyy(i8* %buffer, i64 %Offset, i64 %bufferSize) nounwind {
+define i32 @_Z14ProgramByWordsPvyy(ptr %buffer, i64 %Offset, i64 %bufferSize) nounwind {
entry:
; Deliberate volatile store to address null (memory-mapped flash command
; write in the original driver); must not be deleted or folded.
- store volatile i8 -1, i8* null, align 1
+ store volatile i8 -1, ptr null, align 1
%tmp28 = icmp eq i8 0, 0 ; <i1> [#uses=1]
br i1 %tmp28, label %bb107, label %bb
bb31: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
; Sector-erase loop header; %Pos.0.reg2mem.0 tracks the current flash offset.
%Pos.0.reg2mem.0 = phi i64 [ %tmp93, %_Z24unlock_then_erase_sectory.exit ], [ %Offset, %bb ] ; <i64> [#uses=3]
- %tmp35 = load i16, i16* @_ZL10DeviceCode, align 2 ; <i16> [#uses=1]
+ %tmp35 = load i16, ptr @_ZL10DeviceCode, align 2 ; <i16> [#uses=1]
%tmp3536 = zext i16 %tmp35 to i32 ; <i32> [#uses=2]
%tmp37 = and i32 %tmp3536, 65520 ; <i32> [#uses=1]
%tmp38 = icmp eq i32 %tmp37, 35008 ; <i1> [#uses=1]
ret i32 0
bb68: ; preds = %bb31
; Log "Erasing sector ..." (format string @.str34), then compute the
; memory-mapped sector address from the current position.
- tail call void (i8*, ...) @IOLog( i8* getelementptr ([68 x i8], [68 x i8]* @.str34, i32 0, i32 0), i64 %tmp34, i64 0, i32 131072 ) nounwind
+ tail call void (ptr, ...) @IOLog( ptr @.str34, i64 %tmp34, i64 0, i32 131072 ) nounwind
%tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; <i32> [#uses=1]
- %tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; <i8*> [#uses=1]
+ %tmp202122.i = inttoptr i32 %tmp2021.i to ptr ; <ptr> [#uses=1]
tail call void @IODelay( i32 500 ) nounwind
; Volatile status-register poll (address reduced to null by bugpoint).
- %tmp53.i = load volatile i16, i16* null, align 2 ; <i16> [#uses=2]
+ %tmp53.i = load volatile i16, ptr null, align 2 ; <i16> [#uses=2]
%tmp5455.i = zext i16 %tmp53.i to i32 ; <i32> [#uses=1]
br i1 false, label %bb.i, label %bb65.i
br i1 %tmp67.i, label %_Z24unlock_then_erase_sectory.exit, label %bb70.i
bb70.i: ; preds = %bb65.i
; Erase failed: log status via @.str19 and bail out.
- tail call void (i8*, ...) @IOLog( i8* getelementptr ([64 x i8], [64 x i8]* @.str19, i32 0, i32 0), i32 %tmp5455.i ) nounwind
+ tail call void (ptr, ...) @IOLog( ptr @.str19, i32 %tmp5455.i ) nounwind
ret i32 0
_Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
; Write reset command to the sector address, advance, and loop while
; more sectors remain (%tmp98 > %tmp93).
- store volatile i8 -1, i8* %tmp202122.i, align 1
+ store volatile i8 -1, ptr %tmp202122.i, align 1
%tmp93 = add i64 0, %Pos.0.reg2mem.0 ; <i64> [#uses=2]
%tmp98 = add i64 0, %Offset ; <i64> [#uses=1]
%tmp100 = icmp ugt i64 %tmp98, %tmp93 ; <i1> [#uses=1]
br i1 %tmp100, label %bb31, label %bb103
bb103: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
; Done erasing (@.str35).
- tail call void (i8*, ...) @IOLog( i8* getelementptr ([37 x i8], [37 x i8]* @.str35, i32 0, i32 0) ) nounwind
+ tail call void (ptr, ...) @IOLog( ptr @.str35 ) nounwind
ret i32 0
bb107: ; preds = %entry
; Programming phase (@.str36): treat %buffer as i16 words; the GEP result
; is intentionally unused ([#uses=0]) in this reduction.
- tail call void (i8*, ...) @IOLog( i8* getelementptr ([48 x i8], [48 x i8]* @.str36, i32 0, i32 0) ) nounwind
- %tmp114115 = bitcast i8* %buffer to i16* ; <i16*> [#uses=1]
+ tail call void (ptr, ...) @IOLog( ptr @.str36 ) nounwind
%tmp256 = lshr i64 %bufferSize, 1 ; <i64> [#uses=1]
%tmp256257 = trunc i64 %tmp256 to i32 ; <i32> [#uses=1]
- %tmp258 = getelementptr i16, i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
+ %tmp258 = getelementptr i16, ptr %buffer, i32 %tmp256257 ; <ptr> [#uses=0]
ret i32 0
}
; Companion stub kept only so the module containing @_Z14ProgramByWordsPvyy
; still references this mangled name; body reduced to a bare `unreachable`.
-define i32 @_Z17program_64B_blockyPm(i64 %Base, i32* %pData) nounwind {
+define i32 @_Z17program_64B_blockyPm(i64 %Base, ptr %pData) nounwind {
entry:
unreachable
}
; ppc_fp128 copy test: a 128-bit PowerPC long-double load/store pair.
; NOTE(review): @g is not declared in the visible text (this test file is
; cut at the chunk seam); @h below is its external counterpart.
@h = external global ppc_fp128
define void @f() {
; *@h = *@g  -- exercises lowering of a full ppc_fp128 move.
- %tmp = load ppc_fp128, ppc_fp128* @g
- store ppc_fp128 %tmp, ppc_fp128* @h
+ %tmp = load ppc_fp128, ptr @g
+ store ppc_fp128 %tmp, ptr @h
ret void
}
br i1 true, label %bb1, label %bb3
bb1:
- %tmp1 = load i8, i8* null, align 1
+ %tmp1 = load i8, ptr null, align 1
%tmp2 = icmp eq i8 %tmp1, 0
br label %bb2
%"struct.DecodeComplexityInfo::InterStats" = type { i32, i32, i32, i32, [5 x i32], [3 x i32], [4 x [4 x i32]], [4 x i32], i32, %struct.MotionVectors, %struct.MotionVectors }
%"struct.DecodeComplexityInfo::IntraStats" = type { i32, i32, i32, [5 x i32], [3 x i32], [4 x i32], [3 x i32] }
%struct.DecodeComplexityOptions = type { i8, i8, i32, double, i8, float, i8, float, i8, i8, i8, i8, i8 }
- %struct.DescriptorAllocator = type { %struct.Mutex*, %struct.Mutex*, i8**, i32, i32, i8**, i32, i32, i8**, i32, i32 }
+ %struct.DescriptorAllocator = type { ptr, ptr, ptr, i32, i32, ptr, i32, i32, ptr, i32, i32 }
%struct.DetailsFromSliceType = type <{ i8 }>
- %struct.FlatnessAnalysis = type { i16, i16, i32, i32*, i8*, [512 x i32], [256 x i32] }
- %struct.Frame = type <{ i8, i8, i8, i8, i8, [3 x i8], i32, i32, %struct.Mutex*, %struct.Condv*, [8 x i8], %struct.FramePixels, %struct.FrameMotionVectorCache, %struct.FrameIndex, i32, i8*, i8*, i8*, i8*, i16*, %struct.FlatnessAnalysis, %struct.NoiseAnalysis, %struct.VisualActivity, %struct.FrameMotionInfo, %struct.FrameMotionAnalysis, %struct.FrameDataRateParameters, %struct.FrameEncoderTags, %struct.DecodeComplexityInfo, %struct.DecodeComplexityOptions, %struct.MotionInfoFor16x16_FasterSP*, [1 x i32] }>
+ %struct.FlatnessAnalysis = type { i16, i16, i32, ptr, ptr, [512 x i32], [256 x i32] }
+ %struct.Frame = type <{ i8, i8, i8, i8, i8, [3 x i8], i32, i32, ptr, ptr, [8 x i8], %struct.FramePixels, %struct.FrameMotionVectorCache, %struct.FrameIndex, i32, ptr, ptr, ptr, ptr, ptr, %struct.FlatnessAnalysis, %struct.NoiseAnalysis, %struct.VisualActivity, %struct.FrameMotionInfo, %struct.FrameMotionAnalysis, %struct.FrameDataRateParameters, %struct.FrameEncoderTags, %struct.DecodeComplexityInfo, %struct.DecodeComplexityOptions, ptr, [1 x i32] }>
%struct.FrameDataRateParameters = type { i32, float, i8, i8 }
%struct.FrameEncoderTags = type { i8, i8, i32, i8, i8, float }
- %struct.FrameIndex = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i32, i32, %struct.Frame*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %struct.DHBFLayerId }
- %struct.FrameMotionAnalysis = type { i32, i32, i32, %struct.MoEstMotion16x16*, %struct.MbAnalysis*, i32, i32, i16, i16, i32, i32, i32, i32, i8, i8 }
- %struct.FrameMotionInfo = type { i32, i32, %struct.MoEstMbMotionInfo*, i32, i32, i32, i32, i32 }
- %struct.FrameMotionVectorCache = type <{ %struct.ThreadAllocator**, i32, i32, i32, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, [3 x %struct.BiPartSrcDescriptor*], %struct.BiPartSrcDescriptor** }>
- %struct.FramePixels = type <{ i8, i8, i8, i8, i8, i8, i8, i8, i8*, i8*, i32, [4 x i8*], [4 x i8*], [2 x [4 x i32]], [2 x [4 x i32]], %struct.PixelData, %struct.InterpolationCache*, %struct.InterpolationCache*, %struct.InterpolationCache*, [16 x i16], [16 x i16], [12 x i8], %"struct.PortableSInt32Array<4>", %"struct.PortableSInt32Array<8>", %struct.ICOffsetArraysY, %struct.UVSrcOffsetEtcX_Struct*, i32*, i32*, [3 x i32] }>
+ %struct.FrameIndex = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i32, i32, ptr, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %struct.DHBFLayerId }
+ %struct.FrameMotionAnalysis = type { i32, i32, i32, ptr, ptr, i32, i32, i16, i16, i32, i32, i32, i32, i8, i8 }
+ %struct.FrameMotionInfo = type { i32, i32, ptr, i32, i32, i32, i32, i32 }
+ %struct.FrameMotionVectorCache = type <{ ptr, i32, i32, i32, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, [3 x ptr], ptr }>
+ %struct.FramePixels = type <{ i8, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, i32, [4 x ptr], [4 x ptr], [2 x [4 x i32]], [2 x [4 x i32]], %struct.PixelData, ptr, ptr, ptr, [16 x i16], [16 x i16], [12 x i8], %"struct.PortableSInt32Array<4>", %"struct.PortableSInt32Array<8>", %struct.ICOffsetArraysY, ptr, ptr, ptr, [3 x i32] }>
%struct.ICOffsetArraysY = type { [21 x i32], [21 x i32], [4 x [21 x i32]] }
%struct.InterpolationCache = type opaque
- %struct.LoopFilterInfo = type { %struct.BiPartSrcDescriptor**, i32, i32, i32, i32, i32*, i32, %"struct.LoopFilterInfo::SliceInfoStruct"*, i32, %struct.Mutex*, i16*, %struct.FramePixels*, i8*, i8*, i8*, i8*, i8*, %struct.PerMacroblockBoundaryStrengths*, %struct.Mutex*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8*, i8*, i8, void (i8*, i8*, i32, i32, i32, i32, i32, i8*, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i8*, i32, i8*)*, i32 }
- %"struct.LoopFilterInfo::SliceInfoStruct" = type { %"struct.LoopFilterInfo::SliceInfoStruct::LFDisableStats", i8, i8, i8, i8, [17 x %struct.Frame*], [17 x %struct.Frame*] }
+ %struct.LoopFilterInfo = type { ptr, i32, i32, i32, i32, ptr, i32, ptr, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, ptr, ptr, i8, ptr, ptr, i32 }
+ %"struct.LoopFilterInfo::SliceInfoStruct" = type { %"struct.LoopFilterInfo::SliceInfoStruct::LFDisableStats", i8, i8, i8, i8, [17 x ptr], [17 x ptr] }
%"struct.LoopFilterInfo::SliceInfoStruct::LFDisableStats" = type { i32, i32 }
- %struct.LoopFilterParam = type { i32, %struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache* }
+ %struct.LoopFilterParam = type { i32, ptr, ptr, ptr }
%struct.Map4x4ToPartIdx = type { i16 }
%struct.MbAnalysis = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %struct.RdCost, %struct.RdCost, i32 }
%struct.MoEstMbMotionInfo = type { i32, i32, i32, i32, [16 x %struct.MoEstPartMotionInfo] }
%struct.MotionVectors = type { %"struct.MotionVectors::$_103" }
%"struct.MotionVectors::$_103" = type { i32 }
%struct.Mutex = type opaque
- %struct.NoiseAnalysis = type { i16, i16, i32, i8*, i8*, i8*, [512 x i32] }
+ %struct.NoiseAnalysis = type { i16, i16, i32, ptr, ptr, ptr, [512 x i32] }
%struct.PartGeom = type { %struct.Map4x4ToPartIdx }
%struct.PerMacroblockBoundaryStrengths = type { [16 x i8], [16 x i8], [4 x i8], [4 x i8], [2 x i32] }
- %struct.PixelData = type { i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8 }
+ %struct.PixelData = type { ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8 }
%"struct.PortableSInt32Array<4>" = type { [4 x i32] }
%"struct.PortableSInt32Array<8>" = type { [8 x i32] }
%struct.RdCost = type { i32, i32, i32, double }
- %struct.ThreadAllocator = type { %struct.DescriptorAllocator*, %struct.BiPartSrcDescriptor*, [256 x %struct.BiPartSrcDescriptor*], i32, i32, i32 }
+ %struct.ThreadAllocator = type { ptr, ptr, [256 x ptr], i32, i32, i32 }
%struct.ThreadedBatch = type opaque
%struct.UVSrcOffsetEtcX_Struct = type <{ i16 }>
- %struct.VisualActivity = type { i16, i16, i32, i32, i32*, i32*, i32, i32, i32*, i32, i32, i32, i32, i32, i8*, i32, [2 x i32], i32, i32, i32, i16*, i16, i16, i16, i16, float, i8*, i32*, i32, i32, i8 }
-@_ZL33table_8_14_indexA_to_alpha_scalar = external constant [64 x i8] ; <[64 x i8]*> [#uses=0]
-@_ZL32table_8_14_indexB_to_beta_scalar = external constant [64 x i8] ; <[64 x i8]*> [#uses=0]
-@_ZL34table_8_15_indexA_bS_to_tc0_scalar = external constant [64 x [4 x i8]] ; <[64 x [4 x i8]]*> [#uses=0]
-@gkDummy = external global i32 ; <i32*> [#uses=0]
-@gkDetailsFromSliceTypeArray = external constant [10 x %struct.DetailsFromSliceType] ; <[10 x %struct.DetailsFromSliceType]*> [#uses=0]
+ %struct.VisualActivity = type { i16, i16, i32, i32, ptr, ptr, i32, i32, ptr, i32, i32, i32, i32, i32, ptr, i32, [2 x i32], i32, i32, i32, ptr, i16, i16, i16, i16, float, ptr, ptr, i32, i32, i8 }
+@_ZL33table_8_14_indexA_to_alpha_scalar = external constant [64 x i8] ; <ptr> [#uses=0]
+@_ZL32table_8_14_indexB_to_beta_scalar = external constant [64 x i8] ; <ptr> [#uses=0]
+@_ZL34table_8_15_indexA_bS_to_tc0_scalar = external constant [64 x [4 x i8]] ; <ptr> [#uses=0]
+@gkDummy = external global i32 ; <ptr> [#uses=0]
+@gkDetailsFromSliceTypeArray = external constant [10 x %struct.DetailsFromSliceType] ; <ptr> [#uses=0]
-declare i32 @_Z20LoopFilter_ConstructP14LoopFilterInfojj(%struct.LoopFilterInfo*, i32, i32)
+declare i32 @_Z20LoopFilter_ConstructP14LoopFilterInfojj(ptr, i32, i32)
-declare i32 @_Z25LF_Threading2_assert_doneP14LoopFilterInfo(%struct.LoopFilterInfo*) nounwind
+declare i32 @_Z25LF_Threading2_assert_doneP14LoopFilterInfo(ptr) nounwind
-declare i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj(%struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor*, i32, i32, i32) nounwind
+declare i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj(ptr, ptr, i32, i32, i32) nounwind
-declare void @_Z30LoopFilter_Internal_FilterLumaPhiiiiii(i8*, i32, i32, i32, i32, i32, i32) nounwind
+declare void @_Z30LoopFilter_Internal_FilterLumaPhiiiiii(ptr, i32, i32, i32, i32, i32, i32) nounwind
-declare void @_Z33LoopFilter_Internal_FilterChromaVPhiiiiiiiiii(i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind
+declare void @_Z33LoopFilter_Internal_FilterChromaVPhiiiiiiiiii(ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind
-declare void @_Z33LoopFilter_Internal_FilterChromaHPhiiiiii(i8*, i32, i32, i32, i32, i32, i32) nounwind
+declare void @_Z33LoopFilter_Internal_FilterChromaHPhiiiiii(ptr, i32, i32, i32, i32, i32, i32) nounwind
-declare void @_Z42LoopFilter_Internal_filter_macroblock_lumaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(%struct.LoopFilterInfo*, i8*, i8*, i32, i32, i32, %struct.PerMacroblockBoundaryStrengths*, i32, i32) nounwind
+declare void @_Z42LoopFilter_Internal_filter_macroblock_lumaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(ptr, ptr, ptr, i32, i32, i32, ptr, i32, i32) nounwind
-declare i32 @_Z40LoopFilter_Internal_FilterLumaPlaneMBAFFPK14LoopFilterInfojjj(%struct.LoopFilterInfo*, i32, i32, i32) nounwind
+declare i32 @_Z40LoopFilter_Internal_FilterLumaPlaneMBAFFPK14LoopFilterInfojjj(ptr, i32, i32, i32) nounwind
-declare void @_Z18LoopFilter_DestroyP14LoopFilterInfo(%struct.LoopFilterInfo*)
+declare void @_Z18LoopFilter_DestroyP14LoopFilterInfo(ptr)
-declare void @MutexDispose(%struct.Mutex*)
+declare void @MutexDispose(ptr)
-declare void @_ZdaPv(i8*) nounwind
+declare void @_ZdaPv(ptr) nounwind
-declare void @jvtDisposePTRVectorAligned(i8*)
+declare void @jvtDisposePTRVectorAligned(ptr)
-declare void @jvtDisposePTR(i8*)
+declare void @jvtDisposePTR(ptr)
-declare void @jvtDisposePTRMemAligned(i8*)
+declare void @jvtDisposePTRMemAligned(ptr)
-declare void @_Z31LoopFilter_Internal_ResetTablesP14LoopFilterInfo(%struct.LoopFilterInfo*) nounwind
+declare void @_Z31LoopFilter_Internal_ResetTablesP14LoopFilterInfo(ptr) nounwind
; Large bugpoint-reduced test from an H.264 deblocking filter
; (MBAFF boundary-strength calculation). FileCheck only requires the
; function label and a final `blr` in the llc output.
; NOTE(review): heavily reduced IR -- many SSA values (%19, %40, %42, %46,
; %51, %52, %86, %147, %152 uses before defs, %idxEachField11.0773,
; %fEdgeHasNonZeroBS.0, %163, blocks %bb.i/%bb395) are referenced without
; visible definitions, and the closing `}` is missing at this chunk's
; concatenation seam. Keep every token byte-for-byte; do not "repair".
; CHECK: _Z60LoopFilter_Internal_CalculateBoundaryStrengths_MbaffFramePicPK14LoopFilterInfoP22FrameMotionVectorCachejj
; CHECK: blr
-define i32 @_Z60LoopFilter_Internal_CalculateBoundaryStrengths_MbaffFramePicPK14LoopFilterInfoP22FrameMotionVectorCachejj(%struct.LoopFilterInfo* %lfiPtr, %struct.FrameMotionVectorCache* %frameMotionVectorCachePtr, i32 %mbY_min, i32 %mbY_maxPlus1) nounwind {
+define i32 @_Z60LoopFilter_Internal_CalculateBoundaryStrengths_MbaffFramePicPK14LoopFilterInfoP22FrameMotionVectorCachejj(ptr %lfiPtr, ptr %frameMotionVectorCachePtr, i32 %mbY_min, i32 %mbY_maxPlus1) nounwind {
entry:
; Early-out when the macroblock row range [mbY_min, mbY_maxPlus1) is empty.
icmp ult i32 %mbY_min, %mbY_maxPlus1 ; <i1>:0 [#uses=1]
br i1 %0, label %bb16, label %bb642
bb16: ; preds = %entry
; Loop preheader: pointer arithmetic on null bases (addresses reduced away).
- bitcast %struct.PerMacroblockBoundaryStrengths* null to i32* ; <i32*>:1 [#uses=3]
- getelementptr i32, i32* %1, i32 1 ; <i32*>:2 [#uses=0]
- getelementptr i32, i32* %1, i32 2 ; <i32*>:3 [#uses=0]
- getelementptr i32, i32* %1, i32 3 ; <i32*>:4 [#uses=0]
- bitcast [16 x i8]* null to i32* ; <i32*>:5 [#uses=3]
- getelementptr i32, i32* %5, i32 1 ; <i32*>:6 [#uses=0]
- getelementptr i32, i32* %5, i32 2 ; <i32*>:7 [#uses=0]
- getelementptr i32, i32* %5, i32 3 ; <i32*>:8 [#uses=0]
+ bitcast ptr null to ptr ; <ptr>:1 [#uses=3]
+ getelementptr i32, ptr %1, i32 1 ; <ptr>:2 [#uses=0]
+ getelementptr i32, ptr %1, i32 2 ; <ptr>:3 [#uses=0]
+ getelementptr i32, ptr %1, i32 3 ; <ptr>:4 [#uses=0]
+ bitcast ptr null to ptr ; <ptr>:5 [#uses=3]
+ getelementptr i32, ptr %5, i32 1 ; <ptr>:6 [#uses=0]
+ getelementptr i32, ptr %5, i32 2 ; <ptr>:7 [#uses=0]
+ getelementptr i32, ptr %5, i32 3 ; <ptr>:8 [#uses=0]
icmp eq i32 0, 0 ; <i1>:9 [#uses=0]
lshr i32 0, 30 ; <i32>:10 [#uses=0]
and i32 0, 268435455 ; <i32>:11 [#uses=0]
%.not658 = icmp ne i32 0, 0 ; <i1> [#uses=1]
and i32 0, 268369920 ; <i32>:20 [#uses=1]
icmp eq i32 %20, 268369920 ; <i1>:21 [#uses=2]
- getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
- getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, ptr null, i32 0, i32 2 ; <ptr>:22 [#uses=1]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, ptr null, i32 0, i32 2, i32 0 ; <ptr>:23 [#uses=0]
and i32 0, -2 ; <i32>:24 [#uses=1]
add i32 %24, -1 ; <i32>:25 [#uses=0]
- bitcast [4 x i8]* %22 to i32* ; <i32*>:26 [#uses=3]
- getelementptr i32, i32* %26, i32 1 ; <i32*>:27 [#uses=0]
- getelementptr i32, i32* %26, i32 2 ; <i32*>:28 [#uses=0]
- getelementptr i32, i32* %26, i32 3 ; <i32*>:29 [#uses=0]
+ bitcast ptr %22 to ptr ; <ptr>:26 [#uses=3]
+ getelementptr i32, ptr %26, i32 1 ; <ptr>:27 [#uses=0]
+ getelementptr i32, ptr %26, i32 2 ; <ptr>:28 [#uses=0]
+ getelementptr i32, ptr %26, i32 3 ; <ptr>:29 [#uses=0]
br label %bb144
bb144: ; preds = %bb395, %bb16
; Per-macroblock loop: boundaryStrengthsV walks an i8 strength array in
; steps of 4 (%158 below); numEdgesToTest is 4, or 1 for the fast case %21.
%mbXYLeft.2775 = phi i32 [ 0, %bb16 ], [ %mbXYLeft.2775, %bb395 ] ; <i32> [#uses=1]
%mixedModeLeftEdgeOfMbFlag.2774 = phi i32 [ 0, %bb16 ], [ 0, %bb395 ] ; <i32> [#uses=0]
%mbIndexLeft.2772 = phi i32 [ 0, %bb16 ], [ %mbIndexLeft.2772, %bb395 ] ; <i32> [#uses=2]
- %boundaryStrengthsV.1771 = phi i8* [ null, %bb16 ], [ %158, %bb395 ] ; <i8*> [#uses=2]
+ %boundaryStrengthsV.1771 = phi ptr [ null, %bb16 ], [ %158, %bb395 ] ; <ptr> [#uses=2]
%numEdgesToTest.1770 = phi i32 [ 4, %bb16 ], [ %numEdgesToTest.2, %bb395 ] ; <i32> [#uses=1]
icmp eq i32 %idxEachField11.0773, 0 ; <i1>:30 [#uses=0]
- getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
- load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %31, align 4 ; <%struct.BiPartSrcDescriptor*>:32 [#uses=0]
+ getelementptr ptr, ptr null, i32 %mbIndexLeft.2772 ; <ptr>:31 [#uses=1]
+ load ptr, ptr %31, align 4 ; <ptr>:32 [#uses=0]
%fMacroblockHasNonZeroBS.4 = select i1 %21, i32 1, i32 0 ; <i32> [#uses=1]
%numEdgesToTest.2 = select i1 %21, i32 1, i32 %numEdgesToTest.1770 ; <i32> [#uses=2]
- store i8 32, i8* %boundaryStrengthsV.1771, align 1
+ store i8 32, ptr %boundaryStrengthsV.1771, align 1
br label %labelContinueEdgesLoopV
bb200: ; preds = %labelContinueEdgesLoopV
br i1 %40, label %bb205, label %bb206
bb205: ; preds = %bb200
- store i8 32, i8* %158, align 1
+ store i8 32, ptr %158, align 1
br label %labelContinueEdgesLoopV
bb206: ; preds = %bb200
; Slow path: fetch the two BiPartSrcDescriptor entries for this edge and
; compare their motion-vector components field by field.
add i32 %52, %42 ; <i32>:53 [#uses=1]
mul i32 %51, 0 ; <i32>:54 [#uses=1]
add i32 %46, %54 ; <i32>:55 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
- load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %56, align 4 ; <%struct.BiPartSrcDescriptor*>:57 [#uses=7]
- getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
- load %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** %58, align 4 ; <%struct.BiPartSrcDescriptor*>:59 [#uses=5]
+ getelementptr ptr, ptr null, i32 %53 ; <ptr>:56 [#uses=1]
+ load ptr, ptr %56, align 4 ; <ptr>:57 [#uses=7]
+ getelementptr ptr, ptr null, i32 %55 ; <ptr>:58 [#uses=1]
+ load ptr, ptr %58, align 4 ; <ptr>:59 [#uses=5]
icmp slt i32 %159, 0 ; <i1>:60 [#uses=0]
- icmp eq %struct.BiPartSrcDescriptor* %57, %59 ; <i1>:61 [#uses=0]
- bitcast %struct.BiPartSrcDescriptor* %57 to i16* ; <i16*>:62 [#uses=5]
- load i16, i16* %62, align 2 ; <i16>:63 [#uses=2]
- getelementptr i16, i16* %62, i32 1 ; <i16*>:64 [#uses=1]
- load i16, i16* %64, align 2 ; <i16>:65 [#uses=2]
- getelementptr i16, i16* %62, i32 2 ; <i16*>:66 [#uses=1]
- load i16, i16* %66, align 2 ; <i16>:67 [#uses=2]
- getelementptr i16, i16* %62, i32 3 ; <i16*>:68 [#uses=1]
- load i16, i16* %68, align 2 ; <i16>:69 [#uses=2]
- getelementptr i16, i16* %62, i32 6 ; <i16*>:70 [#uses=1]
- load i16, i16* %70, align 2 ; <i16>:71 [#uses=2]
- bitcast %struct.BiPartSrcDescriptor* %59 to i16* ; <i16*>:72 [#uses=5]
- load i16, i16* %72, align 2 ; <i16>:73 [#uses=2]
- getelementptr i16, i16* %72, i32 1 ; <i16*>:74 [#uses=1]
- load i16, i16* %74, align 2 ; <i16>:75 [#uses=2]
- getelementptr i16, i16* %72, i32 2 ; <i16*>:76 [#uses=1]
- load i16, i16* %76, align 2 ; <i16>:77 [#uses=2]
- getelementptr i16, i16* %72, i32 3 ; <i16*>:78 [#uses=1]
- load i16, i16* %78, align 2 ; <i16>:79 [#uses=2]
- getelementptr i16, i16* %72, i32 6 ; <i16*>:80 [#uses=1]
- load i16, i16* %80, align 2 ; <i16>:81 [#uses=2]
+ icmp eq ptr %57, %59 ; <i1>:61 [#uses=0]
+ bitcast ptr %57 to ptr ; <ptr>:62 [#uses=5]
+ load i16, ptr %62, align 2 ; <i16>:63 [#uses=2]
+ getelementptr i16, ptr %62, i32 1 ; <ptr>:64 [#uses=1]
+ load i16, ptr %64, align 2 ; <i16>:65 [#uses=2]
+ getelementptr i16, ptr %62, i32 2 ; <ptr>:66 [#uses=1]
+ load i16, ptr %66, align 2 ; <i16>:67 [#uses=2]
+ getelementptr i16, ptr %62, i32 3 ; <ptr>:68 [#uses=1]
+ load i16, ptr %68, align 2 ; <i16>:69 [#uses=2]
+ getelementptr i16, ptr %62, i32 6 ; <ptr>:70 [#uses=1]
+ load i16, ptr %70, align 2 ; <i16>:71 [#uses=2]
+ bitcast ptr %59 to ptr ; <ptr>:72 [#uses=5]
+ load i16, ptr %72, align 2 ; <i16>:73 [#uses=2]
+ getelementptr i16, ptr %72, i32 1 ; <ptr>:74 [#uses=1]
+ load i16, ptr %74, align 2 ; <i16>:75 [#uses=2]
+ getelementptr i16, ptr %72, i32 2 ; <ptr>:76 [#uses=1]
+ load i16, ptr %76, align 2 ; <i16>:77 [#uses=2]
+ getelementptr i16, ptr %72, i32 3 ; <ptr>:78 [#uses=1]
+ load i16, ptr %78, align 2 ; <i16>:79 [#uses=2]
+ getelementptr i16, ptr %72, i32 6 ; <ptr>:80 [#uses=1]
+ load i16, ptr %80, align 2 ; <i16>:81 [#uses=2]
; Component deltas between the two descriptors, then abs() via
; select(slt, -x, x) for the reduced survivor %86.
sub i16 %63, %73 ; <i16>:82 [#uses=3]
sub i16 %65, %75 ; <i16>:83 [#uses=3]
sub i16 %67, %77 ; <i16>:84 [#uses=3]
sub i16 0, %86 ; <i16>:95 [#uses=1]
icmp slt i16 %86, 0 ; <i1>:96 [#uses=1]
%.663 = select i1 %96, i16 %95, i16 %86 ; <i16> [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
- load i8, i8* %97, align 1 ; <i8>:98 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <ptr>:97 [#uses=1]
+ load i8, ptr %97, align 1 ; <i8>:98 [#uses=1]
zext i8 %98 to i32 ; <i32>:99 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
- load i8, i8* %100, align 1 ; <i8>:101 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <ptr>:100 [#uses=1]
+ load i8, ptr %100, align 1 ; <i8>:101 [#uses=1]
zext i8 %101 to i32 ; <i32>:102 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
- load i8, i8* %103, align 1 ; <i8>:104 [#uses=2]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <ptr>:103 [#uses=1]
+ load i8, ptr %103, align 1 ; <i8>:104 [#uses=2]
zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
- load i8, i8* %106, align 1 ; <i8>:107 [#uses=2]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <ptr>:106 [#uses=1]
+ load i8, ptr %106, align 1 ; <i8>:107 [#uses=2]
zext i8 %107 to i32 ; <i32>:108 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
- load i8, i8* %109, align 1 ; <i8>:110 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <ptr>:109 [#uses=1]
+ load i8, ptr %109, align 1 ; <i8>:110 [#uses=1]
zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
- load i8, i8* %112, align 1 ; <i8>:113 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, ptr %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <ptr>:112 [#uses=1]
+ load i8, ptr %112, align 1 ; <i8>:113 [#uses=1]
zext i8 %113 to i32 ; <i32>:114 [#uses=1]
lshr i32 %99, 4 ; <i32>:115 [#uses=1]
and i32 %115, 2 ; <i32>:116 [#uses=1]
sub i32 %111, %108 ; <i32>:148 [#uses=1]
or i32 %147, %148 ; <i32>:149 [#uses=1]
icmp eq i32 %149, 0 ; <i1>:150 [#uses=0]
- call i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj( %struct.BiPartSrcDescriptor* %57, %struct.BiPartSrcDescriptor* %59, i32 %19, i32 0, i32 0 ) nounwind ; <i32>:151 [#uses=0]
+ call i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj( ptr %57, ptr %59, i32 %19, i32 0, i32 0 ) nounwind ; <i32>:151 [#uses=0]
unreachable
labelContinueEdgesLoopV: ; preds = %bb206, %bb205, %bb144
; Edge-loop latch: OR the per-edge flag into the strength byte
; (bit 5 pattern: shl 5 then xor 32) and advance by 4 bytes.
%fMacroblockHasNonZeroBS.6 = phi i32 [ %152, %bb205 ], [ %fMacroblockHasNonZeroBS.4, %bb144 ], [ %152, %bb206 ] ; <i32> [#uses=1]
%ixEdge.1 = phi i32 [ %160, %bb205 ], [ 0, %bb144 ], [ %160, %bb206 ] ; <i32> [#uses=1]
%bfNZ12.2 = phi i32 [ %159, %bb205 ], [ 0, %bb144 ], [ %159, %bb206 ] ; <i32> [#uses=1]
- %boundaryStrengthsV.3 = phi i8* [ %158, %bb205 ], [ %boundaryStrengthsV.1771, %bb144 ], [ %158, %bb206 ] ; <i8*> [#uses=3]
+ %boundaryStrengthsV.3 = phi ptr [ %158, %bb205 ], [ %boundaryStrengthsV.1771, %bb144 ], [ %158, %bb206 ] ; <ptr> [#uses=3]
or i32 %fMacroblockHasNonZeroBS.6, %fEdgeHasNonZeroBS.0 ; <i32>:152 [#uses=2]
- load i8, i8* %boundaryStrengthsV.3, align 1 ; <i8>:153 [#uses=1]
+ load i8, ptr %boundaryStrengthsV.3, align 1 ; <i8>:153 [#uses=1]
trunc i32 %fEdgeHasNonZeroBS.0 to i8 ; <i8>:154 [#uses=1]
shl i8 %154, 5 ; <i8>:155 [#uses=1]
xor i8 %155, 32 ; <i8>:156 [#uses=1]
or i8 %153, %156 ; <i8>:157 [#uses=1]
- store i8 %157, i8* %boundaryStrengthsV.3, align 1
- getelementptr i8, i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
+ store i8 %157, ptr %boundaryStrengthsV.3, align 1
+ getelementptr i8, ptr %boundaryStrengthsV.3, i32 4 ; <ptr>:158 [#uses=4]
shl i32 %bfNZ12.2, 4 ; <i32>:159 [#uses=4]
add i32 %ixEdge.1, 1 ; <i32>:160 [#uses=6]
icmp ult i32 %160, %numEdgesToTest.2 ; <i1>:161 [#uses=1]
br i1 %163, label %bb398, label %bb144
bb398: ; preds = %bb395
; PowerPC data-cache-touch hint via inline asm; this is the interesting
; codegen the test guards (dcbt with a "b" base-register constraint).
- call void asm sideeffect "dcbt $0, $1", "b%,r,~{memory}"( i32 19, i32* null ) nounwind
+ call void asm sideeffect "dcbt $0, $1", "b%,r,~{memory}"( i32 19, ptr null ) nounwind
unreachable
bb642: ; preds = %entry
declare i16 @llvm.bswap.i16(i16) nounwind readnone
-declare i8* @jvtNewPtrVectorAligned(i32)
+declare ptr @jvtNewPtrVectorAligned(i32)
-declare i8* @jvtNewPtr(i32)
+declare ptr @jvtNewPtr(i32)
-declare i8* @jvtNewPtrMemAligned(i32)
+declare ptr @jvtNewPtrMemAligned(i32)
-declare %struct.Mutex* @MutexNew()
+declare ptr @MutexNew()
-declare i8* @_Znam(i32)
+declare ptr @_Znam(i32)
-declare i32 @_Z24LoopFilter_FilterMbGroupP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructjjij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, %struct.ThreadedBatch*, i32, i32, i32, i32)
+declare i32 @_Z24LoopFilter_FilterMbGroupP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructjjij(ptr, ptr, ptr, ptr, i32, i32, i32, i32)
-declare void @MutexLock(%struct.Mutex*)
+declare void @MutexLock(ptr)
-declare void @MutexUnlock(%struct.Mutex*)
+declare void @MutexUnlock(ptr)
-declare i32 @_Z35LoopFilter_Internal_FilterLumaPlanePK14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
+declare i32 @_Z35LoopFilter_Internal_FilterLumaPlanePK14LoopFilterInfojjjjj(ptr, i32, i32, i32, i32, i32)
-declare i32 @_Z37LoopFilter_Internal_FilterChromaPlanePK14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
+declare i32 @_Z37LoopFilter_Internal_FilterChromaPlanePK14LoopFilterInfojjjjj(ptr, i32, i32, i32, i32, i32)
-declare void @_Z44LoopFilter_Internal_filter_macroblock_chromaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(%struct.LoopFilterInfo*, i8*, i8*, i32, i32, i32, %struct.PerMacroblockBoundaryStrengths*, i32, i32) nounwind
+declare void @_Z44LoopFilter_Internal_filter_macroblock_chromaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(ptr, ptr, ptr, i32, i32, i32, ptr, i32, i32) nounwind
-declare i32 @_Z42LoopFilter_Internal_FilterChromaPlaneMBAFFPK14LoopFilterInfojjj(%struct.LoopFilterInfo*, i32, i32, i32) nounwind
+declare i32 @_Z42LoopFilter_Internal_FilterChromaPlaneMBAFFPK14LoopFilterInfojjj(ptr, i32, i32, i32) nounwind
-declare i32 @_Z26LF_Threading2_ProcessTasksP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, i32, i32)
+declare i32 @_Z26LF_Threading2_ProcessTasksP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheij(ptr, ptr, ptr, i32, i32)
-declare i32 @_Z46LoopFilter_Internal_CalculateBoundaryStrengthsPK14LoopFilterInfoP22FrameMotionVectorCachejj(%struct.LoopFilterInfo*, %struct.FrameMotionVectorCache*, i32, i32)
+declare i32 @_Z46LoopFilter_Internal_CalculateBoundaryStrengthsPK14LoopFilterInfoP22FrameMotionVectorCachejj(ptr, ptr, i32, i32)
-declare i32 @_Z44LoopFilter_Internal_FilterLumaChromaPlane_PPP14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
+declare i32 @_Z44LoopFilter_Internal_FilterLumaChromaPlane_PPP14LoopFilterInfojjjjj(ptr, i32, i32, i32, i32, i32)
-declare i32 @_Z22LoopFilter_FilterFrameP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, %struct.ThreadedBatch*, i32, i32)
+declare i32 @_Z22LoopFilter_FilterFrameP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructij(ptr, ptr, ptr, ptr, i32, i32)
-declare void @_Z34LF_Threading2_ProcessTasks_WrapperPv(i8*)
+declare void @_Z34LF_Threading2_ProcessTasks_WrapperPv(ptr)
%fmul = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000
%fadd = fadd ppc_fp128 %2, %fmul
%tmpi = fadd ppc_fp128 %fadd, 0xM00000000000000000000000000000000
- store ppc_fp128 %tmpi, ppc_fp128* null, align 16
+ store ppc_fp128 %tmpi, ptr null, align 16
ret i256 0
}
; CHECK: extsb 5, 3
; CHECK: sth 5
; CHECK: blr
-define signext i16 @t(i16* %dct) nounwind {
+define signext i16 @t(ptr %dct) nounwind {
entry:
- load i16, i16* null, align 2 ; <i16>:0 [#uses=2]
+ load i16, ptr null, align 2 ; <i16>:0 [#uses=2]
lshr i16 %0, 11 ; <i16>:1 [#uses=0]
trunc i16 %0 to i8 ; <i8>:2 [#uses=1]
sext i8 %2 to i16 ; <i16>:3 [#uses=1]
sext i16 %4 to i32 ; <i32>:5 [#uses=1]
%dcval.0.in = shl i32 %5, 0 ; <i32> [#uses=1]
%dcval.0 = trunc i32 %dcval.0.in to i16 ; <i16> [#uses=1]
- store i16 %dcval.0, i16* %dct, align 2
+ store i16 %dcval.0, ptr %dct, align 2
ret i16 0
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
-@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <ptr> [#uses=1]
; CHECK: llvm_static_func
; CHECK: lwz {{[0-9]+}}, 228(1)
define void @llvm_static_func(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) nounwind {
entry:
- tail call i32 (i8*, ...) @printf( i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i64 0), i32 %a8 ) nounwind ; <i32>:0 [#uses=0]
+ tail call i32 (ptr, ...) @printf( ptr @"\01LC", i32 %a8 ) nounwind ; <i32>:0 [#uses=0]
ret void
}
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
- %struct.CGLDI = type { %struct.cgli*, i32, i32, i32, i32, i32, i8*, i32, void (%struct.CGLSI*, i32, %struct.CGLDI*)*, i8*, %struct.vv_t }
- %struct.cgli = type { i32, %struct.cgli*, void (%struct.cgli*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32)*, i32, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i8*, i32*, %struct._cgro*, %struct._cgro*, float, float, float, float, i32, i8*, float, i8*, [16 x i32] }
- %struct.CGLSI = type { %struct.cgli*, i32, i8*, i8*, i32, i32, i8*, void (%struct.cgli*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32)*, %struct.vv_t, %struct.vv_t, %struct.xx_t* }
+ %struct.CGLDI = type { ptr, i32, i32, i32, i32, i32, ptr, i32, ptr, ptr, %struct.vv_t }
+ %struct.cgli = type { i32, ptr, ptr, i32, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, ptr, ptr, ptr, ptr, float, float, float, float, i32, ptr, float, ptr, [16 x i32] }
+ %struct.CGLSI = type { ptr, i32, ptr, ptr, i32, i32, ptr, ptr, %struct.vv_t, %struct.vv_t, ptr }
%struct._cgro = type opaque
- %struct.xx_t = type { [3 x %struct.vv_t], [2 x %struct.vv_t], [2 x [3 x i8*]] }
+ %struct.xx_t = type { [3 x %struct.vv_t], [2 x %struct.vv_t], [2 x [3 x ptr]] }
%struct.vv_t = type { <16 x i8> }
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (%struct.CGLSI*, i32, %struct.CGLDI*)* @lb to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
+@llvm.used = appending global [1 x ptr] [ ptr @lb ], section "llvm.metadata" ; <ptr> [#uses=0]
; CHECK: lb
; CHECK: blr
-define void @lb(%struct.CGLSI* %src, i32 %n, %struct.CGLDI* %dst) nounwind {
+define void @lb(ptr %src, i32 %n, ptr %dst) nounwind {
entry:
- %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, ptr null, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb.nph4945, label %return
bb.nph4945: ; preds = %entry
- %2 = bitcast [2 x %struct.vv_t]* null to i64* ; <i64*> [#uses=6]
- %3 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
- %4 = bitcast %struct.vv_t* null to i64* ; <i64*> [#uses=5]
- %5 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
+ %2 = getelementptr [2 x i64], ptr null, i32 0, i32 1 ; <ptr> [#uses=6]
+ %3 = getelementptr [2 x i64], ptr null, i32 0, i32 1 ; <ptr> [#uses=3]
br label %bb2326
bb2217: ; preds = %bb2326
- %6 = or i64 0, 0 ; <i64> [#uses=2]
- %7 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %8 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %9 = getelementptr float, float* null, i32 2 ; <float*> [#uses=1]
- %10 = load float, float* %9, align 4 ; <float> [#uses=1]
- %11 = getelementptr float, float* null, i32 3 ; <float*> [#uses=1]
- %12 = load float, float* %11, align 4 ; <float> [#uses=1]
+ %4 = or i64 0, 0 ; <i64> [#uses=2]
+ %5 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %6 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %7 = getelementptr float, ptr null, i32 2 ; <ptr> [#uses=1]
+ %8 = load float, ptr %7, align 4 ; <float> [#uses=1]
+ %9 = getelementptr float, ptr null, i32 3 ; <ptr> [#uses=1]
+ %10 = load float, ptr %9, align 4 ; <float> [#uses=1]
+ %11 = fmul float %8, 6.553500e+04 ; <float> [#uses=1]
+ %12 = fadd float %11, 5.000000e-01 ; <float> [#uses=1]
%13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1]
- %14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1]
- %15 = fmul float %12, 6.553500e+04 ; <float> [#uses=1]
- %16 = fadd float %15, 5.000000e-01 ; <float> [#uses=3]
- %17 = fcmp olt float %14, 0.000000e+00 ; <i1> [#uses=0]
- %18 = fcmp olt float %16, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %18, label %bb2265, label %bb2262
+ %14 = fadd float %13, 5.000000e-01 ; <float> [#uses=3]
+ %15 = fcmp olt float %12, 0.000000e+00 ; <i1> [#uses=0]
+ %16 = fcmp olt float %14, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %16, label %bb2265, label %bb2262
bb2262: ; preds = %bb2217
- %19 = fcmp ogt float %16, 6.553500e+04 ; <i1> [#uses=1]
- br i1 %19, label %bb2264, label %bb2265
+ %17 = fcmp ogt float %14, 6.553500e+04 ; <i1> [#uses=1]
+ br i1 %17, label %bb2264, label %bb2265
bb2264: ; preds = %bb2262
br label %bb2265
bb2265: ; preds = %bb2264, %bb2262, %bb2217
- %f3596.0 = phi float [ 6.553500e+04, %bb2264 ], [ 0.000000e+00, %bb2217 ], [ %16, %bb2262 ] ; <float> [#uses=1]
- %20 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %21 = fptosi float %f3596.0 to i32 ; <i32> [#uses=1]
- %22 = zext i32 %7 to i64 ; <i64> [#uses=1]
- %23 = shl i64 %22, 48 ; <i64> [#uses=1]
- %24 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %25 = shl i64 %24, 32 ; <i64> [#uses=1]
- %26 = sext i32 %20 to i64 ; <i64> [#uses=1]
- %27 = shl i64 %26, 16 ; <i64> [#uses=1]
- %28 = sext i32 %21 to i64 ; <i64> [#uses=1]
- %29 = or i64 %25, %23 ; <i64> [#uses=1]
- %30 = or i64 %29, %27 ; <i64> [#uses=1]
- %31 = or i64 %30, %28 ; <i64> [#uses=2]
- %32 = shl i64 %6, 48 ; <i64> [#uses=1]
- %33 = shl i64 %31, 32 ; <i64> [#uses=1]
- %34 = and i64 %33, 281470681743360 ; <i64> [#uses=1]
- store i64 %6, i64* %2, align 16
- store i64 %31, i64* %3, align 8
- %35 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
- %36 = bitcast i8* %35 to float* ; <float*> [#uses=4]
- %37 = load float, float* %36, align 4 ; <float> [#uses=1]
- %38 = getelementptr float, float* %36, i32 1 ; <float*> [#uses=1]
- %39 = load float, float* %38, align 4 ; <float> [#uses=1]
- %40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1]
- %41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1]
- %42 = fmul float %39, 6.553500e+04 ; <float> [#uses=1]
- %43 = fadd float %42, 5.000000e-01 ; <float> [#uses=3]
- %44 = fcmp olt float %41, 0.000000e+00 ; <i1> [#uses=0]
- %45 = fcmp olt float %43, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %45, label %bb2277, label %bb2274
+ %f3596.0 = phi float [ 6.553500e+04, %bb2264 ], [ 0.000000e+00, %bb2217 ], [ %14, %bb2262 ] ; <float> [#uses=1]
+ %18 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %19 = fptosi float %f3596.0 to i32 ; <i32> [#uses=1]
+ %20 = zext i32 %5 to i64 ; <i64> [#uses=1]
+ %21 = shl i64 %20, 48 ; <i64> [#uses=1]
+ %22 = zext i32 %6 to i64 ; <i64> [#uses=1]
+ %23 = shl i64 %22, 32 ; <i64> [#uses=1]
+ %24 = sext i32 %18 to i64 ; <i64> [#uses=1]
+ %25 = shl i64 %24, 16 ; <i64> [#uses=1]
+ %26 = sext i32 %19 to i64 ; <i64> [#uses=1]
+ %27 = or i64 %23, %21 ; <i64> [#uses=1]
+ %28 = or i64 %27, %25 ; <i64> [#uses=1]
+ %29 = or i64 %28, %26 ; <i64> [#uses=2]
+ %30 = shl i64 %4, 48 ; <i64> [#uses=1]
+ %31 = shl i64 %29, 32 ; <i64> [#uses=1]
+ %32 = and i64 %31, 281470681743360 ; <i64> [#uses=1]
+ store i64 %4, ptr null, align 16
+ store i64 %29, ptr %2, align 8
+ %33 = getelementptr i8, ptr null, i32 0 ; <ptr> [#uses=1]
+ %34 = load float, ptr %33, align 4 ; <float> [#uses=1]
+ %35 = getelementptr float, ptr %33, i32 1 ; <ptr> [#uses=1]
+ %36 = load float, ptr %35, align 4 ; <float> [#uses=1]
+ %37 = fmul float %34, 6.553500e+04 ; <float> [#uses=1]
+ %38 = fadd float %37, 5.000000e-01 ; <float> [#uses=1]
+ %39 = fmul float %36, 6.553500e+04 ; <float> [#uses=1]
+ %40 = fadd float %39, 5.000000e-01 ; <float> [#uses=3]
+ %41 = fcmp olt float %38, 0.000000e+00 ; <i1> [#uses=0]
+ %42 = fcmp olt float %40, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %42, label %bb2277, label %bb2274
bb2274: ; preds = %bb2265
- %46 = fcmp ogt float %43, 6.553500e+04 ; <i1> [#uses=0]
+ %43 = fcmp ogt float %40, 6.553500e+04 ; <i1> [#uses=0]
br label %bb2277
bb2277: ; preds = %bb2274, %bb2265
- %f1582.0 = phi float [ 0.000000e+00, %bb2265 ], [ %43, %bb2274 ] ; <float> [#uses=1]
- %47 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %48 = fptosi float %f1582.0 to i32 ; <i32> [#uses=1]
- %49 = getelementptr float, float* %36, i32 2 ; <float*> [#uses=1]
- %50 = load float, float* %49, align 4 ; <float> [#uses=1]
- %51 = getelementptr float, float* %36, i32 3 ; <float*> [#uses=1]
- %52 = load float, float* %51, align 4 ; <float> [#uses=1]
- %53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1]
- %54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1]
- %55 = fmul float %52, 6.553500e+04 ; <float> [#uses=1]
- %56 = fadd float %55, 5.000000e-01 ; <float> [#uses=1]
- %57 = fcmp olt float %54, 0.000000e+00 ; <i1> [#uses=0]
- %58 = fcmp olt float %56, 0.000000e+00 ; <i1> [#uses=0]
- %59 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %60 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %61 = zext i32 %47 to i64 ; <i64> [#uses=1]
- %62 = shl i64 %61, 48 ; <i64> [#uses=1]
- %63 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %64 = shl i64 %63, 32 ; <i64> [#uses=1]
- %65 = sext i32 %59 to i64 ; <i64> [#uses=1]
- %66 = shl i64 %65, 16 ; <i64> [#uses=1]
- %67 = sext i32 %60 to i64 ; <i64> [#uses=1]
- %68 = or i64 %64, %62 ; <i64> [#uses=1]
- %69 = or i64 %68, %66 ; <i64> [#uses=1]
- %70 = or i64 %69, %67 ; <i64> [#uses=2]
- %71 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
- %72 = bitcast i8* %71 to float* ; <float*> [#uses=4]
- %73 = load float, float* %72, align 4 ; <float> [#uses=1]
- %74 = getelementptr float, float* %72, i32 1 ; <float*> [#uses=1]
- %75 = load float, float* %74, align 4 ; <float> [#uses=1]
- %76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1]
- %77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3]
- %78 = fmul float %75, 6.553500e+04 ; <float> [#uses=1]
- %79 = fadd float %78, 5.000000e-01 ; <float> [#uses=1]
- %80 = fcmp olt float %77, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %80, label %bb2295, label %bb2292
+ %f1582.0 = phi float [ 0.000000e+00, %bb2265 ], [ %40, %bb2274 ] ; <float> [#uses=1]
+ %44 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %45 = fptosi float %f1582.0 to i32 ; <i32> [#uses=1]
+ %46 = getelementptr float, ptr %33, i32 2 ; <ptr> [#uses=1]
+ %47 = load float, ptr %46, align 4 ; <float> [#uses=1]
+ %48 = getelementptr float, ptr %33, i32 3 ; <ptr> [#uses=1]
+ %49 = load float, ptr %48, align 4 ; <float> [#uses=1]
+ %50 = fmul float %47, 6.553500e+04 ; <float> [#uses=1]
+ %51 = fadd float %50, 5.000000e-01 ; <float> [#uses=1]
+ %52 = fmul float %49, 6.553500e+04 ; <float> [#uses=1]
+ %53 = fadd float %52, 5.000000e-01 ; <float> [#uses=1]
+ %54 = fcmp olt float %51, 0.000000e+00 ; <i1> [#uses=0]
+ %55 = fcmp olt float %53, 0.000000e+00 ; <i1> [#uses=0]
+ %56 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %57 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %58 = zext i32 %44 to i64 ; <i64> [#uses=1]
+ %59 = shl i64 %58, 48 ; <i64> [#uses=1]
+ %60 = zext i32 %45 to i64 ; <i64> [#uses=1]
+ %61 = shl i64 %60, 32 ; <i64> [#uses=1]
+ %62 = sext i32 %56 to i64 ; <i64> [#uses=1]
+ %63 = shl i64 %62, 16 ; <i64> [#uses=1]
+ %64 = sext i32 %57 to i64 ; <i64> [#uses=1]
+ %65 = or i64 %61, %59 ; <i64> [#uses=1]
+ %66 = or i64 %65, %63 ; <i64> [#uses=1]
+ %67 = or i64 %66, %64 ; <i64> [#uses=2]
+ %68 = getelementptr i8, ptr null, i32 0 ; <ptr> [#uses=1]
+ %69 = load float, ptr %68, align 4 ; <float> [#uses=1]
+ %70 = getelementptr float, ptr %68, i32 1 ; <ptr> [#uses=1]
+ %71 = load float, ptr %70, align 4 ; <float> [#uses=1]
+ %72 = fmul float %69, 6.553500e+04 ; <float> [#uses=1]
+ %73 = fadd float %72, 5.000000e-01 ; <float> [#uses=3]
+ %74 = fmul float %71, 6.553500e+04 ; <float> [#uses=1]
+ %75 = fadd float %74, 5.000000e-01 ; <float> [#uses=1]
+ %76 = fcmp olt float %73, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %76, label %bb2295, label %bb2292
bb2292: ; preds = %bb2277
- %81 = fcmp ogt float %77, 6.553500e+04 ; <i1> [#uses=1]
- br i1 %81, label %bb2294, label %bb2295
+ %77 = fcmp ogt float %73, 6.553500e+04 ; <i1> [#uses=1]
+ br i1 %77, label %bb2294, label %bb2295
bb2294: ; preds = %bb2292
br label %bb2295
bb2295: ; preds = %bb2294, %bb2292, %bb2277
- %f0569.0 = phi float [ 6.553500e+04, %bb2294 ], [ 0.000000e+00, %bb2277 ], [ %77, %bb2292 ] ; <float> [#uses=1]
- %82 = fcmp olt float %79, 0.000000e+00 ; <i1> [#uses=0]
- %83 = fptosi float %f0569.0 to i32 ; <i32> [#uses=1]
- %84 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %85 = getelementptr float, float* %72, i32 2 ; <float*> [#uses=1]
- %86 = load float, float* %85, align 4 ; <float> [#uses=1]
- %87 = getelementptr float, float* %72, i32 3 ; <float*> [#uses=1]
- %88 = load float, float* %87, align 4 ; <float> [#uses=1]
- %89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1]
- %90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1]
- %91 = fmul float %88, 6.553500e+04 ; <float> [#uses=1]
- %92 = fadd float %91, 5.000000e-01 ; <float> [#uses=1]
- %93 = fcmp olt float %90, 0.000000e+00 ; <i1> [#uses=0]
- %94 = fcmp olt float %92, 0.000000e+00 ; <i1> [#uses=0]
- %95 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %96 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %97 = zext i32 %83 to i64 ; <i64> [#uses=1]
- %98 = shl i64 %97, 48 ; <i64> [#uses=1]
- %99 = zext i32 %84 to i64 ; <i64> [#uses=1]
- %100 = shl i64 %99, 32 ; <i64> [#uses=1]
- %101 = sext i32 %95 to i64 ; <i64> [#uses=1]
- %102 = shl i64 %101, 16 ; <i64> [#uses=1]
- %103 = sext i32 %96 to i64 ; <i64> [#uses=1]
- %104 = or i64 %100, %98 ; <i64> [#uses=1]
- %105 = or i64 %104, %102 ; <i64> [#uses=1]
- %106 = or i64 %105, %103 ; <i64> [#uses=2]
- %107 = shl i64 %70, 16 ; <i64> [#uses=1]
- %108 = and i64 %107, 4294901760 ; <i64> [#uses=1]
- %109 = and i64 %106, 65535 ; <i64> [#uses=1]
- %110 = or i64 %34, %32 ; <i64> [#uses=1]
- %111 = or i64 %110, %108 ; <i64> [#uses=1]
- %112 = or i64 %111, %109 ; <i64> [#uses=1]
- store i64 %70, i64* %4, align 16
- store i64 %106, i64* %5, align 8
- %113 = icmp eq i64 %112, 0 ; <i1> [#uses=1]
- br i1 %113, label %bb2325, label %bb2315
+ %f0569.0 = phi float [ 6.553500e+04, %bb2294 ], [ 0.000000e+00, %bb2277 ], [ %73, %bb2292 ] ; <float> [#uses=1]
+ %78 = fcmp olt float %75, 0.000000e+00 ; <i1> [#uses=0]
+ %79 = fptosi float %f0569.0 to i32 ; <i32> [#uses=1]
+ %80 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %81 = getelementptr float, ptr %68, i32 2 ; <ptr> [#uses=1]
+ %82 = load float, ptr %81, align 4 ; <float> [#uses=1]
+ %83 = getelementptr float, ptr %68, i32 3 ; <ptr> [#uses=1]
+ %84 = load float, ptr %83, align 4 ; <float> [#uses=1]
+ %85 = fmul float %82, 6.553500e+04 ; <float> [#uses=1]
+ %86 = fadd float %85, 5.000000e-01 ; <float> [#uses=1]
+ %87 = fmul float %84, 6.553500e+04 ; <float> [#uses=1]
+ %88 = fadd float %87, 5.000000e-01 ; <float> [#uses=1]
+ %89 = fcmp olt float %86, 0.000000e+00 ; <i1> [#uses=0]
+ %90 = fcmp olt float %88, 0.000000e+00 ; <i1> [#uses=0]
+ %91 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %92 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
+ %93 = zext i32 %79 to i64 ; <i64> [#uses=1]
+ %94 = shl i64 %93, 48 ; <i64> [#uses=1]
+ %95 = zext i32 %80 to i64 ; <i64> [#uses=1]
+ %96 = shl i64 %95, 32 ; <i64> [#uses=1]
+ %97 = sext i32 %91 to i64 ; <i64> [#uses=1]
+ %98 = shl i64 %97, 16 ; <i64> [#uses=1]
+ %99 = sext i32 %92 to i64 ; <i64> [#uses=1]
+ %100 = or i64 %96, %94 ; <i64> [#uses=1]
+ %101 = or i64 %100, %98 ; <i64> [#uses=1]
+ %102 = or i64 %101, %99 ; <i64> [#uses=2]
+ %103 = shl i64 %67, 16 ; <i64> [#uses=1]
+ %104 = and i64 %103, 4294901760 ; <i64> [#uses=1]
+ %105 = and i64 %102, 65535 ; <i64> [#uses=1]
+ %106 = or i64 %32, %30 ; <i64> [#uses=1]
+ %107 = or i64 %106, %104 ; <i64> [#uses=1]
+ %108 = or i64 %107, %105 ; <i64> [#uses=1]
+ store i64 %67, ptr null, align 16
+ store i64 %102, ptr %3, align 8
+ %109 = icmp eq i64 %108, 0 ; <i1> [#uses=1]
+ br i1 %109, label %bb2325, label %bb2315
bb2315: ; preds = %bb2295
- %114 = icmp eq %struct.xx_t* %159, null ; <i1> [#uses=1]
- br i1 %114, label %bb2318, label %bb2317
+ %110 = icmp eq ptr %155, null ; <i1> [#uses=1]
+ br i1 %110, label %bb2318, label %bb2317
bb2317: ; preds = %bb2315
- %115 = load i64, i64* %2, align 16 ; <i64> [#uses=1]
- %116 = call i32 (...) @_u16a_cm( i64 %115, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %117 = sext i32 %116 to i64 ; <i64> [#uses=1]
- store i64 %117, i64* %2, align 16
- %118 = load i64, i64* %3, align 8 ; <i64> [#uses=1]
- %119 = call i32 (...) @_u16a_cm( i64 %118, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %120 = sext i32 %119 to i64 ; <i64> [#uses=1]
- store i64 %120, i64* %3, align 8
- %121 = load i64, i64* %4, align 16 ; <i64> [#uses=1]
- %122 = call i32 (...) @_u16a_cm( i64 %121, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %123 = sext i32 %122 to i64 ; <i64> [#uses=1]
- store i64 %123, i64* %4, align 16
- %124 = load i64, i64* %5, align 8 ; <i64> [#uses=1]
- %125 = call i32 (...) @_u16a_cm( i64 %124, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=0]
+ %111 = load i64, ptr null, align 16 ; <i64> [#uses=1]
+ %112 = call i32 (...) @_u16a_cm( i64 %111, ptr %155, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %113 = sext i32 %112 to i64 ; <i64> [#uses=1]
+ store i64 %113, ptr null, align 16
+ %114 = load i64, ptr %2, align 8 ; <i64> [#uses=1]
+ %115 = call i32 (...) @_u16a_cm( i64 %114, ptr %155, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %116 = sext i32 %115 to i64 ; <i64> [#uses=1]
+ store i64 %116, ptr %2, align 8
+ %117 = load i64, ptr null, align 16 ; <i64> [#uses=1]
+ %118 = call i32 (...) @_u16a_cm( i64 %117, ptr %155, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
+ %119 = sext i32 %118 to i64 ; <i64> [#uses=1]
+ store i64 %119, ptr null, align 16
+ %120 = load i64, ptr %3, align 8 ; <i64> [#uses=1]
+ %121 = call i32 (...) @_u16a_cm( i64 %120, ptr %155, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=0]
unreachable
bb2318: ; preds = %bb2315
- %126 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
- %127 = bitcast %struct.vv_t* %126 to i64* ; <i64*> [#uses=1]
- %128 = load i64, i64* %127, align 8 ; <i64> [#uses=1]
- %129 = trunc i64 %128 to i32 ; <i32> [#uses=4]
- %130 = load i64, i64* %2, align 16 ; <i64> [#uses=1]
- %131 = call i32 (...) @_u16_ff( i64 %130, i32 %129 ) nounwind ; <i32> [#uses=1]
- %132 = sext i32 %131 to i64 ; <i64> [#uses=1]
- store i64 %132, i64* %2, align 16
- %133 = load i64, i64* %3, align 8 ; <i64> [#uses=1]
- %134 = call i32 (...) @_u16_ff( i64 %133, i32 %129 ) nounwind ; <i32> [#uses=1]
- %135 = sext i32 %134 to i64 ; <i64> [#uses=1]
- store i64 %135, i64* %3, align 8
- %136 = load i64, i64* %4, align 16 ; <i64> [#uses=1]
- %137 = call i32 (...) @_u16_ff( i64 %136, i32 %129 ) nounwind ; <i32> [#uses=1]
- %138 = sext i32 %137 to i64 ; <i64> [#uses=1]
- store i64 %138, i64* %4, align 16
- %139 = load i64, i64* %5, align 8 ; <i64> [#uses=1]
- %140 = call i32 (...) @_u16_ff( i64 %139, i32 %129 ) nounwind ; <i32> [#uses=0]
+ %122 = getelementptr %struct.CGLSI, ptr %src, i32 %indvar5021, i32 8 ; <ptr> [#uses=1]
+ %123 = load i64, ptr %122, align 8 ; <i64> [#uses=1]
+ %124 = trunc i64 %123 to i32 ; <i32> [#uses=4]
+ %125 = load i64, ptr null, align 16 ; <i64> [#uses=1]
+ %126 = call i32 (...) @_u16_ff( i64 %125, i32 %124 ) nounwind ; <i32> [#uses=1]
+ %127 = sext i32 %126 to i64 ; <i64> [#uses=1]
+ store i64 %127, ptr null, align 16
+ %128 = load i64, ptr %2, align 8 ; <i64> [#uses=1]
+ %129 = call i32 (...) @_u16_ff( i64 %128, i32 %124 ) nounwind ; <i32> [#uses=1]
+ %130 = sext i32 %129 to i64 ; <i64> [#uses=1]
+ store i64 %130, ptr %2, align 8
+ %131 = load i64, ptr null, align 16 ; <i64> [#uses=1]
+ %132 = call i32 (...) @_u16_ff( i64 %131, i32 %124 ) nounwind ; <i32> [#uses=1]
+ %133 = sext i32 %132 to i64 ; <i64> [#uses=1]
+ store i64 %133, ptr null, align 16
+ %134 = load i64, ptr %3, align 8 ; <i64> [#uses=1]
+ %135 = call i32 (...) @_u16_ff( i64 %134, i32 %124 ) nounwind ; <i32> [#uses=0]
unreachable
bb2319: ; preds = %bb2326
- %141 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
- %142 = load i8*, i8** %141, align 4 ; <i8*> [#uses=4]
- %143 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
- %144 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %143 ) nounwind ; <i32> [#uses=1]
- %145 = sext i32 %144 to i64 ; <i64> [#uses=2]
- %146 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
- %147 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %146 ) nounwind ; <i32> [#uses=1]
- %148 = sext i32 %147 to i64 ; <i64> [#uses=2]
- %149 = shl i64 %145, 48 ; <i64> [#uses=0]
- %150 = shl i64 %148, 32 ; <i64> [#uses=1]
- %151 = and i64 %150, 281470681743360 ; <i64> [#uses=0]
- store i64 %145, i64* %2, align 16
- store i64 %148, i64* %3, align 8
- %152 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
- %153 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %152 ) nounwind ; <i32> [#uses=1]
- %154 = sext i32 %153 to i64 ; <i64> [#uses=0]
- %155 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
- %156 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %155 ) nounwind ; <i32> [#uses=0]
+ %136 = getelementptr %struct.CGLSI, ptr %src, i32 %indvar5021, i32 2 ; <ptr> [#uses=1]
+ %137 = load ptr, ptr %136, align 4 ; <ptr> [#uses=4]
+ %138 = getelementptr i8, ptr %137, i32 0 ; <ptr> [#uses=1]
+ %139 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, ptr %138 ) nounwind ; <i32> [#uses=1]
+ %140 = sext i32 %139 to i64 ; <i64> [#uses=2]
+ %141 = getelementptr i8, ptr %137, i32 0 ; <ptr> [#uses=1]
+ %142 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, ptr %141 ) nounwind ; <i32> [#uses=1]
+ %143 = sext i32 %142 to i64 ; <i64> [#uses=2]
+ %144 = shl i64 %140, 48 ; <i64> [#uses=0]
+ %145 = shl i64 %143, 32 ; <i64> [#uses=1]
+ %146 = and i64 %145, 281470681743360 ; <i64> [#uses=0]
+ store i64 %140, ptr null, align 16
+ store i64 %143, ptr %2, align 8
+ %147 = getelementptr i8, ptr %137, i32 0 ; <ptr> [#uses=1]
+ %148 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, ptr %147 ) nounwind ; <i32> [#uses=1]
+ %149 = sext i32 %148 to i64 ; <i64> [#uses=0]
+ %150 = getelementptr i8, ptr %137, i32 0 ; <ptr> [#uses=1]
+ %151 = call i32 (...) @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, ptr %150 ) nounwind ; <i32> [#uses=0]
unreachable
bb2325: ; preds = %bb2326, %bb2295
bb2326: ; preds = %bb2325, %bb.nph4945
%indvar5021 = phi i32 [ 0, %bb.nph4945 ], [ %indvar.next5145, %bb2325 ] ; <i32> [#uses=6]
- %157 = icmp slt i32 %indvar5021, %n ; <i1> [#uses=0]
- %158 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
- %159 = load %struct.xx_t*, %struct.xx_t** %158, align 4 ; <%struct.xx_t*> [#uses=5]
- %160 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
- %161 = load i32, i32* %160, align 4 ; <i32> [#uses=1]
- %162 = and i32 %161, 255 ; <i32> [#uses=1]
- switch i32 %162, label %bb2325 [
+ %152 = icmp slt i32 %indvar5021, %n ; <i1> [#uses=0]
+ %153 = getelementptr %struct.CGLSI, ptr %src, i32 %indvar5021, i32 10 ; <ptr> [#uses=1]
+ %154 = load ptr, ptr %153, align 4 ; <ptr> [#uses=5]
+ %155 = getelementptr %struct.CGLSI, ptr %src, i32 %indvar5021, i32 1 ; <ptr> [#uses=1]
+ %156 = load i32, ptr %155, align 4 ; <i32> [#uses=1]
+ %157 = and i32 %156, 255 ; <i32> [#uses=1]
+ switch i32 %157, label %bb2325 [
i32 59, label %bb2217
i32 60, label %bb2319
]
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-unknown-linux-gnu"
-define i32 @test(i64 %x, i32* %p) nounwind {
+define i32 @test(i64 %x, ptr %p) nounwind {
%asmtmp = call i32 asm "", "=r,0"(i64 0) nounwind ; <i32> [#uses=0]
%y = add i32 %asmtmp, 1
ret i32 %y
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64--
-define void @__divtc3({ ppc_fp128, ppc_fp128 }* noalias sret({ ppc_fp128, ppc_fp128 }) %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
+define void @__divtc3(ptr noalias sret({ ppc_fp128, ppc_fp128 }) %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
entry:
- %imag59 = load ppc_fp128, ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1]
+ %imag59 = load ppc_fp128, ptr null, align 8 ; <ppc_fp128> [#uses=1]
%0 = fmul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1]
%1 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
%2 = fadd ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %2, ppc_fp128* null, align 16
+ store ppc_fp128 %2, ptr null, align 16
unreachable
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu.5
-define void @__multc3({ ppc_fp128, ppc_fp128 }* noalias sret({ ppc_fp128, ppc_fp128 }) %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
+define void @__multc3(ptr noalias sret({ ppc_fp128, ppc_fp128 }) %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
entry:
%.pre139 = and i1 false, false ; <i1> [#uses=1]
br i1 false, label %bb6, label %bb21
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu.5
; rdar://6499616
-@"\01LC" = internal constant [13 x i8] c"conftest.val\00" ; <[13 x i8]*> [#uses=1]
+@"\01LC" = internal constant [13 x i8] c"conftest.val\00" ; <ptr> [#uses=1]
define i32 @main() nounwind {
entry:
- %0 = call i8* @fopen(i8* getelementptr ([13 x i8], [13 x i8]* @"\01LC", i32 0, i32 0), i8* null) nounwind ; <i8*> [#uses=0]
+ %0 = call ptr @fopen(ptr @"\01LC", ptr null) nounwind ; <ptr> [#uses=0]
unreachable
}
-declare i8* @fopen(i8*, i8*)
+declare ptr @fopen(ptr, ptr)
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu
; rdar://6692215
-define fastcc void @_qsort(i8* %a, i32 %n, i32 %es, i32 (i8*, i8*)* %cmp, i32 %depth_limit) nounwind optsize ssp {
+define fastcc void @_qsort(ptr %a, i32 %n, i32 %es, ptr %cmp, i32 %depth_limit) nounwind optsize ssp {
entry:
br i1 false, label %bb21, label %bb20.loopexit
ret void
bb21: ; preds = %entry
- %0 = getelementptr i8, i8* %a, i32 0 ; <i8*> [#uses=2]
+ %0 = getelementptr i8, ptr %a, i32 0 ; <ptr> [#uses=2]
br label %bb35
bb29: ; preds = %bb35
bb7.i252: ; preds = %bb7.i252, %bb29
%pj.0.rec.i247 = phi i32 [ %indvar.next488, %bb7.i252 ], [ 0, %bb29 ] ; <i32> [#uses=2]
- %pi.0.i248 = getelementptr i8, i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
+ %pi.0.i248 = getelementptr i8, ptr %pa.1, i32 %pj.0.rec.i247 ; <ptr> [#uses=0]
%indvar.next488 = add i32 %pj.0.rec.i247, 1 ; <i32> [#uses=1]
br i1 false, label %bb34, label %bb7.i252
bb35: ; preds = %bb34, %bb21
%indvar504 = phi i32 [ %indvar.next505, %bb34 ], [ 0, %bb21 ] ; <i32> [#uses=2]
- %pa.1 = phi i8* [ null, %bb34 ], [ %0, %bb21 ] ; <i8*> [#uses=2]
+ %pa.1 = phi ptr [ null, %bb34 ], [ %0, %bb21 ] ; <ptr> [#uses=2]
%pb.0.rec = mul i32 %indvar504, %es ; <i32> [#uses=1]
br i1 false, label %bb43, label %bb29
br i1 false, label %bb50, label %bb43
bb50: ; preds = %bb43
- %1 = ptrtoint i8* %pa.1 to i32 ; <i32> [#uses=1]
+ %1 = ptrtoint ptr %pa.1 to i32 ; <i32> [#uses=1]
%2 = sub i32 %1, 0 ; <i32> [#uses=2]
%3 = icmp sle i32 0, %2 ; <i1> [#uses=1]
%min = select i1 %3, i32 0, i32 %2 ; <i32> [#uses=1]
%pj.0.rec.i156 = phi i32 [ %indvar.next394, %bb7.i161 ], [ 0, %bb50 ] ; <i32> [#uses=2]
%.sum279 = sub i32 %pj.0.rec.i156, %min ; <i32> [#uses=1]
%pb.0.sum542 = add i32 %pb.0.rec, %.sum279 ; <i32> [#uses=1]
- %pj.0.i158 = getelementptr i8, i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
+ %pj.0.i158 = getelementptr i8, ptr %0, i32 %pb.0.sum542 ; <ptr> [#uses=0]
%indvar.next394 = add i32 %pj.0.rec.i156, 1 ; <i32> [#uses=1]
br label %bb7.i161
}
; It is wrong on powerpc to substitute reg+reg for $0; the stw opcode
; would have to change.
-@x = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
+@x = external global [0 x i32] ; <ptr> [#uses=1]
define void @foo(i32 %y) nounwind ssp {
entry:
; CHECK: foo
; CHECK: add [[REG:[0-9]+]]
; CHECK: 0([[REG]])
- %y_addr = alloca i32 ; <i32*> [#uses=2]
+ %y_addr = alloca i32 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %y, i32* %y_addr
- %0 = load i32, i32* %y_addr, align 4 ; <i32> [#uses=1]
- %1 = getelementptr inbounds [0 x i32], [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
- call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* elementtype(i32) %1, i32 0) nounwind
+ store i32 %y, ptr %y_addr
+ %0 = load i32, ptr %y_addr, align 4 ; <i32> [#uses=1]
+ %1 = getelementptr inbounds [0 x i32], ptr @x, i32 0, i32 %0 ; <ptr> [#uses=1]
+ call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(ptr elementtype(i32) %1, i32 0) nounwind
br label %return
return: ; preds = %entry
br i1 undef, label %bb51, label %bb48.4
bb48.3: ; preds = %bb49.2
- store i64* undef, i64** undef, align 4
+ store ptr undef, ptr undef, align 4
br label %bb49.3
bb48.4: ; preds = %bb49.3
- %0 = getelementptr inbounds [5 x i64*], [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
+ %0 = getelementptr inbounds [5 x ptr], ptr undef, i32 0, i32 %c_ix.0.3 ; <ptr> [#uses=0]
br label %bb51
}
bb17: ; preds = %bb16, %bb15
%0 = fcmp olt float undef, 0.000000e+00 ; <i1> [#uses=2]
- %eTop.eMaj = select i1 %0, %struct..0EdgeT* undef, %struct..0EdgeT* null ; <%struct..0EdgeT*> [#uses=1]
+ %eTop.eMaj = select i1 %0, ptr undef, ptr null ; <ptr> [#uses=1]
br label %bb69
bb24: ; preds = %bb69
br label %bb38
bb38: ; preds = %bb34, %bb33, %bb32
- %eRight.08 = phi %struct..0EdgeT* [ %eTop.eMaj, %bb32 ], [ undef, %bb34 ], [ undef, %bb33 ] ; <%struct..0EdgeT*> [#uses=0]
+ %eRight.08 = phi ptr [ %eTop.eMaj, %bb32 ], [ undef, %bb34 ], [ undef, %bb33 ] ; <ptr> [#uses=0]
%fdgOuter.0 = phi i32 [ %fdgOuter.1, %bb32 ], [ undef, %bb34 ], [ %fdgOuter.1, %bb33 ] ; <i32> [#uses=1]
%fz.3 = phi i32 [ %fz.2, %bb32 ], [ 2147483647, %bb34 ], [ %fz.2, %bb33 ] ; <i32> [#uses=1]
%1 = add i32 undef, 1 ; <i32> [#uses=0]
; CHECK-DAG: stwx [[T1]], 1, [[T2]]
; CHECK-DAG: addi 3, 1, 28
; CHECK: bl bar
- %x = alloca [100000 x i8] ; <[100000 x i8]*> [#uses=1]
+ %x = alloca [100000 x i8] ; <ptr> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %x1 = bitcast [100000 x i8]* %x to i8* ; <i8*> [#uses=1]
- call void @bar(i8* %x1) nounwind
+ call void @bar(ptr %x) nounwind
call void asm sideeffect "", "~{cr2},~{cr3}"() nounwind
br label %return
ret void
}
-declare void @bar(i8*)
+declare void @bar(ptr)
; Indirect calls must use R3 on powerpc (i.e., R3 must contain the address of
; the function being called; the mtctr is not required to use it).
-@p = external global void (...)* ; <void (...)**> [#uses=1]
+@p = external global ptr ; <ptr> [#uses=1]
define void @foo() nounwind ssp {
entry:
; CHECK: mtctr 3
; CHECK: bctrl
- %0 = load void (...)*, void (...)** @p, align 4 ; <void (...)*> [#uses=1]
+ %0 = load ptr, ptr @p, align 4 ; <ptr> [#uses=1]
call void (...) %0() nounwind
br label %return
%0 = type { i32 }
%1 = type { i64 }
%struct.Buffer = type { [1024 x i8], i64, i64, i64 }
-%struct.InStream = type { %struct.Buffer, %0, %1, i32*, %struct.InStreamMethods* }
-%struct.InStreamMethods = type { void (%struct.InStream*, i8*, i32)*, void (%struct.InStream*, i64)*, i64 (%struct.InStream*)*, void (%struct.InStream*)* }
+%struct.InStream = type { %struct.Buffer, %0, %1, ptr, ptr }
+%struct.InStreamMethods = type { ptr, ptr, ptr, ptr }
-define i64 @t(%struct.InStream* %is) nounwind optsize ssp {
+define i64 @t(ptr %is) nounwind optsize ssp {
entry:
br i1 undef, label %is_read_byte.exit, label %bb.i
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -regalloc=basic | FileCheck %s
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
-define i8* @g2() nounwind readnone {
+define ptr @g2() nounwind readnone {
; CHECK-LABEL: g2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(1)
; CHECK-NEXT: blr
entry:
- %0 = tail call i8* @llvm.frameaddress(i32 1) ; <i8*> [#uses=1]
- ret i8* %0
+ %0 = tail call ptr @llvm.frameaddress(i32 1) ; <ptr> [#uses=1]
+ ret ptr %0
}
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
-define i8* @g() nounwind readnone {
+define ptr @g() nounwind readnone {
; CHECK-LABEL: g:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %0 = tail call i8* @llvm.returnaddress(i32 1) ; <i8*> [#uses=1]
- ret i8* %0
+ %0 = tail call ptr @llvm.returnaddress(i32 1) ; <ptr> [#uses=1]
+ ret ptr %0
}
define i32 @testing(i32 %x, float %a, ...) nounwind {
%1 = alloca i32, align 4
%2 = alloca float, align 4
- store i32 %x, i32* %1, align 4
- store float %a, float* %2, align 4
+ store i32 %x, ptr %1, align 4
+ store float %a, ptr %2, align 4
ret i32 0
}
%retval = alloca i32
%0 = alloca i32
%"alloca point" = bitcast i32 0 to i32
- store i32 0, i32* %0, align 4
- %1 = load i32, i32* %0, align 4
- store i32 %1, i32* %retval, align 4
+ store i32 0, ptr %0, align 4
+ %1 = load i32, ptr %0, align 4
+ store i32 %1, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32, i32* %retval
+ %retval1 = load i32, ptr %retval
ret i32 %retval1
}
@.str11 = private unnamed_addr constant [6 x i8] c"s122 \00", align 1
@.str152 = private unnamed_addr constant [14 x i8] c"S122\09 %.2f \09\09\00", align 1
-declare i32 @printf(i8* nocapture, ...) nounwind
-declare i32 @init(i8* %name) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
+declare i32 @init(ptr %name) nounwind
declare i64 @clock() nounwind
-declare i32 @dummy(float*, float*, float*, float*, float*, [256 x float]*, [256 x float]*, [256 x float]*, float)
+declare i32 @dummy(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, float)
declare void @check(i32 %name) nounwind
; CHECK: mfcr
define i32 @s122(i32 %n1, i32 %n3) nounwind {
entry:
- %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str11, i64 0, i64 0))
+ %call = tail call i32 @init(ptr @.str11)
%call1 = tail call i64 @clock() nounwind
%sub = add nsw i32 %n1, -1
%cmp316 = icmp slt i32 %sub, 32000
%sub5.us = sub i64 31999, %indvars.iv20
%sext = shl i64 %sub5.us, 32
%idxprom.us = ashr exact i64 %sext, 32
- %arrayidx.us = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us
- %2 = load float, float* %arrayidx.us, align 4
- %arrayidx7.us = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
- %3 = load float, float* %arrayidx7.us, align 4
+ %arrayidx.us = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %idxprom.us
+ %2 = load float, ptr %arrayidx.us, align 4
+ %arrayidx7.us = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv
+ %3 = load float, ptr %arrayidx7.us, align 4
%add8.us = tail call float asm "fadd $0, $1, $2", "=f,f,f,~{cr2}"(float %3, float %2)
- store float %add8.us, float* %arrayidx7.us, align 4
+ store float %add8.us, ptr %arrayidx7.us, align 4
%indvars.iv.next = add i64 %indvars.iv, %1
%4 = trunc i64 %indvars.iv.next to i32
%cmp3.us = icmp slt i32 %4, 32000
%sub14 = sub nsw i64 %call13, %call1
%conv = sitofp i64 %sub14 to double
%div = fdiv double %conv, 1.000000e+06
- %call15 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str152, i64 0, i64 0), double %div) nounwind
+ %call15 = tail call i32 (ptr, ...) @printf(ptr @.str152, double %div) nounwind
tail call void @check(i32 1)
ret i32 0
for.body4.lr.ph.us.1: ; preds = %for.body4.us
- %call10.us = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
br label %for.body4.us.1
for.body4.us.1: ; preds = %for.body4.us.1, %for.body4.lr.ph.us.1
%sub5.us.1 = sub i64 31999, %indvars.iv20.1
%sext23 = shl i64 %sub5.us.1, 32
%idxprom.us.1 = ashr exact i64 %sext23, 32
- %arrayidx.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.1
- %5 = load float, float* %arrayidx.us.1, align 4
- %arrayidx7.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.1
- %6 = load float, float* %arrayidx7.us.1, align 4
+ %arrayidx.us.1 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %idxprom.us.1
+ %5 = load float, ptr %arrayidx.us.1, align 4
+ %arrayidx7.us.1 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv.1
+ %6 = load float, ptr %arrayidx7.us.1, align 4
%add8.us.1 = fadd float %6, %5
- store float %add8.us.1, float* %arrayidx7.us.1, align 4
+ store float %add8.us.1, ptr %arrayidx7.us.1, align 4
%indvars.iv.next.1 = add i64 %indvars.iv.1, %1
%7 = trunc i64 %indvars.iv.next.1 to i32
%cmp3.us.1 = icmp slt i32 %7, 32000
br i1 %cmp3.us.1, label %for.body4.us.1, label %for.body4.lr.ph.us.2
for.body4.lr.ph.us.2: ; preds = %for.body4.us.1
- %call10.us.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.1 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
br label %for.body4.us.2
for.body4.us.2: ; preds = %for.body4.us.2, %for.body4.lr.ph.us.2
%sub5.us.2 = sub i64 31999, %indvars.iv20.2
%sext24 = shl i64 %sub5.us.2, 32
%idxprom.us.2 = ashr exact i64 %sext24, 32
- %arrayidx.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.2
- %8 = load float, float* %arrayidx.us.2, align 4
- %arrayidx7.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.2
- %9 = load float, float* %arrayidx7.us.2, align 4
+ %arrayidx.us.2 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %idxprom.us.2
+ %8 = load float, ptr %arrayidx.us.2, align 4
+ %arrayidx7.us.2 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv.2
+ %9 = load float, ptr %arrayidx7.us.2, align 4
%add8.us.2 = fadd float %9, %8
- store float %add8.us.2, float* %arrayidx7.us.2, align 4
+ store float %add8.us.2, ptr %arrayidx7.us.2, align 4
%indvars.iv.next.2 = add i64 %indvars.iv.2, %1
%10 = trunc i64 %indvars.iv.next.2 to i32
%cmp3.us.2 = icmp slt i32 %10, 32000
br i1 %cmp3.us.2, label %for.body4.us.2, label %for.body4.lr.ph.us.3
for.body4.lr.ph.us.3: ; preds = %for.body4.us.2
- %call10.us.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.2 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
br label %for.body4.us.3
for.body4.us.3: ; preds = %for.body4.us.3, %for.body4.lr.ph.us.3
%sub5.us.3 = sub i64 31999, %indvars.iv20.3
%sext25 = shl i64 %sub5.us.3, 32
%idxprom.us.3 = ashr exact i64 %sext25, 32
- %arrayidx.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.3
- %11 = load float, float* %arrayidx.us.3, align 4
- %arrayidx7.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.3
- %12 = load float, float* %arrayidx7.us.3, align 4
+ %arrayidx.us.3 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %idxprom.us.3
+ %11 = load float, ptr %arrayidx.us.3, align 4
+ %arrayidx7.us.3 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv.3
+ %12 = load float, ptr %arrayidx7.us.3, align 4
%add8.us.3 = fadd float %12, %11
- store float %add8.us.3, float* %arrayidx7.us.3, align 4
+ store float %add8.us.3, ptr %arrayidx7.us.3, align 4
%indvars.iv.next.3 = add i64 %indvars.iv.3, %1
%13 = trunc i64 %indvars.iv.next.3 to i32
%cmp3.us.3 = icmp slt i32 %13, 32000
br i1 %cmp3.us.3, label %for.body4.us.3, label %for.body4.lr.ph.us.4
for.body4.lr.ph.us.4: ; preds = %for.body4.us.3
- %call10.us.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.3 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
br label %for.body4.us.4
for.body4.us.4: ; preds = %for.body4.us.4, %for.body4.lr.ph.us.4
%sub5.us.4 = sub i64 31999, %indvars.iv20.4
%sext26 = shl i64 %sub5.us.4, 32
%idxprom.us.4 = ashr exact i64 %sext26, 32
- %arrayidx.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.4
- %14 = load float, float* %arrayidx.us.4, align 4
- %arrayidx7.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.4
- %15 = load float, float* %arrayidx7.us.4, align 4
+ %arrayidx.us.4 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %idxprom.us.4
+ %14 = load float, ptr %arrayidx.us.4, align 4
+ %arrayidx7.us.4 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv.4
+ %15 = load float, ptr %arrayidx7.us.4, align 4
%add8.us.4 = fadd float %15, %14
- store float %add8.us.4, float* %arrayidx7.us.4, align 4
+ store float %add8.us.4, ptr %arrayidx7.us.4, align 4
%indvars.iv.next.4 = add i64 %indvars.iv.4, %1
%16 = trunc i64 %indvars.iv.next.4 to i32
%cmp3.us.4 = icmp slt i32 %16, 32000
br i1 %cmp3.us.4, label %for.body4.us.4, label %for.end.us.4
for.end.us.4: ; preds = %for.body4.us.4
- %call10.us.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10.us.4 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
%inc.us.4 = add nsw i32 %nl.019.us, 5
%exitcond.4 = icmp eq i32 %inc.us.4, 200000
br i1 %exitcond.4, label %for.end12, label %for.body4.lr.ph.us
for.end.7: ; preds = %entry, %for.end.7
%nl.019 = phi i32 [ %inc.7, %for.end.7 ], [ 0, %entry ]
- %call10 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.1 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.2 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.3 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.4 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.5 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.6 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
- %call10.7 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float 0.000000e+00) nounwind
+ %call10 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.1 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.2 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.3 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.4 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.5 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.6 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
+ %call10.7 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float 0.000000e+00) nounwind
%inc.7 = add nsw i32 %nl.019, 8
%exitcond.7 = icmp eq i32 %inc.7, 200000
br i1 %exitcond.7, label %for.end12, label %for.end.7
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
!3 = !{!"branch_weights", i32 64, i32 4}
@.str81 = private unnamed_addr constant [6 x i8] c"s3110\00", align 1
@.str235 = private unnamed_addr constant [15 x i8] c"S3110\09 %.2f \09\09\00", align 1
-declare i32 @printf(i8* nocapture, ...) nounwind
-declare i32 @init(i8* %name) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
+declare i32 @init(ptr %name) nounwind
declare i64 @clock() nounwind
-declare i32 @dummy(float*, float*, float*, float*, float*, [256 x float]*, [256 x float]*, [256 x float]*, float)
+declare i32 @dummy(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, float)
declare void @check(i32 %name) nounwind
; CHECK: mfcr
define i32 @s3110() nounwind {
entry:
- %call = tail call i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str81, i64 0, i64 0))
+ %call = tail call i32 @init(ptr @.str81)
%call1 = tail call i64 @clock() nounwind
br label %for.body
for.body: ; preds = %for.end17, %entry
%nl.041 = phi i32 [ 0, %entry ], [ %inc22, %for.end17 ]
- %0 = load float, float* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0, i64 0), align 16
+ %0 = load float, ptr @aa, align 16
br label %for.cond5.preheader
for.cond5.preheader: ; preds = %for.inc15, %for.body
%max.235 = phi float [ %max.139, %for.cond5.preheader ], [ %max.3.15, %for.body7 ]
%xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
%yindex.233 = phi i32 [ %yindex.137, %for.cond5.preheader ], [ %yindex.3.15, %for.body7 ]
- %arrayidx9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
- %1 = load float, float* %arrayidx9, align 16
+ %arrayidx9 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
+ %1 = load float, ptr %arrayidx9, align 16
%cmp10 = fcmp ogt float %1, %max.235
%2 = trunc i64 %indvars.iv to i32
%yindex.3 = select i1 %cmp10, i32 %2, i32 %yindex.233
%xindex.3 = select i1 %cmp10, i32 %3, i32 %xindex.234
%max.3 = select i1 %cmp10, float %1, float %max.235
%indvars.iv.next45 = or i64 %indvars.iv, 1
- %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
- %4 = load float, float* %arrayidx9.1, align 4
+ %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
+ %4 = load float, ptr %arrayidx9.1, align 4
%cmp10.1 = fcmp ogt float %4, %max.3
%5 = trunc i64 %indvars.iv.next45 to i32
%yindex.3.1 = select i1 %cmp10.1, i32 %5, i32 %yindex.3
%xindex.3.1 = select i1 %cmp10.1, i32 %3, i32 %xindex.3
%max.3.1 = select i1 %cmp10.1, float %4, float %max.3
%indvars.iv.next.146 = or i64 %indvars.iv, 2
- %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
- %6 = load float, float* %arrayidx9.2, align 8
+ %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
+ %6 = load float, ptr %arrayidx9.2, align 8
%cmp10.2 = fcmp ogt float %6, %max.3.1
%7 = trunc i64 %indvars.iv.next.146 to i32
%yindex.3.2 = select i1 %cmp10.2, i32 %7, i32 %yindex.3.1
%xindex.3.2 = select i1 %cmp10.2, i32 %3, i32 %xindex.3.1
%max.3.2 = select i1 %cmp10.2, float %6, float %max.3.1
%indvars.iv.next.247 = or i64 %indvars.iv, 3
- %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
- %8 = load float, float* %arrayidx9.3, align 4
+ %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
+ %8 = load float, ptr %arrayidx9.3, align 4
%cmp10.3 = fcmp ogt float %8, %max.3.2
%9 = trunc i64 %indvars.iv.next.247 to i32
%yindex.3.3 = select i1 %cmp10.3, i32 %9, i32 %yindex.3.2
%xindex.3.3 = select i1 %cmp10.3, i32 %3, i32 %xindex.3.2
%max.3.3 = select i1 %cmp10.3, float %8, float %max.3.2
%indvars.iv.next.348 = or i64 %indvars.iv, 4
- %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
- %10 = load float, float* %arrayidx9.4, align 16
+ %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
+ %10 = load float, ptr %arrayidx9.4, align 16
%cmp10.4 = fcmp ogt float %10, %max.3.3
%11 = trunc i64 %indvars.iv.next.348 to i32
%yindex.3.4 = select i1 %cmp10.4, i32 %11, i32 %yindex.3.3
%xindex.3.4 = select i1 %cmp10.4, i32 %3, i32 %xindex.3.3
%max.3.4 = select i1 %cmp10.4, float %10, float %max.3.3
%indvars.iv.next.449 = or i64 %indvars.iv, 5
- %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
- %12 = load float, float* %arrayidx9.5, align 4
+ %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
+ %12 = load float, ptr %arrayidx9.5, align 4
%cmp10.5 = fcmp ogt float %12, %max.3.4
%13 = trunc i64 %indvars.iv.next.449 to i32
%yindex.3.5 = select i1 %cmp10.5, i32 %13, i32 %yindex.3.4
%xindex.3.5 = select i1 %cmp10.5, i32 %3, i32 %xindex.3.4
%max.3.5 = select i1 %cmp10.5, float %12, float %max.3.4
%indvars.iv.next.550 = or i64 %indvars.iv, 6
- %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
- %14 = load float, float* %arrayidx9.6, align 8
+ %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
+ %14 = load float, ptr %arrayidx9.6, align 8
%cmp10.6 = fcmp ogt float %14, %max.3.5
%15 = trunc i64 %indvars.iv.next.550 to i32
%yindex.3.6 = select i1 %cmp10.6, i32 %15, i32 %yindex.3.5
%xindex.3.6 = select i1 %cmp10.6, i32 %3, i32 %xindex.3.5
%max.3.6 = select i1 %cmp10.6, float %14, float %max.3.5
%indvars.iv.next.651 = or i64 %indvars.iv, 7
- %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
- %16 = load float, float* %arrayidx9.7, align 4
+ %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
+ %16 = load float, ptr %arrayidx9.7, align 4
%cmp10.7 = fcmp ogt float %16, %max.3.6
%17 = trunc i64 %indvars.iv.next.651 to i32
%yindex.3.7 = select i1 %cmp10.7, i32 %17, i32 %yindex.3.6
%xindex.3.7 = select i1 %cmp10.7, i32 %3, i32 %xindex.3.6
%max.3.7 = select i1 %cmp10.7, float %16, float %max.3.6
%indvars.iv.next.752 = or i64 %indvars.iv, 8
- %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
- %18 = load float, float* %arrayidx9.8, align 16
+ %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
+ %18 = load float, ptr %arrayidx9.8, align 16
%cmp10.8 = fcmp ogt float %18, %max.3.7
%19 = trunc i64 %indvars.iv.next.752 to i32
%yindex.3.8 = select i1 %cmp10.8, i32 %19, i32 %yindex.3.7
%xindex.3.8 = select i1 %cmp10.8, i32 %3, i32 %xindex.3.7
%max.3.8 = select i1 %cmp10.8, float %18, float %max.3.7
%indvars.iv.next.853 = or i64 %indvars.iv, 9
- %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
- %20 = load float, float* %arrayidx9.9, align 4
+ %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
+ %20 = load float, ptr %arrayidx9.9, align 4
%cmp10.9 = fcmp ogt float %20, %max.3.8
%21 = trunc i64 %indvars.iv.next.853 to i32
%yindex.3.9 = select i1 %cmp10.9, i32 %21, i32 %yindex.3.8
%xindex.3.9 = select i1 %cmp10.9, i32 %3, i32 %xindex.3.8
%max.3.9 = select i1 %cmp10.9, float %20, float %max.3.8
%indvars.iv.next.954 = or i64 %indvars.iv, 10
- %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
- %22 = load float, float* %arrayidx9.10, align 8
+ %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
+ %22 = load float, ptr %arrayidx9.10, align 8
%cmp10.10 = fcmp ogt float %22, %max.3.9
%23 = trunc i64 %indvars.iv.next.954 to i32
%yindex.3.10 = select i1 %cmp10.10, i32 %23, i32 %yindex.3.9
%xindex.3.10 = select i1 %cmp10.10, i32 %3, i32 %xindex.3.9
%max.3.10 = select i1 %cmp10.10, float %22, float %max.3.9
%indvars.iv.next.1055 = or i64 %indvars.iv, 11
- %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
- %24 = load float, float* %arrayidx9.11, align 4
+ %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
+ %24 = load float, ptr %arrayidx9.11, align 4
%cmp10.11 = fcmp ogt float %24, %max.3.10
%25 = trunc i64 %indvars.iv.next.1055 to i32
%yindex.3.11 = select i1 %cmp10.11, i32 %25, i32 %yindex.3.10
%xindex.3.11 = select i1 %cmp10.11, i32 %3, i32 %xindex.3.10
%max.3.11 = select i1 %cmp10.11, float %24, float %max.3.10
%indvars.iv.next.1156 = or i64 %indvars.iv, 12
- %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
- %26 = load float, float* %arrayidx9.12, align 16
+ %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
+ %26 = load float, ptr %arrayidx9.12, align 16
%cmp10.12 = fcmp ogt float %26, %max.3.11
%27 = trunc i64 %indvars.iv.next.1156 to i32
%yindex.3.12 = select i1 %cmp10.12, i32 %27, i32 %yindex.3.11
%xindex.3.12 = select i1 %cmp10.12, i32 %3, i32 %xindex.3.11
%max.3.12 = select i1 %cmp10.12, float %26, float %max.3.11
%indvars.iv.next.1257 = or i64 %indvars.iv, 13
- %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
- %28 = load float, float* %arrayidx9.13, align 4
+ %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
+ %28 = load float, ptr %arrayidx9.13, align 4
%cmp10.13 = fcmp ogt float %28, %max.3.12
%29 = trunc i64 %indvars.iv.next.1257 to i32
%yindex.3.13 = select i1 %cmp10.13, i32 %29, i32 %yindex.3.12
%xindex.3.13 = select i1 %cmp10.13, i32 %3, i32 %xindex.3.12
%max.3.13 = select i1 %cmp10.13, float %28, float %max.3.12
%indvars.iv.next.1358 = or i64 %indvars.iv, 14
- %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
- %30 = load float, float* %arrayidx9.14, align 8
+ %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
+ %30 = load float, ptr %arrayidx9.14, align 8
%cmp10.14 = fcmp ogt float %30, %max.3.13
%31 = trunc i64 %indvars.iv.next.1358 to i32
%yindex.3.14 = select i1 %cmp10.14, i32 %31, i32 %yindex.3.13
%xindex.3.14 = select i1 %cmp10.14, i32 %3, i32 %xindex.3.13
%max.3.14 = select i1 %cmp10.14, float %30, float %max.3.13
%indvars.iv.next.1459 = or i64 %indvars.iv, 15
- %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
- %32 = load float, float* %arrayidx9.15, align 4
+ %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]], ptr @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
+ %32 = load float, ptr %arrayidx9.15, align 4
%cmp10.15 = fcmp ogt float %32, %max.3.14
%33 = trunc i64 %indvars.iv.next.1459 to i32
%yindex.3.15 = select i1 %cmp10.15, i32 %33, i32 %yindex.3.14
%add = fadd float %max.3.15, %conv
%conv18 = sitofp i32 %yindex.3.15 to float
%add19 = fadd float %add, %conv18
- %call20 = tail call i32 @dummy(float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @b, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @c, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @d, i64 0, i64 0), float* getelementptr inbounds ([32000 x float], [32000 x float]* @e, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @bb, i64 0, i64 0), [256 x float]* getelementptr inbounds ([256 x [256 x float]], [256 x [256 x float]]* @cc, i64 0, i64 0), float %add19) nounwind
+ %call20 = tail call i32 @dummy(ptr @a, ptr @b, ptr @c, ptr @d, ptr @e, ptr @aa, ptr @bb, ptr @cc, float %add19) nounwind
%inc22 = add nsw i32 %nl.041, 1
%exitcond44 = icmp eq i32 %inc22, 78100
br i1 %exitcond44, label %for.end23, label %for.body
%sub = sub nsw i64 %call24, %call1
%conv25 = sitofp i64 %sub to double
%div = fdiv double %conv25, 1.000000e+06
- %call26 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str235, i64 0, i64 0), double %div) nounwind
+ %call26 = tail call i32 (ptr, ...) @printf(ptr @.str235, double %div) nounwind
%add29 = fadd float %add, 1.000000e+00
%add31 = fadd float %add29, %conv18
%add32 = fadd float %add31, 1.000000e+00
- store float %add32, float* @temp, align 4
+ store float %add32, ptr @temp, align 4
tail call void @check(i32 -1)
ret i32 0
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
!3 = !{!"branch_weights", i32 64, i32 4}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=g4 | FileCheck %s
-define void @test(i32* nocapture %x, i64* %xx, i32* %yp) nounwind uwtable ssp {
+define void @test(ptr nocapture %x, ptr %xx, ptr %yp) nounwind uwtable ssp {
entry:
- %yy = load i32, i32* %yp
+ %yy = load i32, ptr %yp
%y = add i32 %yy, 1
%z = zext i32 %y to i64
%z2 = shl i64 %z, 32
- store i64 %z2, i64* %xx, align 4
+ store i64 %z2, ptr %xx, align 4
ret void
; CHECK-LABEL: test:
entry:
%0 = alloca i8, i64 %n, align 1
%1 = alloca i8, i64 %n, align 1
- call void @use(i8* %0, i8* %1) nounwind
+ call void @use(ptr %0, ptr %1) nounwind
ret void
}
-declare void @use(i8*, i8*)
+declare void @use(ptr, ptr)
; Check we actually have two instances of dynamic stack allocation,
; identified by the stdux used to update the back-chain link.
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i8* @test(i8* %base, i8 %val) {
+define ptr @test(ptr %base, i8 %val) {
entry:
- %arrayidx = getelementptr inbounds i8, i8* %base, i32 -1
- store i8 %val, i8* %arrayidx, align 1
- %arrayidx2 = getelementptr inbounds i8, i8* %base, i32 1
- store i8 %val, i8* %arrayidx2, align 1
- ret i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %base, i32 -1
+ store i8 %val, ptr %arrayidx, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %base, i32 1
+ store i8 %val, ptr %arrayidx2, align 1
+ ret ptr %arrayidx
}
; CHECK: @test
; CHECK: %entry
; CHECK-NEXT: stb 4, 2(3)
; CHECK-NEXT: blr
-define i64* @test64(i64* %base, i64 %val) {
+define ptr @test64(ptr %base, i64 %val) {
entry:
- %arrayidx = getelementptr inbounds i64, i64* %base, i32 -1
- store i64 %val, i64* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds i64, i64* %base, i32 1
- store i64 %val, i64* %arrayidx2, align 8
- ret i64* %arrayidx
+ %arrayidx = getelementptr inbounds i64, ptr %base, i32 -1
+ store i64 %val, ptr %arrayidx, align 8
+ %arrayidx2 = getelementptr inbounds i64, ptr %base, i32 1
+ store i64 %val, ptr %arrayidx2, align 8
+ ret ptr %arrayidx
}
; CHECK: @test64
; CHECK: %entry
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [2 x i8] }
+%struct.buffer_t = type { i64, ptr, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [2 x i8] }
-declare i32 @__f1(i8*, %struct.buffer_t* noalias)
+declare i32 @__f1(ptr, ptr noalias)
; CHECK-LABEL: f1:
-define i32 @f1(i8* %__user_context, %struct.buffer_t* noalias %f1.buffer) {
+define i32 @f1(ptr %__user_context, ptr noalias %f1.buffer) {
entry:
br i1 undef, label %"assert succeeded", label %"assert failed", !prof !1
br label %destructor_block
"assert succeeded": ; preds = %entry
- %__f1_result = call i32 @__f1(i8* %__user_context, %struct.buffer_t* %f1.buffer) #5
+ %__f1_result = call i32 @__f1(ptr %__user_context, ptr %f1.buffer) #5
%0 = icmp eq i32 %__f1_result, 0
br i1 %0, label %"assert succeeded11", label %"assert failed10", !prof !1
%26 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 24
%27 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 25
%28 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 26
- %29 = load i32, i32* @x, align 4
+ %29 = load i32, ptr @x, align 4
%30 = add nsw i32 %29, 1
- store i32 %30, i32* @x, align 4
+ store i32 %30, ptr @x, align 4
tail call void asm sideeffect "nop", "{r3},{r4},{r5},{r6},{r7},{r8},{r9},{r10},{r11},{r12},{r14},{r15},{r16},{r17},{r18},{r19},{r20},{r21},{r22},{r23},{r24},{r25},{r26},{r27},{r28},{r29},{r30},~{memory}"(i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 %18, i32 %19, i32 %20, i32 %21, i32 %22, i32 %23, i32 %24, i32 %25, i32 %26, i32 %27, i32 %28)
ret void
}
; by the confused CodeGen. Just to be sure, check there isn't one.
; CHECK-NOT: cror
; Function Attrs: uwtable
-define signext i32 @_Z8access_pP1Tc(%typ* %p, i8 zeroext %type) {
- %b = getelementptr inbounds %typ, %typ* %p, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %2 = ptrtoint i32* %b to i64
+define signext i32 @_Z8access_pP1Tc(ptr %p, i8 zeroext %type) {
+ %b = getelementptr inbounds %typ, ptr %p, i64 0, i32 1
+ %1 = load i32, ptr %b, align 4
+ %2 = ptrtoint ptr %b to i64
%3 = and i64 %2, -35184372088833
- %4 = inttoptr i64 %3 to i32*
- %_msld = load i32, i32* %4, align 4
+ %4 = inttoptr i64 %3 to ptr
+ %_msld = load i32, ptr %4, align 4
%zzz = add i32 %1, %_msld
ret i32 %zzz
}
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-@ptr = common global i8* null, align 8
+@ptr = common global ptr null, align 8
; Verify there's no junk between these two instructions from misemitted
; EH_SjLj_Setup.
; CHECK: cmplwi 3, 0
define void @h() nounwind {
- %1 = load i8**, i8*** bitcast (i8** @ptr to i8***), align 8
- %2 = tail call i8* @llvm.frameaddress(i32 0)
- store i8* %2, i8** %1, align 8
- %3 = tail call i8* @llvm.stacksave()
- %4 = getelementptr inbounds i8*, i8** %1, i64 2
- store i8* %3, i8** %4, align 8
- %5 = bitcast i8** %1 to i8*
- %6 = tail call i32 @llvm.eh.sjlj.setjmp(i8* %5)
- %7 = icmp eq i32 %6, 0
- br i1 %7, label %9, label %8
+ %1 = load ptr, ptr @ptr, align 8
+ %2 = tail call ptr @llvm.frameaddress(i32 0)
+ store ptr %2, ptr %1, align 8
+ %3 = tail call ptr @llvm.stacksave()
+ %4 = getelementptr inbounds ptr, ptr %1, i64 2
+ store ptr %3, ptr %4, align 8
+ %5 = tail call i32 @llvm.eh.sjlj.setjmp(ptr %1)
+ %6 = icmp eq i32 %5, 0
+ br i1 %6, label %8, label %7
; <label>:8: ; preds = %0
tail call void @g()
- br label %10
+ br label %9
; <label>:9: ; preds = %0
tail call void @f()
- br label %10
+ br label %9
-; <label>:10: ; preds = %9, %8
+; <label>:9: ; preds = %8, %7
ret void
}
; Function Attrs: nounwind readnone
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
; Function Attrs: nounwind
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
; Function Attrs: nounwind
-declare i32 @llvm.eh.sjlj.setjmp(i8*)
+declare i32 @llvm.eh.sjlj.setjmp(ptr)
declare void @g()
define void @test_op_ignore() nounwind {
entry:
- %0 = atomicrmw add i8* @sc, i8 1 monotonic
- %1 = atomicrmw add i8* @uc, i8 1 monotonic
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %3 = atomicrmw add i16* %2, i16 1 monotonic
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %5 = atomicrmw add i16* %4, i16 1 monotonic
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %7 = atomicrmw add i32* %6, i32 1 monotonic
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %9 = atomicrmw add i32* %8, i32 1 monotonic
- %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %11 = atomicrmw add i64* %10, i64 1 monotonic
- %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %13 = atomicrmw add i64* %12, i64 1 monotonic
- %14 = atomicrmw sub i8* @sc, i8 1 monotonic
- %15 = atomicrmw sub i8* @uc, i8 1 monotonic
- %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %17 = atomicrmw sub i16* %16, i16 1 monotonic
- %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %19 = atomicrmw sub i16* %18, i16 1 monotonic
- %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %21 = atomicrmw sub i32* %20, i32 1 monotonic
- %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %23 = atomicrmw sub i32* %22, i32 1 monotonic
- %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %25 = atomicrmw sub i64* %24, i64 1 monotonic
- %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %27 = atomicrmw sub i64* %26, i64 1 monotonic
- %28 = atomicrmw or i8* @sc, i8 1 monotonic
- %29 = atomicrmw or i8* @uc, i8 1 monotonic
- %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %31 = atomicrmw or i16* %30, i16 1 monotonic
- %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %33 = atomicrmw or i16* %32, i16 1 monotonic
- %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %35 = atomicrmw or i32* %34, i32 1 monotonic
- %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %37 = atomicrmw or i32* %36, i32 1 monotonic
- %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %39 = atomicrmw or i64* %38, i64 1 monotonic
- %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %41 = atomicrmw or i64* %40, i64 1 monotonic
- %42 = atomicrmw xor i8* @sc, i8 1 monotonic
- %43 = atomicrmw xor i8* @uc, i8 1 monotonic
- %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %45 = atomicrmw xor i16* %44, i16 1 monotonic
- %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %47 = atomicrmw xor i16* %46, i16 1 monotonic
- %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %49 = atomicrmw xor i32* %48, i32 1 monotonic
- %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %51 = atomicrmw xor i32* %50, i32 1 monotonic
- %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %53 = atomicrmw xor i64* %52, i64 1 monotonic
- %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = atomicrmw xor i64* %54, i64 1 monotonic
- %56 = atomicrmw and i8* @sc, i8 1 monotonic
- %57 = atomicrmw and i8* @uc, i8 1 monotonic
- %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %59 = atomicrmw and i16* %58, i16 1 monotonic
- %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %61 = atomicrmw and i16* %60, i16 1 monotonic
- %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %63 = atomicrmw and i32* %62, i32 1 monotonic
- %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %65 = atomicrmw and i32* %64, i32 1 monotonic
- %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %67 = atomicrmw and i64* %66, i64 1 monotonic
- %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %69 = atomicrmw and i64* %68, i64 1 monotonic
- %70 = atomicrmw nand i8* @sc, i8 1 monotonic
- %71 = atomicrmw nand i8* @uc, i8 1 monotonic
- %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %73 = atomicrmw nand i16* %72, i16 1 monotonic
- %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %75 = atomicrmw nand i16* %74, i16 1 monotonic
- %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %77 = atomicrmw nand i32* %76, i32 1 monotonic
- %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %79 = atomicrmw nand i32* %78, i32 1 monotonic
- %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %81 = atomicrmw nand i64* %80, i64 1 monotonic
- %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %83 = atomicrmw nand i64* %82, i64 1 monotonic
+ %0 = atomicrmw add ptr @sc, i8 1 monotonic
+ %1 = atomicrmw add ptr @uc, i8 1 monotonic
+ %2 = atomicrmw add ptr @ss, i16 1 monotonic
+ %3 = atomicrmw add ptr @us, i16 1 monotonic
+ %4 = atomicrmw add ptr @si, i32 1 monotonic
+ %5 = atomicrmw add ptr @ui, i32 1 monotonic
+ %6 = atomicrmw add ptr @sl, i64 1 monotonic
+ %7 = atomicrmw add ptr @ul, i64 1 monotonic
+ %8 = atomicrmw sub ptr @sc, i8 1 monotonic
+ %9 = atomicrmw sub ptr @uc, i8 1 monotonic
+ %10 = atomicrmw sub ptr @ss, i16 1 monotonic
+ %11 = atomicrmw sub ptr @us, i16 1 monotonic
+ %12 = atomicrmw sub ptr @si, i32 1 monotonic
+ %13 = atomicrmw sub ptr @ui, i32 1 monotonic
+ %14 = atomicrmw sub ptr @sl, i64 1 monotonic
+ %15 = atomicrmw sub ptr @ul, i64 1 monotonic
+ %16 = atomicrmw or ptr @sc, i8 1 monotonic
+ %17 = atomicrmw or ptr @uc, i8 1 monotonic
+ %18 = atomicrmw or ptr @ss, i16 1 monotonic
+ %19 = atomicrmw or ptr @us, i16 1 monotonic
+ %20 = atomicrmw or ptr @si, i32 1 monotonic
+ %21 = atomicrmw or ptr @ui, i32 1 monotonic
+ %22 = atomicrmw or ptr @sl, i64 1 monotonic
+ %23 = atomicrmw or ptr @ul, i64 1 monotonic
+ %24 = atomicrmw xor ptr @sc, i8 1 monotonic
+ %25 = atomicrmw xor ptr @uc, i8 1 monotonic
+ %26 = atomicrmw xor ptr @ss, i16 1 monotonic
+ %27 = atomicrmw xor ptr @us, i16 1 monotonic
+ %28 = atomicrmw xor ptr @si, i32 1 monotonic
+ %29 = atomicrmw xor ptr @ui, i32 1 monotonic
+ %30 = atomicrmw xor ptr @sl, i64 1 monotonic
+ %31 = atomicrmw xor ptr @ul, i64 1 monotonic
+ %32 = atomicrmw and ptr @sc, i8 1 monotonic
+ %33 = atomicrmw and ptr @uc, i8 1 monotonic
+ %34 = atomicrmw and ptr @ss, i16 1 monotonic
+ %35 = atomicrmw and ptr @us, i16 1 monotonic
+ %36 = atomicrmw and ptr @si, i32 1 monotonic
+ %37 = atomicrmw and ptr @ui, i32 1 monotonic
+ %38 = atomicrmw and ptr @sl, i64 1 monotonic
+ %39 = atomicrmw and ptr @ul, i64 1 monotonic
+ %40 = atomicrmw nand ptr @sc, i8 1 monotonic
+ %41 = atomicrmw nand ptr @uc, i8 1 monotonic
+ %42 = atomicrmw nand ptr @ss, i16 1 monotonic
+ %43 = atomicrmw nand ptr @us, i16 1 monotonic
+ %44 = atomicrmw nand ptr @si, i32 1 monotonic
+ %45 = atomicrmw nand ptr @ui, i32 1 monotonic
+ %46 = atomicrmw nand ptr @sl, i64 1 monotonic
+ %47 = atomicrmw nand ptr @ul, i64 1 monotonic
br label %return
return: ; preds = %entry
define void @test_fetch_and_op() nounwind {
entry:
- %0 = atomicrmw add i8* @sc, i8 11 monotonic
- store i8 %0, i8* @sc, align 1
- %1 = atomicrmw add i8* @uc, i8 11 monotonic
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %3 = atomicrmw add i16* %2, i16 11 monotonic
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %5 = atomicrmw add i16* %4, i16 11 monotonic
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %7 = atomicrmw add i32* %6, i32 11 monotonic
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %9 = atomicrmw add i32* %8, i32 11 monotonic
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %11 = atomicrmw add i64* %10, i64 11 monotonic
- store i64 %11, i64* @sl, align 8
- %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %13 = atomicrmw add i64* %12, i64 11 monotonic
- store i64 %13, i64* @ul, align 8
- %14 = atomicrmw sub i8* @sc, i8 11 monotonic
- store i8 %14, i8* @sc, align 1
- %15 = atomicrmw sub i8* @uc, i8 11 monotonic
- store i8 %15, i8* @uc, align 1
- %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %17 = atomicrmw sub i16* %16, i16 11 monotonic
- store i16 %17, i16* @ss, align 2
- %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %19 = atomicrmw sub i16* %18, i16 11 monotonic
- store i16 %19, i16* @us, align 2
- %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %21 = atomicrmw sub i32* %20, i32 11 monotonic
- store i32 %21, i32* @si, align 4
- %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %23 = atomicrmw sub i32* %22, i32 11 monotonic
- store i32 %23, i32* @ui, align 4
- %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %25 = atomicrmw sub i64* %24, i64 11 monotonic
- store i64 %25, i64* @sl, align 8
- %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %27 = atomicrmw sub i64* %26, i64 11 monotonic
- store i64 %27, i64* @ul, align 8
- %28 = atomicrmw or i8* @sc, i8 11 monotonic
- store i8 %28, i8* @sc, align 1
- %29 = atomicrmw or i8* @uc, i8 11 monotonic
- store i8 %29, i8* @uc, align 1
- %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %31 = atomicrmw or i16* %30, i16 11 monotonic
- store i16 %31, i16* @ss, align 2
- %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %33 = atomicrmw or i16* %32, i16 11 monotonic
- store i16 %33, i16* @us, align 2
- %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %35 = atomicrmw or i32* %34, i32 11 monotonic
- store i32 %35, i32* @si, align 4
- %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %37 = atomicrmw or i32* %36, i32 11 monotonic
- store i32 %37, i32* @ui, align 4
- %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %39 = atomicrmw or i64* %38, i64 11 monotonic
- store i64 %39, i64* @sl, align 8
- %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %41 = atomicrmw or i64* %40, i64 11 monotonic
- store i64 %41, i64* @ul, align 8
- %42 = atomicrmw xor i8* @sc, i8 11 monotonic
- store i8 %42, i8* @sc, align 1
- %43 = atomicrmw xor i8* @uc, i8 11 monotonic
- store i8 %43, i8* @uc, align 1
- %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %45 = atomicrmw xor i16* %44, i16 11 monotonic
- store i16 %45, i16* @ss, align 2
- %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %47 = atomicrmw xor i16* %46, i16 11 monotonic
- store i16 %47, i16* @us, align 2
- %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %49 = atomicrmw xor i32* %48, i32 11 monotonic
- store i32 %49, i32* @si, align 4
- %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %51 = atomicrmw xor i32* %50, i32 11 monotonic
- store i32 %51, i32* @ui, align 4
- %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %53 = atomicrmw xor i64* %52, i64 11 monotonic
- store i64 %53, i64* @sl, align 8
- %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = atomicrmw xor i64* %54, i64 11 monotonic
- store i64 %55, i64* @ul, align 8
- %56 = atomicrmw and i8* @sc, i8 11 monotonic
- store i8 %56, i8* @sc, align 1
- %57 = atomicrmw and i8* @uc, i8 11 monotonic
- store i8 %57, i8* @uc, align 1
- %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %59 = atomicrmw and i16* %58, i16 11 monotonic
- store i16 %59, i16* @ss, align 2
- %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %61 = atomicrmw and i16* %60, i16 11 monotonic
- store i16 %61, i16* @us, align 2
- %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %63 = atomicrmw and i32* %62, i32 11 monotonic
- store i32 %63, i32* @si, align 4
- %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %65 = atomicrmw and i32* %64, i32 11 monotonic
- store i32 %65, i32* @ui, align 4
- %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %67 = atomicrmw and i64* %66, i64 11 monotonic
- store i64 %67, i64* @sl, align 8
- %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %69 = atomicrmw and i64* %68, i64 11 monotonic
- store i64 %69, i64* @ul, align 8
- %70 = atomicrmw nand i8* @sc, i8 11 monotonic
- store i8 %70, i8* @sc, align 1
- %71 = atomicrmw nand i8* @uc, i8 11 monotonic
- store i8 %71, i8* @uc, align 1
- %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %73 = atomicrmw nand i16* %72, i16 11 monotonic
- store i16 %73, i16* @ss, align 2
- %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %75 = atomicrmw nand i16* %74, i16 11 monotonic
- store i16 %75, i16* @us, align 2
- %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %77 = atomicrmw nand i32* %76, i32 11 monotonic
- store i32 %77, i32* @si, align 4
- %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %79 = atomicrmw nand i32* %78, i32 11 monotonic
- store i32 %79, i32* @ui, align 4
- %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %81 = atomicrmw nand i64* %80, i64 11 monotonic
- store i64 %81, i64* @sl, align 8
- %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %83 = atomicrmw nand i64* %82, i64 11 monotonic
- store i64 %83, i64* @ul, align 8
+ %0 = atomicrmw add ptr @sc, i8 11 monotonic
+ store i8 %0, ptr @sc, align 1
+ %1 = atomicrmw add ptr @uc, i8 11 monotonic
+ store i8 %1, ptr @uc, align 1
+ %2 = atomicrmw add ptr @ss, i16 11 monotonic
+ store i16 %2, ptr @ss, align 2
+ %3 = atomicrmw add ptr @us, i16 11 monotonic
+ store i16 %3, ptr @us, align 2
+ %4 = atomicrmw add ptr @si, i32 11 monotonic
+ store i32 %4, ptr @si, align 4
+ %5 = atomicrmw add ptr @ui, i32 11 monotonic
+ store i32 %5, ptr @ui, align 4
+ %6 = atomicrmw add ptr @sl, i64 11 monotonic
+ store i64 %6, ptr @sl, align 8
+ %7 = atomicrmw add ptr @ul, i64 11 monotonic
+ store i64 %7, ptr @ul, align 8
+ %8 = atomicrmw sub ptr @sc, i8 11 monotonic
+ store i8 %8, ptr @sc, align 1
+ %9 = atomicrmw sub ptr @uc, i8 11 monotonic
+ store i8 %9, ptr @uc, align 1
+ %10 = atomicrmw sub ptr @ss, i16 11 monotonic
+ store i16 %10, ptr @ss, align 2
+ %11 = atomicrmw sub ptr @us, i16 11 monotonic
+ store i16 %11, ptr @us, align 2
+ %12 = atomicrmw sub ptr @si, i32 11 monotonic
+ store i32 %12, ptr @si, align 4
+ %13 = atomicrmw sub ptr @ui, i32 11 monotonic
+ store i32 %13, ptr @ui, align 4
+ %14 = atomicrmw sub ptr @sl, i64 11 monotonic
+ store i64 %14, ptr @sl, align 8
+ %15 = atomicrmw sub ptr @ul, i64 11 monotonic
+ store i64 %15, ptr @ul, align 8
+ %16 = atomicrmw or ptr @sc, i8 11 monotonic
+ store i8 %16, ptr @sc, align 1
+ %17 = atomicrmw or ptr @uc, i8 11 monotonic
+ store i8 %17, ptr @uc, align 1
+ %18 = atomicrmw or ptr @ss, i16 11 monotonic
+ store i16 %18, ptr @ss, align 2
+ %19 = atomicrmw or ptr @us, i16 11 monotonic
+ store i16 %19, ptr @us, align 2
+ %20 = atomicrmw or ptr @si, i32 11 monotonic
+ store i32 %20, ptr @si, align 4
+ %21 = atomicrmw or ptr @ui, i32 11 monotonic
+ store i32 %21, ptr @ui, align 4
+ %22 = atomicrmw or ptr @sl, i64 11 monotonic
+ store i64 %22, ptr @sl, align 8
+ %23 = atomicrmw or ptr @ul, i64 11 monotonic
+ store i64 %23, ptr @ul, align 8
+ %24 = atomicrmw xor ptr @sc, i8 11 monotonic
+ store i8 %24, ptr @sc, align 1
+ %25 = atomicrmw xor ptr @uc, i8 11 monotonic
+ store i8 %25, ptr @uc, align 1
+ %26 = atomicrmw xor ptr @ss, i16 11 monotonic
+ store i16 %26, ptr @ss, align 2
+ %27 = atomicrmw xor ptr @us, i16 11 monotonic
+ store i16 %27, ptr @us, align 2
+ %28 = atomicrmw xor ptr @si, i32 11 monotonic
+ store i32 %28, ptr @si, align 4
+ %29 = atomicrmw xor ptr @ui, i32 11 monotonic
+ store i32 %29, ptr @ui, align 4
+ %30 = atomicrmw xor ptr @sl, i64 11 monotonic
+ store i64 %30, ptr @sl, align 8
+ %31 = atomicrmw xor ptr @ul, i64 11 monotonic
+ store i64 %31, ptr @ul, align 8
+ %32 = atomicrmw and ptr @sc, i8 11 monotonic
+ store i8 %32, ptr @sc, align 1
+ %33 = atomicrmw and ptr @uc, i8 11 monotonic
+ store i8 %33, ptr @uc, align 1
+ %34 = atomicrmw and ptr @ss, i16 11 monotonic
+ store i16 %34, ptr @ss, align 2
+ %35 = atomicrmw and ptr @us, i16 11 monotonic
+ store i16 %35, ptr @us, align 2
+ %36 = atomicrmw and ptr @si, i32 11 monotonic
+ store i32 %36, ptr @si, align 4
+ %37 = atomicrmw and ptr @ui, i32 11 monotonic
+ store i32 %37, ptr @ui, align 4
+ %38 = atomicrmw and ptr @sl, i64 11 monotonic
+ store i64 %38, ptr @sl, align 8
+ %39 = atomicrmw and ptr @ul, i64 11 monotonic
+ store i64 %39, ptr @ul, align 8
+ %40 = atomicrmw nand ptr @sc, i8 11 monotonic
+ store i8 %40, ptr @sc, align 1
+ %41 = atomicrmw nand ptr @uc, i8 11 monotonic
+ store i8 %41, ptr @uc, align 1
+ %42 = atomicrmw nand ptr @ss, i16 11 monotonic
+ store i16 %42, ptr @ss, align 2
+ %43 = atomicrmw nand ptr @us, i16 11 monotonic
+ store i16 %43, ptr @us, align 2
+ %44 = atomicrmw nand ptr @si, i32 11 monotonic
+ store i32 %44, ptr @si, align 4
+ %45 = atomicrmw nand ptr @ui, i32 11 monotonic
+ store i32 %45, ptr @ui, align 4
+ %46 = atomicrmw nand ptr @sl, i64 11 monotonic
+ store i64 %46, ptr @sl, align 8
+ %47 = atomicrmw nand ptr @ul, i64 11 monotonic
+ store i64 %47, ptr @ul, align 8
br label %return
return: ; preds = %entry
define void @test_op_and_fetch() nounwind {
entry:
- %0 = load i8, i8* @uc, align 1
- %1 = atomicrmw add i8* @sc, i8 %0 monotonic
+ %0 = load i8, ptr @uc, align 1
+ %1 = atomicrmw add ptr @sc, i8 %0 monotonic
%2 = add i8 %1, %0
- store i8 %2, i8* @sc, align 1
- %3 = load i8, i8* @uc, align 1
- %4 = atomicrmw add i8* @uc, i8 %3 monotonic
+ store i8 %2, ptr @sc, align 1
+ %3 = load i8, ptr @uc, align 1
+ %4 = atomicrmw add ptr @uc, i8 %3 monotonic
%5 = add i8 %4, %3
- store i8 %5, i8* @uc, align 1
- %6 = load i8, i8* @uc, align 1
+ store i8 %5, ptr @uc, align 1
+ %6 = load i8, ptr @uc, align 1
%7 = zext i8 %6 to i16
- %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %9 = atomicrmw add i16* %8, i16 %7 monotonic
- %10 = add i16 %9, %7
- store i16 %10, i16* @ss, align 2
- %11 = load i8, i8* @uc, align 1
- %12 = zext i8 %11 to i16
- %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %14 = atomicrmw add i16* %13, i16 %12 monotonic
- %15 = add i16 %14, %12
- store i16 %15, i16* @us, align 2
- %16 = load i8, i8* @uc, align 1
- %17 = zext i8 %16 to i32
- %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %19 = atomicrmw add i32* %18, i32 %17 monotonic
- %20 = add i32 %19, %17
- store i32 %20, i32* @si, align 4
- %21 = load i8, i8* @uc, align 1
- %22 = zext i8 %21 to i32
- %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %24 = atomicrmw add i32* %23, i32 %22 monotonic
- %25 = add i32 %24, %22
- store i32 %25, i32* @ui, align 4
- %26 = load i8, i8* @uc, align 1
+ %8 = atomicrmw add ptr @ss, i16 %7 monotonic
+ %9 = add i16 %8, %7
+ store i16 %9, ptr @ss, align 2
+ %10 = load i8, ptr @uc, align 1
+ %11 = zext i8 %10 to i16
+ %12 = atomicrmw add ptr @us, i16 %11 monotonic
+ %13 = add i16 %12, %11
+ store i16 %13, ptr @us, align 2
+ %14 = load i8, ptr @uc, align 1
+ %15 = zext i8 %14 to i32
+ %16 = atomicrmw add ptr @si, i32 %15 monotonic
+ %17 = add i32 %16, %15
+ store i32 %17, ptr @si, align 4
+ %18 = load i8, ptr @uc, align 1
+ %19 = zext i8 %18 to i32
+ %20 = atomicrmw add ptr @ui, i32 %19 monotonic
+ %21 = add i32 %20, %19
+ store i32 %21, ptr @ui, align 4
+ %22 = load i8, ptr @uc, align 1
+ %23 = zext i8 %22 to i64
+ %24 = atomicrmw add ptr @sl, i64 %23 monotonic
+ %25 = add i64 %24, %23
+ store i64 %25, ptr @sl, align 8
+ %26 = load i8, ptr @uc, align 1
%27 = zext i8 %26 to i64
- %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %29 = atomicrmw add i64* %28, i64 %27 monotonic
- %30 = add i64 %29, %27
- store i64 %30, i64* @sl, align 8
- %31 = load i8, i8* @uc, align 1
- %32 = zext i8 %31 to i64
- %33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %34 = atomicrmw add i64* %33, i64 %32 monotonic
- %35 = add i64 %34, %32
- store i64 %35, i64* @ul, align 8
- %36 = load i8, i8* @uc, align 1
- %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
- %38 = sub i8 %37, %36
- store i8 %38, i8* @sc, align 1
- %39 = load i8, i8* @uc, align 1
- %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
- %41 = sub i8 %40, %39
- store i8 %41, i8* @uc, align 1
- %42 = load i8, i8* @uc, align 1
- %43 = zext i8 %42 to i16
- %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %45 = atomicrmw sub i16* %44, i16 %43 monotonic
- %46 = sub i16 %45, %43
- store i16 %46, i16* @ss, align 2
- %47 = load i8, i8* @uc, align 1
- %48 = zext i8 %47 to i16
- %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %50 = atomicrmw sub i16* %49, i16 %48 monotonic
- %51 = sub i16 %50, %48
- store i16 %51, i16* @us, align 2
- %52 = load i8, i8* @uc, align 1
- %53 = zext i8 %52 to i32
- %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %55 = atomicrmw sub i32* %54, i32 %53 monotonic
- %56 = sub i32 %55, %53
- store i32 %56, i32* @si, align 4
- %57 = load i8, i8* @uc, align 1
- %58 = zext i8 %57 to i32
- %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %60 = atomicrmw sub i32* %59, i32 %58 monotonic
- %61 = sub i32 %60, %58
- store i32 %61, i32* @ui, align 4
- %62 = load i8, i8* @uc, align 1
- %63 = zext i8 %62 to i64
- %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %65 = atomicrmw sub i64* %64, i64 %63 monotonic
- %66 = sub i64 %65, %63
- store i64 %66, i64* @sl, align 8
- %67 = load i8, i8* @uc, align 1
- %68 = zext i8 %67 to i64
- %69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %70 = atomicrmw sub i64* %69, i64 %68 monotonic
- %71 = sub i64 %70, %68
- store i64 %71, i64* @ul, align 8
- %72 = load i8, i8* @uc, align 1
- %73 = atomicrmw or i8* @sc, i8 %72 monotonic
- %74 = or i8 %73, %72
- store i8 %74, i8* @sc, align 1
- %75 = load i8, i8* @uc, align 1
- %76 = atomicrmw or i8* @uc, i8 %75 monotonic
- %77 = or i8 %76, %75
- store i8 %77, i8* @uc, align 1
- %78 = load i8, i8* @uc, align 1
- %79 = zext i8 %78 to i16
- %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %81 = atomicrmw or i16* %80, i16 %79 monotonic
- %82 = or i16 %81, %79
- store i16 %82, i16* @ss, align 2
- %83 = load i8, i8* @uc, align 1
- %84 = zext i8 %83 to i16
- %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %86 = atomicrmw or i16* %85, i16 %84 monotonic
- %87 = or i16 %86, %84
- store i16 %87, i16* @us, align 2
- %88 = load i8, i8* @uc, align 1
- %89 = zext i8 %88 to i32
- %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %91 = atomicrmw or i32* %90, i32 %89 monotonic
- %92 = or i32 %91, %89
- store i32 %92, i32* @si, align 4
- %93 = load i8, i8* @uc, align 1
- %94 = zext i8 %93 to i32
- %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %96 = atomicrmw or i32* %95, i32 %94 monotonic
- %97 = or i32 %96, %94
- store i32 %97, i32* @ui, align 4
- %98 = load i8, i8* @uc, align 1
- %99 = zext i8 %98 to i64
- %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %101 = atomicrmw or i64* %100, i64 %99 monotonic
- %102 = or i64 %101, %99
- store i64 %102, i64* @sl, align 8
- %103 = load i8, i8* @uc, align 1
- %104 = zext i8 %103 to i64
- %105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %106 = atomicrmw or i64* %105, i64 %104 monotonic
- %107 = or i64 %106, %104
- store i64 %107, i64* @ul, align 8
- %108 = load i8, i8* @uc, align 1
- %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
- %110 = xor i8 %109, %108
- store i8 %110, i8* @sc, align 1
- %111 = load i8, i8* @uc, align 1
- %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
- %113 = xor i8 %112, %111
- store i8 %113, i8* @uc, align 1
- %114 = load i8, i8* @uc, align 1
- %115 = zext i8 %114 to i16
- %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %117 = atomicrmw xor i16* %116, i16 %115 monotonic
- %118 = xor i16 %117, %115
- store i16 %118, i16* @ss, align 2
- %119 = load i8, i8* @uc, align 1
- %120 = zext i8 %119 to i16
- %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %122 = atomicrmw xor i16* %121, i16 %120 monotonic
- %123 = xor i16 %122, %120
- store i16 %123, i16* @us, align 2
- %124 = load i8, i8* @uc, align 1
- %125 = zext i8 %124 to i32
- %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %127 = atomicrmw xor i32* %126, i32 %125 monotonic
- %128 = xor i32 %127, %125
- store i32 %128, i32* @si, align 4
- %129 = load i8, i8* @uc, align 1
- %130 = zext i8 %129 to i32
- %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %132 = atomicrmw xor i32* %131, i32 %130 monotonic
- %133 = xor i32 %132, %130
- store i32 %133, i32* @ui, align 4
- %134 = load i8, i8* @uc, align 1
- %135 = zext i8 %134 to i64
- %136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %137 = atomicrmw xor i64* %136, i64 %135 monotonic
- %138 = xor i64 %137, %135
- store i64 %138, i64* @sl, align 8
- %139 = load i8, i8* @uc, align 1
- %140 = zext i8 %139 to i64
- %141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %142 = atomicrmw xor i64* %141, i64 %140 monotonic
- %143 = xor i64 %142, %140
- store i64 %143, i64* @ul, align 8
- %144 = load i8, i8* @uc, align 1
- %145 = atomicrmw and i8* @sc, i8 %144 monotonic
- %146 = and i8 %145, %144
- store i8 %146, i8* @sc, align 1
- %147 = load i8, i8* @uc, align 1
- %148 = atomicrmw and i8* @uc, i8 %147 monotonic
- %149 = and i8 %148, %147
- store i8 %149, i8* @uc, align 1
- %150 = load i8, i8* @uc, align 1
- %151 = zext i8 %150 to i16
- %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %153 = atomicrmw and i16* %152, i16 %151 monotonic
- %154 = and i16 %153, %151
- store i16 %154, i16* @ss, align 2
- %155 = load i8, i8* @uc, align 1
- %156 = zext i8 %155 to i16
- %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %158 = atomicrmw and i16* %157, i16 %156 monotonic
- %159 = and i16 %158, %156
- store i16 %159, i16* @us, align 2
- %160 = load i8, i8* @uc, align 1
- %161 = zext i8 %160 to i32
- %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %163 = atomicrmw and i32* %162, i32 %161 monotonic
- %164 = and i32 %163, %161
- store i32 %164, i32* @si, align 4
- %165 = load i8, i8* @uc, align 1
- %166 = zext i8 %165 to i32
- %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %168 = atomicrmw and i32* %167, i32 %166 monotonic
- %169 = and i32 %168, %166
- store i32 %169, i32* @ui, align 4
- %170 = load i8, i8* @uc, align 1
- %171 = zext i8 %170 to i64
- %172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %173 = atomicrmw and i64* %172, i64 %171 monotonic
- %174 = and i64 %173, %171
- store i64 %174, i64* @sl, align 8
- %175 = load i8, i8* @uc, align 1
- %176 = zext i8 %175 to i64
- %177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %178 = atomicrmw and i64* %177, i64 %176 monotonic
- %179 = and i64 %178, %176
- store i64 %179, i64* @ul, align 8
- %180 = load i8, i8* @uc, align 1
- %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
- %182 = xor i8 %181, -1
- %183 = and i8 %182, %180
- store i8 %183, i8* @sc, align 1
- %184 = load i8, i8* @uc, align 1
- %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
- %186 = xor i8 %185, -1
- %187 = and i8 %186, %184
- store i8 %187, i8* @uc, align 1
- %188 = load i8, i8* @uc, align 1
- %189 = zext i8 %188 to i16
- %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %191 = atomicrmw nand i16* %190, i16 %189 monotonic
- %192 = xor i16 %191, -1
- %193 = and i16 %192, %189
- store i16 %193, i16* @ss, align 2
- %194 = load i8, i8* @uc, align 1
- %195 = zext i8 %194 to i16
- %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %197 = atomicrmw nand i16* %196, i16 %195 monotonic
- %198 = xor i16 %197, -1
- %199 = and i16 %198, %195
- store i16 %199, i16* @us, align 2
- %200 = load i8, i8* @uc, align 1
- %201 = zext i8 %200 to i32
- %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %203 = atomicrmw nand i32* %202, i32 %201 monotonic
- %204 = xor i32 %203, -1
- %205 = and i32 %204, %201
- store i32 %205, i32* @si, align 4
- %206 = load i8, i8* @uc, align 1
- %207 = zext i8 %206 to i32
- %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %209 = atomicrmw nand i32* %208, i32 %207 monotonic
- %210 = xor i32 %209, -1
- %211 = and i32 %210, %207
- store i32 %211, i32* @ui, align 4
- %212 = load i8, i8* @uc, align 1
- %213 = zext i8 %212 to i64
- %214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %215 = atomicrmw nand i64* %214, i64 %213 monotonic
- %216 = xor i64 %215, -1
- %217 = and i64 %216, %213
- store i64 %217, i64* @sl, align 8
- %218 = load i8, i8* @uc, align 1
- %219 = zext i8 %218 to i64
- %220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %221 = atomicrmw nand i64* %220, i64 %219 monotonic
- %222 = xor i64 %221, -1
- %223 = and i64 %222, %219
- store i64 %223, i64* @ul, align 8
+ %28 = atomicrmw add ptr @ul, i64 %27 monotonic
+ %29 = add i64 %28, %27
+ store i64 %29, ptr @ul, align 8
+ %30 = load i8, ptr @uc, align 1
+ %31 = atomicrmw sub ptr @sc, i8 %30 monotonic
+ %32 = sub i8 %31, %30
+ store i8 %32, ptr @sc, align 1
+ %33 = load i8, ptr @uc, align 1
+ %34 = atomicrmw sub ptr @uc, i8 %33 monotonic
+ %35 = sub i8 %34, %33
+ store i8 %35, ptr @uc, align 1
+ %36 = load i8, ptr @uc, align 1
+ %37 = zext i8 %36 to i16
+ %38 = atomicrmw sub ptr @ss, i16 %37 monotonic
+ %39 = sub i16 %38, %37
+ store i16 %39, ptr @ss, align 2
+ %40 = load i8, ptr @uc, align 1
+ %41 = zext i8 %40 to i16
+ %42 = atomicrmw sub ptr @us, i16 %41 monotonic
+ %43 = sub i16 %42, %41
+ store i16 %43, ptr @us, align 2
+ %44 = load i8, ptr @uc, align 1
+ %45 = zext i8 %44 to i32
+ %46 = atomicrmw sub ptr @si, i32 %45 monotonic
+ %47 = sub i32 %46, %45
+ store i32 %47, ptr @si, align 4
+ %48 = load i8, ptr @uc, align 1
+ %49 = zext i8 %48 to i32
+ %50 = atomicrmw sub ptr @ui, i32 %49 monotonic
+ %51 = sub i32 %50, %49
+ store i32 %51, ptr @ui, align 4
+ %52 = load i8, ptr @uc, align 1
+ %53 = zext i8 %52 to i64
+ %54 = atomicrmw sub ptr @sl, i64 %53 monotonic
+ %55 = sub i64 %54, %53
+ store i64 %55, ptr @sl, align 8
+ %56 = load i8, ptr @uc, align 1
+ %57 = zext i8 %56 to i64
+ %58 = atomicrmw sub ptr @ul, i64 %57 monotonic
+ %59 = sub i64 %58, %57
+ store i64 %59, ptr @ul, align 8
+ %60 = load i8, ptr @uc, align 1
+ %61 = atomicrmw or ptr @sc, i8 %60 monotonic
+ %62 = or i8 %61, %60
+ store i8 %62, ptr @sc, align 1
+ %63 = load i8, ptr @uc, align 1
+ %64 = atomicrmw or ptr @uc, i8 %63 monotonic
+ %65 = or i8 %64, %63
+ store i8 %65, ptr @uc, align 1
+ %66 = load i8, ptr @uc, align 1
+ %67 = zext i8 %66 to i16
+ %68 = atomicrmw or ptr @ss, i16 %67 monotonic
+ %69 = or i16 %68, %67
+ store i16 %69, ptr @ss, align 2
+ %70 = load i8, ptr @uc, align 1
+ %71 = zext i8 %70 to i16
+ %72 = atomicrmw or ptr @us, i16 %71 monotonic
+ %73 = or i16 %72, %71
+ store i16 %73, ptr @us, align 2
+ %74 = load i8, ptr @uc, align 1
+ %75 = zext i8 %74 to i32
+ %76 = atomicrmw or ptr @si, i32 %75 monotonic
+ %77 = or i32 %76, %75
+ store i32 %77, ptr @si, align 4
+ %78 = load i8, ptr @uc, align 1
+ %79 = zext i8 %78 to i32
+ %80 = atomicrmw or ptr @ui, i32 %79 monotonic
+ %81 = or i32 %80, %79
+ store i32 %81, ptr @ui, align 4
+ %82 = load i8, ptr @uc, align 1
+ %83 = zext i8 %82 to i64
+ %84 = atomicrmw or ptr @sl, i64 %83 monotonic
+ %85 = or i64 %84, %83
+ store i64 %85, ptr @sl, align 8
+ %86 = load i8, ptr @uc, align 1
+ %87 = zext i8 %86 to i64
+ %88 = atomicrmw or ptr @ul, i64 %87 monotonic
+ %89 = or i64 %88, %87
+ store i64 %89, ptr @ul, align 8
+ %90 = load i8, ptr @uc, align 1
+ %91 = atomicrmw xor ptr @sc, i8 %90 monotonic
+ %92 = xor i8 %91, %90
+ store i8 %92, ptr @sc, align 1
+ %93 = load i8, ptr @uc, align 1
+ %94 = atomicrmw xor ptr @uc, i8 %93 monotonic
+ %95 = xor i8 %94, %93
+ store i8 %95, ptr @uc, align 1
+ %96 = load i8, ptr @uc, align 1
+ %97 = zext i8 %96 to i16
+ %98 = atomicrmw xor ptr @ss, i16 %97 monotonic
+ %99 = xor i16 %98, %97
+ store i16 %99, ptr @ss, align 2
+ %100 = load i8, ptr @uc, align 1
+ %101 = zext i8 %100 to i16
+ %102 = atomicrmw xor ptr @us, i16 %101 monotonic
+ %103 = xor i16 %102, %101
+ store i16 %103, ptr @us, align 2
+ %104 = load i8, ptr @uc, align 1
+ %105 = zext i8 %104 to i32
+ %106 = atomicrmw xor ptr @si, i32 %105 monotonic
+ %107 = xor i32 %106, %105
+ store i32 %107, ptr @si, align 4
+ %108 = load i8, ptr @uc, align 1
+ %109 = zext i8 %108 to i32
+ %110 = atomicrmw xor ptr @ui, i32 %109 monotonic
+ %111 = xor i32 %110, %109
+ store i32 %111, ptr @ui, align 4
+ %112 = load i8, ptr @uc, align 1
+ %113 = zext i8 %112 to i64
+ %114 = atomicrmw xor ptr @sl, i64 %113 monotonic
+ %115 = xor i64 %114, %113
+ store i64 %115, ptr @sl, align 8
+ %116 = load i8, ptr @uc, align 1
+ %117 = zext i8 %116 to i64
+ %118 = atomicrmw xor ptr @ul, i64 %117 monotonic
+ %119 = xor i64 %118, %117
+ store i64 %119, ptr @ul, align 8
+ %120 = load i8, ptr @uc, align 1
+ %121 = atomicrmw and ptr @sc, i8 %120 monotonic
+ %122 = and i8 %121, %120
+ store i8 %122, ptr @sc, align 1
+ %123 = load i8, ptr @uc, align 1
+ %124 = atomicrmw and ptr @uc, i8 %123 monotonic
+ %125 = and i8 %124, %123
+ store i8 %125, ptr @uc, align 1
+ %126 = load i8, ptr @uc, align 1
+ %127 = zext i8 %126 to i16
+ %128 = atomicrmw and ptr @ss, i16 %127 monotonic
+ %129 = and i16 %128, %127
+ store i16 %129, ptr @ss, align 2
+ %130 = load i8, ptr @uc, align 1
+ %131 = zext i8 %130 to i16
+ %132 = atomicrmw and ptr @us, i16 %131 monotonic
+ %133 = and i16 %132, %131
+ store i16 %133, ptr @us, align 2
+ %134 = load i8, ptr @uc, align 1
+ %135 = zext i8 %134 to i32
+ %136 = atomicrmw and ptr @si, i32 %135 monotonic
+ %137 = and i32 %136, %135
+ store i32 %137, ptr @si, align 4
+ %138 = load i8, ptr @uc, align 1
+ %139 = zext i8 %138 to i32
+ %140 = atomicrmw and ptr @ui, i32 %139 monotonic
+ %141 = and i32 %140, %139
+ store i32 %141, ptr @ui, align 4
+ %142 = load i8, ptr @uc, align 1
+ %143 = zext i8 %142 to i64
+ %144 = atomicrmw and ptr @sl, i64 %143 monotonic
+ %145 = and i64 %144, %143
+ store i64 %145, ptr @sl, align 8
+ %146 = load i8, ptr @uc, align 1
+ %147 = zext i8 %146 to i64
+ %148 = atomicrmw and ptr @ul, i64 %147 monotonic
+ %149 = and i64 %148, %147
+ store i64 %149, ptr @ul, align 8
+ %150 = load i8, ptr @uc, align 1
+ %151 = atomicrmw nand ptr @sc, i8 %150 monotonic
+ %152 = xor i8 %151, -1
+ %153 = and i8 %152, %150
+ store i8 %153, ptr @sc, align 1
+ %154 = load i8, ptr @uc, align 1
+ %155 = atomicrmw nand ptr @uc, i8 %154 monotonic
+ %156 = xor i8 %155, -1
+ %157 = and i8 %156, %154
+ store i8 %157, ptr @uc, align 1
+ %158 = load i8, ptr @uc, align 1
+ %159 = zext i8 %158 to i16
+ %160 = atomicrmw nand ptr @ss, i16 %159 monotonic
+ %161 = xor i16 %160, -1
+ %162 = and i16 %161, %159
+ store i16 %162, ptr @ss, align 2
+ %163 = load i8, ptr @uc, align 1
+ %164 = zext i8 %163 to i16
+ %165 = atomicrmw nand ptr @us, i16 %164 monotonic
+ %166 = xor i16 %165, -1
+ %167 = and i16 %166, %164
+ store i16 %167, ptr @us, align 2
+ %168 = load i8, ptr @uc, align 1
+ %169 = zext i8 %168 to i32
+ %170 = atomicrmw nand ptr @si, i32 %169 monotonic
+ %171 = xor i32 %170, -1
+ %172 = and i32 %171, %169
+ store i32 %172, ptr @si, align 4
+ %173 = load i8, ptr @uc, align 1
+ %174 = zext i8 %173 to i32
+ %175 = atomicrmw nand ptr @ui, i32 %174 monotonic
+ %176 = xor i32 %175, -1
+ %177 = and i32 %176, %174
+ store i32 %177, ptr @ui, align 4
+ %178 = load i8, ptr @uc, align 1
+ %179 = zext i8 %178 to i64
+ %180 = atomicrmw nand ptr @sl, i64 %179 monotonic
+ %181 = xor i64 %180, -1
+ %182 = and i64 %181, %179
+ store i64 %182, ptr @sl, align 8
+ %183 = load i8, ptr @uc, align 1
+ %184 = zext i8 %183 to i64
+ %185 = atomicrmw nand ptr @ul, i64 %184 monotonic
+ %186 = xor i64 %185, -1
+ %187 = and i64 %186, %184
+ store i64 %187, ptr @ul, align 8
br label %return
return: ; preds = %entry
define void @test_compare_and_swap() nounwind {
entry:
- %0 = load i8, i8* @uc, align 1
- %1 = load i8, i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
- store i8 %2, i8* @sc, align 1
- %3 = load i8, i8* @uc, align 1
- %4 = load i8, i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
- store i8 %5, i8* @uc, align 1
- %6 = load i8, i8* @uc, align 1
+ %0 = load i8, ptr @uc, align 1
+ %1 = load i8, ptr @sc, align 1
+ %2 = cmpxchg ptr @sc, i8 %0, i8 %1 monotonic monotonic
+ store i8 %2, ptr @sc, align 1
+ %3 = load i8, ptr @uc, align 1
+ %4 = load i8, ptr @sc, align 1
+ %5 = cmpxchg ptr @uc, i8 %3, i8 %4 monotonic monotonic
+ store i8 %5, ptr @uc, align 1
+ %6 = load i8, ptr @uc, align 1
%7 = zext i8 %6 to i16
- %8 = load i8, i8* @sc, align 1
+ %8 = load i8, ptr @sc, align 1
%9 = sext i8 %8 to i16
- %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
- store i16 %11, i16* @ss, align 2
- %12 = load i8, i8* @uc, align 1
- %13 = zext i8 %12 to i16
- %14 = load i8, i8* @sc, align 1
- %15 = sext i8 %14 to i16
- %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
- store i16 %17, i16* @us, align 2
- %18 = load i8, i8* @uc, align 1
- %19 = zext i8 %18 to i32
- %20 = load i8, i8* @sc, align 1
- %21 = sext i8 %20 to i32
- %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
- store i32 %23, i32* @si, align 4
- %24 = load i8, i8* @uc, align 1
- %25 = zext i8 %24 to i32
- %26 = load i8, i8* @sc, align 1
- %27 = sext i8 %26 to i32
- %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
- store i32 %29, i32* @ui, align 4
- %30 = load i8, i8* @uc, align 1
- %31 = zext i8 %30 to i64
- %32 = load i8, i8* @sc, align 1
- %33 = sext i8 %32 to i64
- %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic monotonic
- store i64 %35, i64* @sl, align 8
- %36 = load i8, i8* @uc, align 1
- %37 = zext i8 %36 to i64
- %38 = load i8, i8* @sc, align 1
- %39 = sext i8 %38 to i64
- %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic monotonic
- store i64 %41, i64* @ul, align 8
- %42 = load i8, i8* @uc, align 1
- %43 = load i8, i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
+ %10 = cmpxchg ptr @ss, i16 %7, i16 %9 monotonic monotonic
+ store i16 %10, ptr @ss, align 2
+ %11 = load i8, ptr @uc, align 1
+ %12 = zext i8 %11 to i16
+ %13 = load i8, ptr @sc, align 1
+ %14 = sext i8 %13 to i16
+ %15 = cmpxchg ptr @us, i16 %12, i16 %14 monotonic monotonic
+ store i16 %15, ptr @us, align 2
+ %16 = load i8, ptr @uc, align 1
+ %17 = zext i8 %16 to i32
+ %18 = load i8, ptr @sc, align 1
+ %19 = sext i8 %18 to i32
+ %20 = cmpxchg ptr @si, i32 %17, i32 %19 monotonic monotonic
+ store i32 %20, ptr @si, align 4
+ %21 = load i8, ptr @uc, align 1
+ %22 = zext i8 %21 to i32
+ %23 = load i8, ptr @sc, align 1
+ %24 = sext i8 %23 to i32
+ %25 = cmpxchg ptr @ui, i32 %22, i32 %24 monotonic monotonic
+ store i32 %25, ptr @ui, align 4
+ %26 = load i8, ptr @uc, align 1
+ %27 = zext i8 %26 to i64
+ %28 = load i8, ptr @sc, align 1
+ %29 = sext i8 %28 to i64
+ %30 = cmpxchg ptr @sl, i64 %27, i64 %29 monotonic monotonic
+ store i64 %30, ptr @sl, align 8
+ %31 = load i8, ptr @uc, align 1
+ %32 = zext i8 %31 to i64
+ %33 = load i8, ptr @sc, align 1
+ %34 = sext i8 %33 to i64
+ %35 = cmpxchg ptr @ul, i64 %32, i64 %34 monotonic monotonic
+ store i64 %35, ptr @ul, align 8
+ %36 = load i8, ptr @uc, align 1
+ %37 = load i8, ptr @sc, align 1
+ %38 = cmpxchg ptr @sc, i8 %36, i8 %37 monotonic monotonic
+ %39 = icmp eq i8 %38, %36
+ %40 = zext i1 %39 to i8
+ %41 = zext i8 %40 to i32
+ store i32 %41, ptr @ui, align 4
+ %42 = load i8, ptr @uc, align 1
+ %43 = load i8, ptr @sc, align 1
+ %44 = cmpxchg ptr @uc, i8 %42, i8 %43 monotonic monotonic
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i8
%47 = zext i8 %46 to i32
- store i32 %47, i32* @ui, align 4
- %48 = load i8, i8* @uc, align 1
- %49 = load i8, i8* @sc, align 1
- %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic monotonic
- %51 = icmp eq i8 %50, %48
- %52 = zext i1 %51 to i8
- %53 = zext i8 %52 to i32
- store i32 %53, i32* @ui, align 4
- %54 = load i8, i8* @uc, align 1
- %55 = zext i8 %54 to i16
- %56 = load i8, i8* @sc, align 1
- %57 = sext i8 %56 to i16
- %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic monotonic
- %60 = icmp eq i16 %59, %55
- %61 = zext i1 %60 to i8
- %62 = zext i8 %61 to i32
- store i32 %62, i32* @ui, align 4
- %63 = load i8, i8* @uc, align 1
- %64 = zext i8 %63 to i16
- %65 = load i8, i8* @sc, align 1
- %66 = sext i8 %65 to i16
- %67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic monotonic
- %69 = icmp eq i16 %68, %64
+ store i32 %47, ptr @ui, align 4
+ %48 = load i8, ptr @uc, align 1
+ %49 = zext i8 %48 to i16
+ %50 = load i8, ptr @sc, align 1
+ %51 = sext i8 %50 to i16
+ %52 = cmpxchg ptr @ss, i16 %49, i16 %51 monotonic monotonic
+ %53 = icmp eq i16 %52, %49
+ %54 = zext i1 %53 to i8
+ %55 = zext i8 %54 to i32
+ store i32 %55, ptr @ui, align 4
+ %56 = load i8, ptr @uc, align 1
+ %57 = zext i8 %56 to i16
+ %58 = load i8, ptr @sc, align 1
+ %59 = sext i8 %58 to i16
+ %60 = cmpxchg ptr @us, i16 %57, i16 %59 monotonic monotonic
+ %61 = icmp eq i16 %60, %57
+ %62 = zext i1 %61 to i8
+ %63 = zext i8 %62 to i32
+ store i32 %63, ptr @ui, align 4
+ %64 = load i8, ptr @uc, align 1
+ %65 = zext i8 %64 to i32
+ %66 = load i8, ptr @sc, align 1
+ %67 = sext i8 %66 to i32
+ %68 = cmpxchg ptr @si, i32 %65, i32 %67 monotonic monotonic
+ %69 = icmp eq i32 %68, %65
%70 = zext i1 %69 to i8
%71 = zext i8 %70 to i32
- store i32 %71, i32* @ui, align 4
- %72 = load i8, i8* @uc, align 1
+ store i32 %71, ptr @ui, align 4
+ %72 = load i8, ptr @uc, align 1
%73 = zext i8 %72 to i32
- %74 = load i8, i8* @sc, align 1
+ %74 = load i8, ptr @sc, align 1
%75 = sext i8 %74 to i32
- %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic monotonic
- %78 = icmp eq i32 %77, %73
- %79 = zext i1 %78 to i8
- %80 = zext i8 %79 to i32
- store i32 %80, i32* @ui, align 4
- %81 = load i8, i8* @uc, align 1
- %82 = zext i8 %81 to i32
- %83 = load i8, i8* @sc, align 1
- %84 = sext i8 %83 to i32
- %85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic monotonic
- %87 = icmp eq i32 %86, %82
- %88 = zext i1 %87 to i8
- %89 = zext i8 %88 to i32
- store i32 %89, i32* @ui, align 4
- %90 = load i8, i8* @uc, align 1
- %91 = zext i8 %90 to i64
- %92 = load i8, i8* @sc, align 1
- %93 = sext i8 %92 to i64
- %94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic monotonic
- %96 = icmp eq i64 %95, %91
- %97 = zext i1 %96 to i8
- %98 = zext i8 %97 to i32
- store i32 %98, i32* @ui, align 4
- %99 = load i8, i8* @uc, align 1
- %100 = zext i8 %99 to i64
- %101 = load i8, i8* @sc, align 1
- %102 = sext i8 %101 to i64
- %103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic monotonic
- %105 = icmp eq i64 %104, %100
- %106 = zext i1 %105 to i8
- %107 = zext i8 %106 to i32
- store i32 %107, i32* @ui, align 4
+ %76 = cmpxchg ptr @ui, i32 %73, i32 %75 monotonic monotonic
+ %77 = icmp eq i32 %76, %73
+ %78 = zext i1 %77 to i8
+ %79 = zext i8 %78 to i32
+ store i32 %79, ptr @ui, align 4
+ %80 = load i8, ptr @uc, align 1
+ %81 = zext i8 %80 to i64
+ %82 = load i8, ptr @sc, align 1
+ %83 = sext i8 %82 to i64
+ %84 = cmpxchg ptr @sl, i64 %81, i64 %83 monotonic monotonic
+ %85 = icmp eq i64 %84, %81
+ %86 = zext i1 %85 to i8
+ %87 = zext i8 %86 to i32
+ store i32 %87, ptr @ui, align 4
+ %88 = load i8, ptr @uc, align 1
+ %89 = zext i8 %88 to i64
+ %90 = load i8, ptr @sc, align 1
+ %91 = sext i8 %90 to i64
+ %92 = cmpxchg ptr @ul, i64 %89, i64 %91 monotonic monotonic
+ %93 = icmp eq i64 %92, %89
+ %94 = zext i1 %93 to i8
+ %95 = zext i8 %94 to i32
+ store i32 %95, ptr @ui, align 4
br label %return
return: ; preds = %entry
define void @test_lock() nounwind {
entry:
- %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
- store i8 %0, i8* @sc, align 1
- %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %3 = atomicrmw xchg i16* %2, i16 1 monotonic
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %5 = atomicrmw xchg i16* %4, i16 1 monotonic
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %7 = atomicrmw xchg i32* %6, i32 1 monotonic
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %9 = atomicrmw xchg i32* %8, i32 1 monotonic
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %11 = atomicrmw xchg i64* %10, i64 1 monotonic
- store i64 %11, i64* @sl, align 8
- %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %13 = atomicrmw xchg i64* %12, i64 1 monotonic
- store i64 %13, i64* @ul, align 8
+ %0 = atomicrmw xchg ptr @sc, i8 1 monotonic
+ store i8 %0, ptr @sc, align 1
+ %1 = atomicrmw xchg ptr @uc, i8 1 monotonic
+ store i8 %1, ptr @uc, align 1
+ %2 = atomicrmw xchg ptr @ss, i16 1 monotonic
+ store i16 %2, ptr @ss, align 2
+ %3 = atomicrmw xchg ptr @us, i16 1 monotonic
+ store i16 %3, ptr @us, align 2
+ %4 = atomicrmw xchg ptr @si, i32 1 monotonic
+ store i32 %4, ptr @si, align 4
+ %5 = atomicrmw xchg ptr @ui, i32 1 monotonic
+ store i32 %5, ptr @ui, align 4
+ %6 = atomicrmw xchg ptr @sl, i64 1 monotonic
+ store i64 %6, ptr @sl, align 8
+ %7 = atomicrmw xchg ptr @ul, i64 1 monotonic
+ store i64 %7, ptr @ul, align 8
fence seq_cst
- store volatile i8 0, i8* @sc, align 1
- store volatile i8 0, i8* @uc, align 1
- %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- store volatile i16 0, i16* %14, align 2
- %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- store volatile i16 0, i16* %15, align 2
- %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- store volatile i32 0, i32* %16, align 4
- %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- store volatile i32 0, i32* %17, align 4
- %18 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- store volatile i64 0, i64* %18, align 8
- %19 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- store volatile i64 0, i64* %19, align 8
- %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
- store volatile i64 0, i64* %20, align 8
- %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
- store volatile i64 0, i64* %21, align 8
+ store volatile i8 0, ptr @sc, align 1
+ store volatile i8 0, ptr @uc, align 1
+ store volatile i16 0, ptr @ss, align 2
+ store volatile i16 0, ptr @us, align 2
+ store volatile i32 0, ptr @si, align 4
+ store volatile i32 0, ptr @ui, align 4
+ store volatile i64 0, ptr @sl, align 8
+ store volatile i64 0, ptr @ul, align 8
+ store volatile i64 0, ptr @sll, align 8
+ store volatile i64 0, ptr @ull, align 8
br label %return
return: ; preds = %entry
; Don't generate zero extension for the return value.
; CHECK-NOT: clrldi
-define zeroext i1 @foo(i32 signext %i, i32* %p) {
+define zeroext i1 @foo(i32 signext %i, ptr %p) {
entry:
%cmp = icmp eq i32 %i, 0
br i1 %cmp, label %return, label %if.end
if.end:
- store i32 %i, i32* %p, align 4
+ store i32 %i, ptr %p, align 4
br label %return
return:
}
; CHECK-LABEL: find
-define zeroext i1 @find(i8** readonly %begin, i8** readnone %end, i1 (i8*)* nocapture %hasProp) {
+define zeroext i1 @find(ptr readonly %begin, ptr readnone %end, ptr nocapture %hasProp) {
entry:
- %cmp.4 = icmp eq i8** %begin, %end
+ %cmp.4 = icmp eq ptr %begin, %end
br i1 %cmp.4, label %cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond: ; preds = %for.body
- %cmp = icmp eq i8** %incdec.ptr, %end
+ %cmp = icmp eq ptr %incdec.ptr, %end
br i1 %cmp, label %cleanup.loopexit, label %for.body
for.body: ; preds = %for.body.preheader, %for.cond
- %curr.05 = phi i8** [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
- %0 = load i8*, i8** %curr.05, align 8
- %call = tail call zeroext i1 %hasProp(i8* %0)
- %incdec.ptr = getelementptr inbounds i8*, i8** %curr.05, i64 1
+ %curr.05 = phi ptr [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
+ %0 = load ptr, ptr %curr.05, align 8
+ %call = tail call zeroext i1 %hasProp(ptr %0)
+ %incdec.ptr = getelementptr inbounds ptr, ptr %curr.05, i64 1
br i1 %call, label %cleanup.loopexit, label %for.cond
cleanup.loopexit: ; preds = %for.body, %for.cond
}
; CHECK-LABEL: find_cont
-define void @find_cont(i8** readonly %begin, i8** readnone %end, i1 (i8*)* nocapture %hasProp, void (i1)* nocapture %cont) {
+define void @find_cont(ptr readonly %begin, ptr readnone %end, ptr nocapture %hasProp, ptr nocapture %cont) {
entry:
- %cmp.4 = icmp eq i8** %begin, %end
+ %cmp.4 = icmp eq ptr %begin, %end
br i1 %cmp.4, label %cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond: ; preds = %for.body
- %cmp = icmp eq i8** %incdec.ptr, %end
+ %cmp = icmp eq ptr %incdec.ptr, %end
br i1 %cmp, label %cleanup.loopexit, label %for.body
for.body: ; preds = %for.body.preheader, %for.cond
- %curr.05 = phi i8** [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
- %0 = load i8*, i8** %curr.05, align 8
- %call = tail call zeroext i1 %hasProp(i8* %0)
- %incdec.ptr = getelementptr inbounds i8*, i8** %curr.05, i64 1
+ %curr.05 = phi ptr [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
+ %0 = load ptr, ptr %curr.05, align 8
+ %call = tail call zeroext i1 %hasProp(ptr %0)
+ %incdec.ptr = getelementptr inbounds ptr, ptr %curr.05, i64 1
br i1 %call, label %cleanup.loopexit, label %for.cond
cleanup.loopexit: ; preds = %for.body, %for.cond
}
; CHECK-LABEL: find_cont_ret
-define zeroext i1 @find_cont_ret(i8** readonly %begin, i8** readnone %end, i1 (i8*)* nocapture %hasProp, void (i1)* nocapture %cont) {
+define zeroext i1 @find_cont_ret(ptr readonly %begin, ptr readnone %end, ptr nocapture %hasProp, ptr nocapture %cont) {
entry:
- %cmp.4 = icmp eq i8** %begin, %end
+ %cmp.4 = icmp eq ptr %begin, %end
br i1 %cmp.4, label %cleanup, label %for.body.preheader
for.body.preheader: ; preds = %entry
br label %for.body
for.cond: ; preds = %for.body
- %cmp = icmp eq i8** %incdec.ptr, %end
+ %cmp = icmp eq ptr %incdec.ptr, %end
br i1 %cmp, label %cleanup.loopexit, label %for.body
for.body: ; preds = %for.body.preheader, %for.cond
- %curr.05 = phi i8** [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
- %0 = load i8*, i8** %curr.05, align 8
- %call = tail call zeroext i1 %hasProp(i8* %0)
- %incdec.ptr = getelementptr inbounds i8*, i8** %curr.05, i64 1
+ %curr.05 = phi ptr [ %incdec.ptr, %for.cond ], [ %begin, %for.body.preheader ]
+ %0 = load ptr, ptr %curr.05, align 8
+ %call = tail call zeroext i1 %hasProp(ptr %0)
+ %incdec.ptr = getelementptr inbounds ptr, ptr %curr.05, i64 1
br i1 %call, label %cleanup.loopexit, label %for.cond
cleanup.loopexit: ; preds = %for.body, %for.cond
%"class.clang::format::BreakableStringLiteral" = type { %"class.clang::format::BreakableSingleLineToken" }
%"class.clang::format::BreakableSingleLineToken" = type { %"class.clang::format::BreakableToken", i32, %"class.llvm::StringRef", %"class.llvm::StringRef", %"class.llvm::StringRef" }
-%"class.clang::format::BreakableToken" = type { i32 (...)**, %"struct.clang::format::FormatToken"*, i32, i8, i32, %"struct.clang::format::FormatStyle"* }
-%"class.llvm::StringRef" = type { i8*, i64 }
-%"struct.clang::format::FormatToken" = type <{ %"class.clang::Token", i32, i8, [3 x i8], %"class.clang::SourceRange", i32, i32, i32, i8, i8, i8, i8, %"class.llvm::StringRef", i8, [3 x i8], i32, i32, i32, i8, i8, [2 x i8], i32, i32, i16, [2 x i8], %"class.std::unique_ptr", i32, i32, i32, i32, i32, i32, i32, i32, %"class.llvm::SmallVector", i32, i8, i8, [2 x i8], i32, i8, i8, [2 x i8], %"struct.clang::format::FormatToken"*, %"struct.clang::format::FormatToken"*, %"struct.clang::format::FormatToken"*, %"class.llvm::SmallVector.6", i32, i8, [3 x i8] }>
-%"class.clang::Token" = type <{ i32, i32, i8*, i16, i16, [4 x i8] }>
+%"class.clang::format::BreakableToken" = type { ptr, ptr, i32, i8, i32, ptr }
+%"class.llvm::StringRef" = type { ptr, i64 }
+%"struct.clang::format::FormatToken" = type <{ %"class.clang::Token", i32, i8, [3 x i8], %"class.clang::SourceRange", i32, i32, i32, i8, i8, i8, i8, %"class.llvm::StringRef", i8, [3 x i8], i32, i32, i32, i8, i8, [2 x i8], i32, i32, i16, [2 x i8], %"class.std::unique_ptr", i32, i32, i32, i32, i32, i32, i32, i32, %"class.llvm::SmallVector", i32, i8, i8, [2 x i8], i32, i8, i8, [2 x i8], ptr, ptr, ptr, %"class.llvm::SmallVector.6", i32, i8, [3 x i8] }>
+%"class.clang::Token" = type <{ i32, i32, ptr, i16, i16, [4 x i8] }>
%"class.clang::SourceRange" = type { %"class.clang::SourceLocation", %"class.clang::SourceLocation" }
%"class.clang::SourceLocation" = type { i32 }
%"class.std::unique_ptr" = type { %"class.std::tuple" }
%"class.std::tuple" = type { %"struct.std::_Tuple_impl" }
%"struct.std::_Tuple_impl" = type { %"struct.std::_Head_base.2" }
-%"struct.std::_Head_base.2" = type { %"class.clang::format::TokenRole"* }
-%"class.clang::format::TokenRole" = type { i32 (...)**, %"struct.clang::format::FormatStyle"* }
+%"struct.std::_Head_base.2" = type { ptr }
+%"class.clang::format::TokenRole" = type { ptr, ptr }
%"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl.base", %"struct.llvm::SmallVectorStorage" }
%"class.llvm::SmallVectorImpl.base" = type { %"class.llvm::SmallVectorTemplateBase.base" }
%"class.llvm::SmallVectorTemplateBase.base" = type { %"class.llvm::SmallVectorTemplateCommon.base" }
%"class.llvm::SmallVectorTemplateCommon.base" = type <{ %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }>
-%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
+%"class.llvm::SmallVectorBase" = type { ptr, ptr, ptr }
%"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
%"struct.llvm::AlignedCharArray" = type { [4 x i8] }
%"struct.llvm::SmallVectorStorage" = type { [3 x %"struct.llvm::AlignedCharArrayUnion"] }
%"struct.clang::format::FormatStyle" = type { i32, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, i32, i8, i8, i8, i8, i32, i32, i8, i8, i32, %"class.std::basic_string", i8, i32, i32, i8, i8, i8, i8, %"class.std::vector", i8, i32, i8, i8, i32, %"class.std::basic_string", %"class.std::basic_string", i32, i32, i32, i8, i8, i32, i32, i32, i32, i32, i32, i32, i8, i8, i32, i8, i32, i8, i8, i8, i8, i8, i32, i32, i32 }
%"class.std::vector" = type { %"struct.std::_Vector_base" }
%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" = type { %"class.std::basic_string"*, %"class.std::basic_string"*, %"class.std::basic_string"* }
+%"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
-%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { ptr }
%"struct.llvm::AlignedCharArray.52" = type { [16 x i8] }
-%"class.clang::format::WhitespaceManager" = type <{ %"class.llvm::SmallVector.13", %"class.clang::SourceManager"*, %"class.std::set", %"struct.clang::format::FormatStyle"*, i8, [7 x i8] }>
+%"class.clang::format::WhitespaceManager" = type <{ %"class.llvm::SmallVector.13", ptr, %"class.std::set", ptr, i8, [7 x i8] }>
; Clang/LLVM struct type declarations used by the test body below.
; Leftover unified-diff markers ("-"/"+" line prefixes from the typed-pointer
; -> opaque-pointer migration) resolved in favor of the "+" (ptr) side.
%"class.llvm::SmallVector.13" = type { %"class.llvm::SmallVectorImpl.14", %"struct.llvm::SmallVectorStorage.19" }
%"class.llvm::SmallVectorImpl.14" = type { %"class.llvm::SmallVectorTemplateBase.15" }
%"class.llvm::SmallVectorTemplateBase.15" = type { %"class.llvm::SmallVectorTemplateCommon.16" }
%"struct.llvm::AlignedCharArrayUnion.17" = type { %"struct.llvm::AlignedCharArray.18" }
%"struct.llvm::AlignedCharArray.18" = type { [88 x i8] }
%"struct.llvm::SmallVectorStorage.19" = type { [15 x %"struct.llvm::AlignedCharArrayUnion.17"] }
%"class.clang::SourceManager" = type { %"class.llvm::RefCountedBase", ptr, ptr, %"class.llvm::BumpPtrAllocatorImpl", %"class.llvm::DenseMap.65", i8, i8, %"class.std::unique_ptr.78", %"class.std::vector.94", %"class.llvm::SmallVector.99", %"class.llvm::SmallVector.99", i32, i32, %"class.std::vector.107", ptr, %"class.clang::FileID", ptr, %"class.clang::FileID", ptr, i32, i32, %"class.clang::FileID", %"class.clang::FileID", i32, i32, %"class.llvm::DenseMap.111", %"class.llvm::DenseMap.115", %"class.clang::InBeforeInTUCacheEntry", %"class.std::unique_ptr.119", %"class.std::unique_ptr.127", %"class.llvm::DenseMap.135", %"class.llvm::SmallVector.139" }
%"class.llvm::RefCountedBase" = type { i32 }
%"class.clang::DiagnosticsEngine" = type opaque
%"class.clang::FileManager" = type { %"class.llvm::RefCountedBase.20", %"class.llvm::IntrusiveRefCntPtr", %"class.clang::FileSystemOptions", %"class.std::map", %"class.std::map.24", %"class.llvm::SmallVector.29", %"class.llvm::SmallVector.35", %"class.llvm::StringMap", %"class.llvm::StringMap.56", %"class.llvm::DenseMap", %"class.llvm::BumpPtrAllocatorImpl", i32, i32, i32, i32, i32, %"class.std::unique_ptr.57" }
%"class.llvm::RefCountedBase.20" = type { i32 }
%"class.llvm::IntrusiveRefCntPtr" = type { ptr }
%"class.clang::vfs::FileSystem" = type <{ ptr, %"class.llvm::ThreadSafeRefCountedBase", [4 x i8] }>
%"class.llvm::ThreadSafeRefCountedBase" = type { %"struct.std::atomic" }
%"struct.std::atomic" = type { %"struct.std::__atomic_base" }
%"struct.std::__atomic_base" = type { i32 }
%"class.std::_Rb_tree" = type { %"struct.std::_Rb_tree<llvm::sys::fs::UniqueID, std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry>, std::_Select1st<std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry> >, std::less<llvm::sys::fs::UniqueID>, std::allocator<std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry> > >::_Rb_tree_impl" }
%"struct.std::_Rb_tree<llvm::sys::fs::UniqueID, std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry>, std::_Select1st<std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry> >, std::less<llvm::sys::fs::UniqueID>, std::allocator<std::pair<const llvm::sys::fs::UniqueID, clang::DirectoryEntry> > >::_Rb_tree_impl" = type { %"struct.std::less", %"struct.std::_Rb_tree_node_base", i64 }
%"struct.std::less" = type { i8 }
%"struct.std::_Rb_tree_node_base" = type { i32, ptr, ptr, ptr }
%"class.std::map.24" = type { %"class.std::_Rb_tree.25" }
%"class.std::_Rb_tree.25" = type { %"struct.std::_Rb_tree<llvm::sys::fs::UniqueID, std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry>, std::_Select1st<std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry> >, std::less<llvm::sys::fs::UniqueID>, std::allocator<std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry> > >::_Rb_tree_impl" }
%"struct.std::_Rb_tree<llvm::sys::fs::UniqueID, std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry>, std::_Select1st<std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry> >, std::less<llvm::sys::fs::UniqueID>, std::allocator<std::pair<const llvm::sys::fs::UniqueID, clang::FileEntry> > >::_Rb_tree_impl" = type { %"struct.std::less", %"struct.std::_Rb_tree_node_base", i64 }
%"struct.llvm::AlignedCharArrayUnion.39" = type { %"struct.llvm::AlignedCharArray.11" }
%"struct.llvm::SmallVectorStorage.40" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.39"] }
%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocatorImpl" }
%"class.llvm::StringMapImpl" = type { ptr, i32, i32, i32, i32 }
%"class.llvm::StringMapEntryBase" = type { i32 }
%"class.llvm::StringMap.56" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocatorImpl" }
%"class.llvm::DenseMap" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair" = type opaque
%"class.std::unique_ptr.57" = type { %"class.std::tuple.58" }
%"class.std::tuple.58" = type { %"struct.std::_Tuple_impl.59" }
%"struct.std::_Tuple_impl.59" = type { %"struct.std::_Head_base.64" }
%"struct.std::_Head_base.64" = type { ptr }
%"class.clang::FileSystemStatCache" = type opaque
%"class.llvm::BumpPtrAllocatorImpl" = type <{ ptr, ptr, %"class.llvm::SmallVector.41", %"class.llvm::SmallVector.47", i64, %"class.llvm::MallocAllocator", [7 x i8] }>
%"class.llvm::SmallVector.41" = type { %"class.llvm::SmallVectorImpl.42", %"struct.llvm::SmallVectorStorage.46" }
%"class.llvm::SmallVectorImpl.42" = type { %"class.llvm::SmallVectorTemplateBase.43" }
%"class.llvm::SmallVectorTemplateBase.43" = type { %"class.llvm::SmallVectorTemplateCommon.44" }
%"struct.llvm::AlignedCharArrayUnion.51" = type { %"struct.llvm::AlignedCharArray.52" }
%"struct.llvm::SmallVectorStorage.53" = type { i8 }
%"class.llvm::MallocAllocator" = type { i8 }
%"class.llvm::DenseMap.65" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair.67" = type { %"struct.std::pair.68" }
%"struct.std::pair.68" = type { ptr, ptr }
%"class.clang::FileEntry" = type { ptr, i64, i64, ptr, i32, %"class.llvm::sys::fs::UniqueID", i8, i8, i8, %"class.std::unique_ptr.69" }
%"class.clang::DirectoryEntry" = type { ptr }
%"class.llvm::sys::fs::UniqueID" = type { i64, i64 }
%"class.std::unique_ptr.69" = type { %"class.std::tuple.70" }
%"class.std::tuple.70" = type { %"struct.std::_Tuple_impl.71" }
%"struct.std::_Tuple_impl.71" = type { %"struct.std::_Head_base.76" }
%"struct.std::_Head_base.76" = type { ptr }
%"class.clang::vfs::File" = type { ptr }
%"class.std::unique_ptr.78" = type { %"class.std::tuple.79" }
%"class.std::tuple.79" = type { %"struct.std::_Tuple_impl.80" }
%"struct.std::_Tuple_impl.80" = type { %"struct.std::_Head_base.85" }
%"struct.std::_Head_base.85" = type { ptr }
%"struct.clang::SourceManager::OverriddenFilesInfoTy" = type { %"class.llvm::DenseMap.86", %"class.llvm::DenseSet" }
%"class.llvm::DenseMap.86" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair.88" = type { %"struct.std::pair.89" }
%"struct.std::pair.89" = type { ptr, ptr }
%"class.llvm::DenseSet" = type { %"class.llvm::DenseMap.91" }
%"class.llvm::DenseMap.91" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"class.llvm::detail::DenseSetPair" = type { ptr }
%"class.std::vector.94" = type { %"struct.std::_Vector_base.95" }
%"struct.std::_Vector_base.95" = type { %"struct.std::_Vector_base<clang::SrcMgr::ContentCache *, std::allocator<clang::SrcMgr::ContentCache *> >::_Vector_impl" }
%"struct.std::_Vector_base<clang::SrcMgr::ContentCache *, std::allocator<clang::SrcMgr::ContentCache *> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.llvm::SmallVector.99" = type <{ %"class.llvm::SmallVectorImpl.100", %"struct.llvm::SmallVectorStorage.105", [7 x i8] }>
%"class.llvm::SmallVectorImpl.100" = type { %"class.llvm::SmallVectorTemplateBase.101" }
%"class.llvm::SmallVectorTemplateBase.101" = type { %"class.llvm::SmallVectorTemplateCommon.102" }
%"struct.llvm::SmallVectorStorage.105" = type { i8 }
%"class.std::vector.107" = type { %"struct.std::_Bvector_base" }
%"struct.std::_Bvector_base" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" }
%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" = type { %"struct.std::_Bit_iterator", %"struct.std::_Bit_iterator", ptr }
%"struct.std::_Bit_iterator" = type { %"struct.std::_Bit_iterator_base.base", [4 x i8] }
%"struct.std::_Bit_iterator_base.base" = type <{ ptr, i32 }>
%"class.clang::ExternalSLocEntrySource" = type { ptr }
%"class.clang::LineTableInfo" = type opaque
%"class.clang::SrcMgr::ContentCache" = type <{ %"class.llvm::PointerIntPair", ptr, ptr, ptr, [5 x i8], [3 x i8] }>
%"class.llvm::PointerIntPair" = type { i64 }
%"class.clang::FileID" = type { i32 }
%"class.llvm::DenseMap.111" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair.113" = type opaque
%"class.llvm::DenseMap.115" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair.117" = type opaque
%"class.clang::InBeforeInTUCacheEntry" = type { %"class.clang::FileID", %"class.clang::FileID", i8, %"class.clang::FileID", i32, i32 }
%"class.std::unique_ptr.119" = type { %"class.std::tuple.120" }
%"class.std::tuple.120" = type { %"struct.std::_Tuple_impl.121" }
%"struct.std::_Tuple_impl.121" = type { %"struct.std::_Head_base.126" }
%"struct.std::_Head_base.126" = type { ptr }
%"class.llvm::MemoryBuffer" = type { ptr, ptr, ptr }
%"class.std::unique_ptr.127" = type { %"class.std::tuple.128" }
%"class.std::tuple.128" = type { %"struct.std::_Tuple_impl.129" }
%"struct.std::_Tuple_impl.129" = type { %"struct.std::_Head_base.134" }
%"struct.std::_Head_base.134" = type { ptr }
%"class.llvm::DenseMap.135" = type <{ ptr, i32, i32, i32, [4 x i8] }>
%"struct.llvm::detail::DenseMapPair.137" = type opaque
%"class.llvm::SmallVector.139" = type { %"class.llvm::SmallVectorImpl.140", %"struct.llvm::SmallVectorStorage.144" }
%"class.llvm::SmallVectorImpl.140" = type { %"class.llvm::SmallVectorTemplateBase.141" }
; Ensure the LR is restored using a different register
; CHECK: mtlr {{[0-9]+}}
; CHECK: blr
; BreakableStringLiteral::insertBreak — forwards a computed whitespace
; replacement to WhitespaceManager::replaceWhitespaceInToken.
; Unresolved diff markers removed; the opaque-pointer ("+") side is kept.
; NOTE(review): %agg.tmp7.sroa.0.0.copyload has no visible definition in this
; chunk — presumably a phi merging %agg.tmp7.sroa.0.0.copyload.pre and %3 was
; elided when the file was spliced; confirm against the upstream test.
define void @_ZN5clang6format22BreakableStringLiteral11insertBreakEjjSt4pairImjERNS0_17WhitespaceManagerE(ptr nocapture readonly %this, i32 zeroext %LineIndex, i32 zeroext %TailOffset, [2 x i64] %Split.coerce, ptr dereferenceable(1504) %Whitespaces) unnamed_addr #1 align 2 {
entry:
 %Split.coerce.fca.0.extract = extractvalue [2 x i64] %Split.coerce, 0
 %Split.coerce.fca.1.extract = extractvalue [2 x i64] %Split.coerce, 1
 %StartColumn = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 1
 %0 = load i32, ptr %StartColumn, align 8, !tbaa !2
 %Prefix = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 2
 %Length.i.19 = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 2, i32 1
 %1 = load i64, ptr %Length.i.19, align 8, !tbaa !10
 %cmp.i = icmp eq i64 %1, 0
 br i1 %cmp.i, label %entry._ZNK4llvm9StringRef10startswithES0_.exit_crit_edge, label %if.end.i.i
entry._ZNK4llvm9StringRef10startswithES0_.exit_crit_edge: ; preds = %entry
 %agg.tmp7.sroa.0.0.copyload.pre = load i64, ptr %Prefix, align 8
 br label %_ZNK4llvm9StringRef10startswithES0_.exit
if.end.i.i: ; preds = %entry
 %2 = load ptr, ptr %Prefix, align 8, !tbaa !12
 %lhsc = load i8, ptr %2, align 1
 %phitmp.i = icmp eq i8 %lhsc, 64
 %3 = ptrtoint ptr %2 to i64
 br label %_ZNK4llvm9StringRef10startswithES0_.exit
_ZNK4llvm9StringRef10startswithES0_.exit: ; preds = %entry._ZNK4llvm9StringRef10startswithES0_.exit_crit_edge, %if.end.i.i
 %4 = phi i1 [ false, %entry._ZNK4llvm9StringRef10startswithES0_.exit_crit_edge ], [ %phitmp.i, %if.end.i.i ]
 %dec = sext i1 %4 to i32
 %dec. = add i32 %dec, %0
 %Tok = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 0, i32 1
 %ref = load ptr, ptr %Tok, align 8, !tbaa !13
 %conv = zext i32 %TailOffset to i64
 %add = add i64 %Split.coerce.fca.0.extract, %conv
 %add4 = add i64 %add, %1
 %conv5 = trunc i64 %add4 to i32
 %Split.sroa.2.8.extract.trunc = trunc i64 %Split.coerce.fca.1.extract to i32
 %agg.tmp6.sroa.0.0..sroa_idx13 = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 3
 %agg.tmp6.sroa.0.0.copyload = load i64, ptr %agg.tmp6.sroa.0.0..sroa_idx13, align 8
 %agg.tmp6.sroa.2.0..sroa_idx14 = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 3, i32 1
 %agg.tmp6.sroa.2.0.copyload = load i64, ptr %agg.tmp6.sroa.2.0..sroa_idx14, align 8
 %InPPDirective = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 0, i32 3
 %5 = load i8, ptr %InPPDirective, align 4, !tbaa !34, !range !39
 %tobool = icmp ne i8 %5, 0
 %IndentLevel = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 0, i32 2
 %6 = load i32, ptr %IndentLevel, align 8, !tbaa !33
 %.fca.0.insert11 = insertvalue [2 x i64] undef, i64 %agg.tmp6.sroa.0.0.copyload, 0
 %.fca.1.insert12 = insertvalue [2 x i64] %.fca.0.insert11, i64 %agg.tmp6.sroa.2.0.copyload, 1
 %.fca.0.insert = insertvalue [2 x i64] undef, i64 %agg.tmp7.sroa.0.0.copyload, 0
 %.fca.1.insert = insertvalue [2 x i64] %.fca.0.insert, i64 %1, 1
 tail call void @_ZN5clang6format17WhitespaceManager24replaceWhitespaceInTokenERKNS0_11FormatTokenEjjN4llvm9StringRefES6_bjji(ptr nonnull %Whitespaces, ptr dereferenceable(272) %ref, i32 zeroext %conv5, i32 zeroext %Split.sroa.2.8.extract.trunc, [2 x i64] %.fca.1.insert12, [2 x i64] %.fca.1.insert, i1 zeroext %tobool, i32 zeroext 1, i32 zeroext %6, i32 signext %dec.) #9
 ret void
}
; Function Attrs: nounwind argmemonly
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
declare void @_ZN5clang6format17WhitespaceManager24replaceWhitespaceInTokenERKNS0_11FormatTokenEjjN4llvm9StringRefES6_bjji(ptr, ptr dereferenceable(272), i32 zeroext, i32 zeroext, [2 x i64], [2 x i64], i1 zeroext, i32 zeroext, i32 zeroext, i32 signext) #3
; Function Attrs: nounwind argmemonly
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
attributes #9 = { nounwind }
; Function Attrs: noinline nounwind
; Stores the argument to @glob, then emits an inline-asm memory barrier.
define void @call(i64 %a) local_unnamed_addr #0 {
entry:
 store i64 %a, ptr @glob, align 8
 tail call void asm sideeffect "#Do Nothing", "~{memory}"()
 ret void
}
; Calls @test and prints its result via @.str; always returns 0.
define signext i32 @main() local_unnamed_addr #1 {
entry:
 %call = tail call signext i32 @test(i32 signext 10, i32 signext -15, i32 signext 0)
 %call1 = tail call signext i32 (ptr, ...) @printf(ptr @.str, i32 signext %call)
 ret i32 0
}
; Function Attrs: nounwind
declare signext i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr #2
; Allocates nine variable-sized i32 arrays and passes them to @bar.
; Diff markers resolved to the opaque-pointer side. The unnamed-value
; numbering jumped %3 -> %6 while the call used %4 and %5, so the two
; elided allocas are restored to make the IR well-formed again.
define dso_local signext i32 @foo(i32 %n) {
entry:
 %ptr0 = alloca ptr
 %0 = alloca i32, i32 %n
 store ptr %0, ptr %ptr0
 %1 = alloca i32, i32 %n
 %2 = alloca i32, i32 %n
 %3 = alloca i32, i32 %n
 %4 = alloca i32, i32 %n
 %5 = alloca i32, i32 %n
 %6 = alloca i32, i32 %n
 %7 = alloca i32, i32 %n
 %8 = alloca i32, i32 %n
 %9 = load ptr, ptr %ptr0
 %call = call i32 @bar(ptr %1, ptr %2, ptr %3, ptr %4, ptr %5, ptr %6, ptr %7, ptr %8, ptr %9)
 ret i32 %call
}
declare i32 @bar(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr)
; PPC32-LINUX-LABEL: foo
; PPC32-LINUX: mflr 0
; RUN: -mtriple=powerpc64-ibm-aix-xcoff -frame-pointer=all | FileCheck %s \
; RUN: -check-prefix=PPC64-AIX
; Returns a pointer to a dynamically-sized stack allocation.
define ptr @f1(i32 %n) nounwind {
 %tmp = alloca i32, i32 %n ; <ptr> [#uses=1]
 ret ptr %tmp
}
; PPC32-LINUX-LABEL: f1
; RUN: -mtriple=powerpc64-ibm-aix-xcoff -frame-pointer=all | FileCheck %s \
; RUN: -check-prefix=PPC64-AIX-FP
; Large fixed stack allocation (8191 x i32) to exercise big-frame lowering.
define ptr @f1() nounwind {
 %tmp = alloca i32, i32 8191 ; <ptr> [#uses=1]
 ret ptr %tmp
}
; - The stdux is used to update the back-chain link when allocated frame is large
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc64-- -frame-pointer=all | \
; RUN: not grep "ld r31, 40(r1)"
; Small fixed stack allocation (2 x i32).
define ptr @f1() {
 %tmp = alloca i32, i32 2 ; <ptr> [#uses=1]
 ret ptr %tmp
}
; RUN: -mtriple=powerpc64-ibm-aix-xcoff -frame-pointer=all | FileCheck %s \
; RUN: -check-prefix=PPC64-AIX-FP
define ptr @frame_small() {
 %tmp = alloca i32, i32 95
 ret ptr %tmp
}
; The linkage area, if there is one, is still on the top of the stack after
; - PPC32 AIX ABI:
; 220 bytes = 18*8 (FPRs) + 19*4 (GPRs);
define ptr @in_stack_floor_32() {
 %tmp = alloca i32, i32 55
 ret ptr %tmp
}
define ptr @out_stack_floor_32() {
 %tmp = alloca i32, i32 56
 ret ptr %tmp
}
define ptr @in_stack_floor_64() {
 %tmp = alloca i32, i32 72
 ret ptr %tmp
}
define ptr @out_stack_floor_64() {
 %tmp = alloca i32, i32 73
 ret ptr %tmp
}
; PPC32-LINUX-NOFP-LABEL: in_stack_floor_32
ret void
}
define void @foo_pt(ptr %x) {
; CHECK-LABEL: name: foo_pt
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x3
ret void
}
define void @foo_int(ptr %x) {
; CHECK-LABEL: name: foo_int
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x3
ret void
}
; NOTE(review): both definitions below are truncated by the file splice
; (no closing brace visible for @foo; @test_stack_guard_openbsd's tail is
; out of view) — only the diff markers are resolved here.
define void @foo(ptr %x) {
; CHECK-LABEL: name: foo
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x3
; OPENBSD: [[GUARD:%[0-9]+]]:_(p0) = G_LOAD [[GUARD_LOCAL]](p0) :: (dereferenceable load (p0) from @__guard_local)
; OPENBSD: G_STORE [[GUARD]](p0), [[GUARD_SLOT]](p0) :: (volatile store (p0) into %stack.0.StackGuardSlot)
define void @test_stack_guard_openbsd() {
 %StackGuardSlot = alloca ptr
 %StackGuard = load ptr, ptr @__guard_local
 call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
 ret void
; PPC64: 4, 32751(3)
; PPC64: blr
; Store to a fixed absolute address (folded inttoptr constant).
define void @test() nounwind {
 store i32 0, ptr inttoptr (i64 48725999 to ptr)
 ret void
}
; PPC64: std 4, 9024(3)
; PPC64: blr
; 64-bit store to a fixed absolute address (folded inttoptr constant).
define void @test2() nounwind {
 store i64 0, ptr inttoptr (i64 74560 to ptr)
 ret void
}
%"struct.CC::TT" = type { i64, i32 }
%class.CC = type { %struct.SS }
%struct.SS = type { ptr }
@_ZN2CC2ccE = external thread_local global %"struct.CC::TT", align 8
-define noalias i8* @_ZN2CC3funEv(%class.CC* %this) nounwind {
+define noalias ptr @_ZN2CC3funEv(ptr %this) nounwind {
; CHECK-LABEL: _ZN2CC3funEv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %foo = getelementptr inbounds %class.CC, %class.CC* %this, i64 0, i32 0, i32 0
- %0 = load void ()*, void ()** %foo, align 8
+ %0 = load ptr, ptr %this, align 8
tail call void %0()
- %1 = load i64, i64* getelementptr inbounds (%"struct.CC::TT", %"struct.CC::TT"* @_ZN2CC2ccE, i64 0, i32 0)
+ %1 = load i64, ptr @_ZN2CC2ccE
%tobool = icmp eq i64 %1, 0
br i1 %tobool, label %if.end, label %if.then
if.then:
- tail call void @_ZN2CC3barEPi(%class.CC* nonnull %this, i32* getelementptr inbounds (%"struct.CC::TT", %"struct.CC::TT"* @_ZN2CC2ccE, i64 0, i32 1))
+ tail call void @_ZN2CC3barEPi(ptr nonnull %this, ptr getelementptr inbounds (%"struct.CC::TT", ptr @_ZN2CC2ccE, i64 0, i32 1))
br label %if.end
if.end:
- ret i8* null
+ ret ptr null
}
-declare void @_ZN2CC3barEPi(%class.CC*, i32*)
+declare void @_ZN2CC3barEPi(ptr, ptr)
; void llvm::MachineMemOperand::refineAlignment(const llvm::MachineMemOperand*):
; Assertion `MMO->getFlags() == getFlags() && "Flags mismatch !"' failed.
-declare void @_Z3fn11F(%class.F* byval(%class.F) align 8) local_unnamed_addr
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
-declare signext i32 @_ZN1F11isGlobalRegEv(%class.F*) local_unnamed_addr
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @_Z10EmitLValuev(%class.F* sret(%class.F)) local_unnamed_addr
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @_Z3fn11F(ptr byval(%class.F) align 8) local_unnamed_addr
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare signext i32 @_ZN1F11isGlobalRegEv(ptr) local_unnamed_addr
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @_Z10EmitLValuev(ptr sret(%class.F)) local_unnamed_addr
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-%class.F = type { i32, i64, i8, [64 x i8], i8, i32* }
+%class.F = type { i32, i64, i8, [64 x i8], i8, ptr }
-define signext i32 @_Z29EmitOMPAtomicSimpleUpdateExpr1F(%class.F* byval(%class.F) align 8 %p1) local_unnamed_addr {
+define signext i32 @_Z29EmitOMPAtomicSimpleUpdateExpr1F(ptr byval(%class.F) align 8 %p1) local_unnamed_addr {
entry:
- call void @_Z3fn11F(%class.F* byval(%class.F) nonnull align 8 %p1)
- %call = call signext i32 @_ZN1F11isGlobalRegEv(%class.F* nonnull %p1)
+ call void @_Z3fn11F(ptr byval(%class.F) nonnull align 8 %p1)
+ %call = call signext i32 @_ZN1F11isGlobalRegEv(ptr nonnull %p1)
ret i32 %call
}
entry:
%agg.tmp1 = alloca %class.F, align 8
%XLValue = alloca %class.F, align 8
- %0 = bitcast %class.F* %XLValue to i8*
- call void @llvm.lifetime.start.p0i8(i64 96, i8* nonnull %0)
- call void @_Z10EmitLValuev(%class.F* nonnull sret(%class.F) %XLValue)
- %1 = bitcast %class.F* %agg.tmp1 to i8*
- call void @llvm.lifetime.start.p0i8(i64 96, i8* nonnull %1)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %1, i8* align 8 nonnull %0, i64 96, i1 false)
- call void @_Z3fn11F(%class.F* byval(%class.F) nonnull align 8 %XLValue)
- %call.i = call signext i32 @_ZN1F11isGlobalRegEv(%class.F* nonnull %agg.tmp1)
- call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %1)
- call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 96, ptr nonnull %XLValue)
+ call void @_Z10EmitLValuev(ptr nonnull sret(%class.F) %XLValue)
+ call void @llvm.lifetime.start.p0(i64 96, ptr nonnull %agg.tmp1)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull %agg.tmp1, ptr align 8 nonnull %XLValue, i64 96, i1 false)
+ call void @_Z3fn11F(ptr byval(%class.F) nonnull align 8 %XLValue)
+ %call.i = call signext i32 @_ZN1F11isGlobalRegEv(ptr nonnull %agg.tmp1)
+ call void @llvm.lifetime.end.p0(i64 96, ptr nonnull %agg.tmp1)
+ call void @llvm.lifetime.end.p0(i64 96, ptr nonnull %XLValue)
ret void
}
;; CHECK-NEXT: blr
define void @f() {
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 0), align 4
- %1 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 1), align 4
- %2 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 2), align 4
- %3 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 3), align 4
- store i32 %0, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 0), align 4
- store i32 %1, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 1), align 4
- store i32 %2, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 2), align 4
- store i32 %3, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 3), align 4
+ %0 = load i32, ptr @fx, align 4
+ %1 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 1), align 4
+ %2 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 2), align 4
+ %3 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 3), align 4
+ store i32 %0, ptr @fy, align 4
+ store i32 %1, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 1), align 4
+ store i32 %2, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 2), align 4
+ store i32 %3, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 3), align 4
ret void
}
;; CHECK: blr
define void @g() {
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 0), align 16
- %1 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 1), align 4
- %2 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 2), align 4
- %3 = load i32, i32* getelementptr inbounds (%struct.X, %struct.X* @fx, i32 0, i32 3), align 4
- store i32 %0, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 0), align 16
- store i32 %1, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 1), align 4
- store i32 %2, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 2), align 4
- store i32 %3, i32* getelementptr inbounds (%struct.X, %struct.X* @fy, i32 0, i32 3), align 4
+ %0 = load i32, ptr @fx, align 16
+ %1 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 1), align 4
+ %2 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 2), align 4
+ %3 = load i32, ptr getelementptr inbounds (%struct.X, ptr @fx, i32 0, i32 3), align 4
+ store i32 %0, ptr @fy, align 16
+ store i32 %1, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 1), align 4
+ store i32 %2, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 2), align 4
+ store i32 %3, ptr getelementptr inbounds (%struct.X, ptr @fy, i32 0, i32 3), align 4
ret void
}
; CHECK-BE-NEXT: blr
entry:
%a = alloca <8 x i32>, align 32
- %0 = bitcast <8 x i32>* %a to i8*
- call void @llvm.lifetime.start.p0i8(i64 32, i8* %0)
- store <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <8 x i32>* %a, align 32
- call void @test(<8 x i32>* %a)
- %1 = load <8 x i32>, <8 x i32>* %a, align 32
- %vecext = extractelement <8 x i32> %1, i32 0
- %2 = bitcast <8 x i32>* %a to i8*
- call void @llvm.lifetime.end.p0i8(i64 32, i8* %2)
+ call void @llvm.lifetime.start.p0(i64 32, ptr %a)
+ store <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, ptr %a, align 32
+ call void @test(ptr %a)
+ %0 = load <8 x i32>, ptr %a, align 32
+ %vecext = extractelement <8 x i32> %0, i32 0
+ call void @llvm.lifetime.end.p0(i64 32, ptr %a)
ret i32 %vecext
}
; CHECK-BE-NEXT: blr
entry:
%a = alloca <4 x i32>, align 32
- %0 = bitcast <4 x i32>* %a to i8*
- call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
- store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %a, align 32
- call void @test1(<4 x i32>* %a)
- %1 = load <4 x i32>, <4 x i32>* %a, align 32
- %vecext = extractelement <4 x i32> %1, i32 0
- %2 = bitcast <4 x i32>* %a to i8*
- call void @llvm.lifetime.end.p0i8(i64 16, i8* %2)
+ call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %a, align 32
+ call void @test1(ptr %a)
+ %0 = load <4 x i32>, ptr %a, align 32
+ %vecext = extractelement <4 x i32> %0, i32 0
+ call void @llvm.lifetime.end.p0(i64 16, ptr %a)
ret i32 %vecext
}
; CHECK-OPT-LABEL: @test_Array(
; CHECK-OPT-NEXT: entry:
; CHECK-OPT-NEXT: %Arr2 = alloca [64 x i16], align 2
-; CHECK-OPT: store <16 x i16> [[TMP0:%.*]], <16 x i16>* [[TMP0:%.*]], align 2
+; CHECK-OPT: store <16 x i16> [[TMP0:%.*]], ptr [[TMP0:%.*]], align 2
; CHECK-LE-LABEL: test_Array:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: mflr r0
entry:
%Arr2 = alloca [64 x i16], align 2
%i = alloca i32, align 4
- %0 = bitcast [64 x i16]* %Arr2 to i8*
- call void @llvm.lifetime.start.p0i8(i64 128, i8* %0)
- %1 = bitcast i32* %i to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %1)
- store i32 0, i32* %i, align 4
+ call void @llvm.lifetime.start.p0(i64 128, ptr %Arr2)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %i)
+ store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %2 = load i32, i32* %i, align 4
- %cmp = icmp slt i32 %2, 64
+ %0 = load i32, ptr %i, align 4
+ %cmp = icmp slt i32 %0, 64
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- %3 = bitcast i32* %i to i8*
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %3)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %i)
br label %for.end
for.body: ; preds = %for.cond
- %4 = load i32, i32* %i, align 4
- %idxprom = sext i32 %4 to i64
- %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @Arr1, i64 0, i64 %idxprom
- %5 = load i8, i8* %arrayidx, align 1
- %conv = zext i8 %5 to i16
- %6 = load i32, i32* %i, align 4
- %idxprom1 = sext i32 %6 to i64
- %arrayidx2 = getelementptr inbounds [64 x i16], [64 x i16]* %Arr2, i64 0, i64 %idxprom1
- store i16 %conv, i16* %arrayidx2, align 2
+ %1 = load i32, ptr %i, align 4
+ %idxprom = sext i32 %1 to i64
+ %arrayidx = getelementptr inbounds [64 x i8], ptr @Arr1, i64 0, i64 %idxprom
+ %2 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %2 to i16
+ %3 = load i32, ptr %i, align 4
+ %idxprom1 = sext i32 %3 to i64
+ %arrayidx2 = getelementptr inbounds [64 x i16], ptr %Arr2, i64 0, i64 %idxprom1
+ store i16 %conv, ptr %arrayidx2, align 2
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32, i32* %i, align 4
- %inc = add nsw i32 %7, 1
- store i32 %inc, i32* %i, align 4
+ %4 = load i32, ptr %i, align 4
+ %inc = add nsw i32 %4, 1
+ store i32 %inc, ptr %i, align 4
br label %for.cond
for.end: ; preds = %for.cond.cleanup
- %arraydecay = getelementptr inbounds [64 x i16], [64 x i16]* %Arr2, i64 0, i64 0
- call void @test_arr(i16* %arraydecay)
- %8 = bitcast [64 x i16]* %Arr2 to i8*
- call void @llvm.lifetime.end.p0i8(i64 128, i8* %8)
+ call void @test_arr(ptr %Arr2)
+ call void @llvm.lifetime.end.p0(i64 128, ptr %Arr2)
ret void
}
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) nounwind
-declare void @test(<8 x i32>*) nounwind
-declare void @test1(<4 x i32>*) nounwind
-declare void @test_arr(i16*)
+declare void @test(ptr) nounwind
+declare void @test1(ptr) nounwind
+declare void @test_arr(ptr)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) nounwind
%constexpr5 = trunc i32 %constexpr4 to i8
%constexpr6 = icmp ne i8 %constexpr5, 0
%constexpr8 = zext i1 %constexpr6 to i16
- store i16 %constexpr8, i16* null, align 2
+ store i16 %constexpr8, ptr null, align 2
ret void
}
; Function Attrs: norecurse nounwind
; RUN: llc -mtriple=powerpc64le-unknown-unknown -mcpu=pwr9 < %s | FileCheck %s
-define void @test1(i32* nocapture readonly %arr, i32* nocapture %arrTo) {
+define void @test1(ptr nocapture readonly %arr, ptr nocapture %arrTo) {
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 4
- %0 = bitcast i32* %arrayidx to <4 x i32>*
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 4
- %1 = bitcast i32* %arrayidx1 to <4 x i32>*
- %2 = load <4 x i32>, <4 x i32>* %1, align 16
- store <4 x i32> %2, <4 x i32>* %0, align 16
+ %arrayidx = getelementptr inbounds i32, ptr %arrTo, i64 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 4
+ %0 = load <4 x i32>, ptr %arrayidx1, align 16
+ store <4 x i32> %0, ptr %arrayidx, align 16
ret void
; CHECK-LABEL: test1
; CHECK: lxv [[LD:[0-9]+]], 16(3)
}
; Function Attrs: norecurse nounwind
-define void @test2(i32* nocapture readonly %arr, i32* nocapture %arrTo) {
+define void @test2(ptr nocapture readonly %arr, ptr nocapture %arrTo) {
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 1
- %0 = bitcast i32* %arrayidx to <4 x i32>*
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
- %1 = bitcast i32* %arrayidx1 to <4 x i32>*
- %2 = load <4 x i32>, <4 x i32>* %1, align 16
- store <4 x i32> %2, <4 x i32>* %0, align 16
+ %arrayidx = getelementptr inbounds i32, ptr %arrTo, i64 1
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 2
+ %0 = load <4 x i32>, ptr %arrayidx1, align 16
+ store <4 x i32> %0, ptr %arrayidx, align 16
ret void
; CHECK-LABEL: test2
; CHECK: li [[REG:[0-9]+]], 8
; Function Attrs: nounwind
define void @__alloc_pages_nodemask() #0 {
entry:
- %0 = call i64 asm sideeffect "ld${1:U}${1:X} $0,$1", "=r,*m"(i64* elementtype(i64) undef)
+ %0 = call i64 asm sideeffect "ld${1:U}${1:X} $0,$1", "=r,*m"(ptr elementtype(i64) undef)
br i1 undef, label %do.body.lr.ph.i.i.i, label %zone_page_state_snapshot.exit.i.i
; CHECK: ld 3, 0(3)
do.body.i.i.i: ; preds = %do.body.i.i.i, %do.body.lr.ph.i.i.i
%x.022.i.i.i = phi i64 [ %0, %do.body.lr.ph.i.i.i ], [ %add7.i.i.i, %do.body.i.i.i ]
- %1 = load i8, i8* undef, align 1
+ %1 = load i8, ptr undef, align 1
%conv.i.i458.i = sext i8 %1 to i64
%add7.i.i.i = add i64 %x.022.i.i.i, %conv.i.i458.i
- %2 = load i32, i32* @nr_cpu_ids, align 4
+ %2 = load i32, ptr @nr_cpu_ids, align 4
%cmp.i1.i.i = icmp ult i32 0, %2
br i1 %cmp.i1.i.i, label %do.body.i.i.i, label %zone_page_state_snapshot.exit.i.i
; CHECK-P7-NEXT: blr
L.entry:
%value.addr = alloca i16, align 2
- store i16 -32477, i16* %value.addr, align 2
- %0 = cmpxchg i16* %value.addr, i16 -32477, i16 234 seq_cst seq_cst
+ store i16 -32477, ptr %value.addr, align 2
+ %0 = cmpxchg ptr %value.addr, i16 -32477, i16 234 seq_cst seq_cst
%1 = extractvalue { i16, i1 } %0, 1
br i1 %1, label %L.B0000, label %L.B0003
L.B0003: ; preds = %L.entry
- %puts = call i32 @puts(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @str, i64 0, i64 0))
+ %puts = call i32 @puts(ptr @str)
ret i32 1
L.B0000: ; preds = %L.entry
- %2 = load i16, i16* %value.addr, align 2
+ %2 = load i16, ptr %value.addr, align 2
%3 = icmp eq i16 %2, 234
br i1 %3, label %L.B0001, label %L.B0005
L.B0005: ; preds = %L.B0000
- %puts1 = call i32 @puts(i8* getelementptr inbounds ([59 x i8], [59 x i8]* @str.1, i64 0, i64 0))
+ %puts1 = call i32 @puts(ptr @str.1)
ret i32 1
L.B0001: ; preds = %L.B0000
- %puts2 = call i32 @puts(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @str.2, i64 0, i64 0))
+ %puts2 = call i32 @puts(ptr @str.2)
ret i32 0
}
; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly) #0
+declare i32 @puts(ptr nocapture readonly) #0
; Function Attrs: nounwind
define double @_Z7getLXSDddddddddddddd(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j, double %k, double %l, double %m) local_unnamed_addr #0 {
entry:
- %0 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 10), align 8
- %1 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 17), align 8
- %2 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 87), align 8
- %3 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 97), align 8
- %4 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 77), align 8
- store double %3, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 122), align 8
+ %0 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 10), align 8
+ %1 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 17), align 8
+ %2 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 87), align 8
+ %3 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 97), align 8
+ %4 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 77), align 8
+ store double %3, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 122), align 8
%add = fadd double %a, %b
%add1 = fadd double %add, %c
%add2 = fadd double %add1, %d
; Function Attrs: nounwind
define float @_Z8getLXSSPfffffffffffff(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i, float %j, float %k, float %l, float %m) local_unnamed_addr #0 {
entry:
- %0 = load float, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 10), align 4
- %1 = load float, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 17), align 4
- %2 = load float, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 87), align 4
- %3 = load float, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 97), align 4
- %4 = load float, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 77), align 4
- store float %3, float* getelementptr inbounds ([500 x float], [500 x float]* @gf, i64 0, i64 122), align 4
+ %0 = load float, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 10), align 4
+ %1 = load float, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 17), align 4
+ %2 = load float, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 87), align 4
+ %3 = load float, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 97), align 4
+ %4 = load float, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 77), align 4
+ store float %3, ptr getelementptr inbounds ([500 x float], ptr @gf, i64 0, i64 122), align 4
%add = fadd float %a, %b
%add1 = fadd float %add, %c
%add2 = fadd float %add1, %d
; RUN: | FileCheck %s --check-prefix=CHECK-P9
@a = external local_unnamed_addr global <4 x i32>, align 16
-@pb = external local_unnamed_addr global float*, align 8
+@pb = external local_unnamed_addr global ptr, align 8
-define void @testExpandPostRAPseudo(i32* nocapture readonly %ptr) {
+define void @testExpandPostRAPseudo(ptr nocapture readonly %ptr) {
; CHECK-P8-LABEL: testExpandPostRAPseudo:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8: lfiwzx f0, 0, r3
; CHECK-P9: stfsx f0, r3, r4
; CHECK-P9: blr
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
- store <4 x i32> %splat.splat, <4 x i32>* @a, align 16
+ store <4 x i32> %splat.splat, ptr @a, align 16
tail call void asm sideeffect "#Clobber Rigisters", "~{f0},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
- %1 = load i32, i32* %ptr, align 4
+ %1 = load i32, ptr %ptr, align 4
%conv = sitofp i32 %1 to float
- %2 = load float*, float** @pb, align 8
- %add.ptr = getelementptr inbounds float, float* %2, i64 16777216
- store float %conv, float* %add.ptr, align 4
+ %2 = load ptr, ptr @pb, align 8
+ %add.ptr = getelementptr inbounds float, ptr %2, i64 16777216
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%0 = type { double, double }
-define void @maybe_an_fma(%0* sret(%0) %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind {
+define void @maybe_an_fma(ptr sret(%0) %agg.result, ptr byval(%0) %a, ptr byval(%0) %b, ptr byval(%0) %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
- %a.real = load double, double* %a.realp
- %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
- %a.imag = load double, double* %a.imagp
- %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
- %b.real = load double, double* %b.realp
- %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
- %b.imag = load double, double* %b.imagp
+ %a.real = load double, ptr %a
+ %a.imagp = getelementptr inbounds %0, ptr %a, i32 0, i32 1
+ %a.imag = load double, ptr %a.imagp
+ %b.real = load double, ptr %b
+ %b.imagp = getelementptr inbounds %0, ptr %b, i32 0, i32 1
+ %b.imag = load double, ptr %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.r = fsub double %mul.rl, %mul.rr
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
- %c.real = load double, double* %c.realp
- %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
- %c.imag = load double, double* %c.imagp
+ %c.real = load double, ptr %c
+ %c.imagp = getelementptr inbounds %0, ptr %c, i32 0, i32 1
+ %c.imag = load double, ptr %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
- store double %add.r, double* %real
- store double %add.i, double* %imag
+ %imag = getelementptr inbounds %0, ptr %agg.result, i32 0, i32 1
+ store double %add.r, ptr %agg.result
+ store double %add.i, ptr %imag
ret void
; CHECK: fmadd
}
; CHECK-LABEL: @foo
next:
- %sunkaddr18 = ptrtoint %"class.llvm::MCOperand"* %MCOp to i64
+ %sunkaddr18 = ptrtoint ptr %MCOp to i64
%sunkaddr19 = add i64 %sunkaddr18, 8
- %sunkaddr20 = inttoptr i64 %sunkaddr19 to double*
- store double 0.000000e+00, double* %sunkaddr20, align 8, !tbaa !1
- %sunkaddr21 = ptrtoint %"class.llvm::MCOperand"* %MCOp to i64
+ %sunkaddr20 = inttoptr i64 %sunkaddr19 to ptr
+ store double 0.000000e+00, ptr %sunkaddr20, align 8, !tbaa !1
+ %sunkaddr21 = ptrtoint ptr %MCOp to i64
%sunkaddr22 = add i64 %sunkaddr21, 8
- %sunkaddr23 = inttoptr i64 %sunkaddr22 to i32*
- store i32 %v, i32* %sunkaddr23, align 8, !tbaa !2
+ %sunkaddr23 = inttoptr i64 %sunkaddr22 to ptr
+ store i32 %v, ptr %sunkaddr23, align 8, !tbaa !2
ret void
; Make sure that the 64-bit store comes first, regardless of what TBAA says
br label %.thread
.thread: ; preds = %45, %.thread.outer
- call void @llvm.memset.p0i8.i64(i8* align 8 undef, i8 0, i64 56, i1 false)
- store i8* %21, i8** undef, align 8
- store i32 1073741824, i32* undef, align 8
+ call void @llvm.memset.p0.i64(ptr align 8 undef, i8 0, i64 56, i1 false)
+ store ptr %21, ptr undef, align 8
+ store i32 1073741824, ptr undef, align 8
%22 = call { i64, i64, i64, i64, i64, i64, i64 } asm sideeffect "sc\0A\09mfcr $0", "=&{r0},=&{r3},=&{r4},=&{r5},=&{r6},=&{r7},=&{r8},{r0},{r3},{r4},{r5},~{cr0},~{ctr},~{memory},~{r11},~{r12}"(i64 342, i64 80871424, i64 undef, i64 0) #2, !srcloc !1
br i1 undef, label %.lr.ph, label %.critedge15.preheader
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pwr8" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind argmemonly }
; RUN: -ppc-use-absolute-jumptables -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs %s | FileCheck %s -check-prefix=CHECK-BE
-%struct.node = type { i8, %struct.node* }
+%struct.node = type { i8, ptr }
; Function Attrs: norecurse nounwind readonly
-define zeroext i32 @jumpTableTest(%struct.node* readonly %list) {
+define zeroext i32 @jumpTableTest(ptr readonly %list) {
; CHECK-LE-LABEL: jumpTableTest:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE: rldic r[[REG:[0-9]+]], r[[REG]], 3, 29
; CHECK-BE: bctr
; CHECK-BE: blr
entry:
- %cmp36 = icmp eq %struct.node* %list, null
+ %cmp36 = icmp eq ptr %list, null
br i1 %cmp36, label %while.end, label %while.body
while.body: ; preds = %entry, %sw.epilog
%result.038 = phi i32 [ %result.1, %sw.epilog ], [ 0, %entry ]
- %current.037 = phi %struct.node* [ %spec.store.select, %sw.epilog ], [ %list, %entry ]
- %next1 = getelementptr inbounds %struct.node, %struct.node* %current.037, i64 0, i32 1
- %0 = load %struct.node*, %struct.node** %next1, align 8
- %cmp2 = icmp eq %struct.node* %0, %current.037
- %spec.store.select = select i1 %cmp2, %struct.node* null, %struct.node* %0
- %type = getelementptr inbounds %struct.node, %struct.node* %current.037, i64 0, i32 0
- %1 = load i8, i8* %type, align 8
+ %current.037 = phi ptr [ %spec.store.select, %sw.epilog ], [ %list, %entry ]
+ %next1 = getelementptr inbounds %struct.node, ptr %current.037, i64 0, i32 1
+ %0 = load ptr, ptr %next1, align 8
+ %cmp2 = icmp eq ptr %0, %current.037
+ %spec.store.select = select i1 %cmp2, ptr null, ptr %0
+ %1 = load i8, ptr %current.037, align 8
switch i8 %1, label %sw.epilog [
i8 1, label %sw.bb
i8 2, label %sw.bb3
sw.epilog: ; preds = %while.body, %sw.bb17, %sw.bb15, %sw.bb13, %sw.bb11, %sw.bb9, %sw.bb7, %sw.bb5, %sw.bb3, %sw.bb
%result.1 = phi i32 [ %result.038, %while.body ], [ %add18, %sw.bb17 ], [ %add16, %sw.bb15 ], [ %add14, %sw.bb13 ], [ %add12, %sw.bb11 ], [ %add10, %sw.bb9 ], [ %add8, %sw.bb7 ], [ %add6, %sw.bb5 ], [ %add4, %sw.bb3 ], [ %add, %sw.bb ]
- %cmp = icmp eq %struct.node* %spec.store.select, null
+ %cmp = icmp eq ptr %spec.store.select, null
br i1 %cmp, label %while.end, label %while.body
while.end: ; preds = %sw.epilog, %entry
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i32* @test1() {
+define ptr @test1() {
%X = alloca { i32, i32 }
- %Y = getelementptr {i32,i32}, {i32,i32}* %X, i32 0, i32 1
- ret i32* %Y
+ %Y = getelementptr {i32,i32}, ptr %X, i32 0, i32 1
+ ret ptr %Y
; CHECK-LABEL: @test1
; CHECK: addi 3, 1, -4
; CHECK: blr
}
-define i32* @test2() {
+define ptr @test2() {
%X = alloca { i32, i32, i32, i32 }
- %Y = getelementptr {i32,i32,i32,i32}, {i32,i32,i32,i32}* %X, i32 0, i32 3
- ret i32* %Y
+ %Y = getelementptr {i32,i32,i32,i32}, ptr %X, i32 0, i32 3
+ ret ptr %Y
; CHECK-LABEL: @test2
; CHECK: addi 3, 1, -4
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) {
+define void @bn_mul_comba8(ptr nocapture %r, ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: bn_mul_comba8:
; CHECK: # %bb.0:
; CHECK-NEXT: std 4, -8(1) # 8-byte Folded Spill
; CHECK-NEXT: isel 3, 3, 5, 20
; CHECK-NEXT: std 3, 0(4)
; CHECK-NEXT: blr
- %1 = load i64, i64* %a, align 8
+ %1 = load i64, ptr %a, align 8
%conv = zext i64 %1 to i128
- %2 = load i64, i64* %b, align 8
+ %2 = load i64, ptr %b, align 8
%conv2 = zext i64 %2 to i128
%mul = mul nuw i128 %conv2, %conv
%shr = lshr i128 %mul, 64
- %agep = getelementptr inbounds i64, i64* %a, i64 1
- %3 = load i64, i64* %agep, align 8
+ %agep = getelementptr inbounds i64, ptr %a, i64 1
+ %3 = load i64, ptr %agep, align 8
%conv14 = zext i64 %3 to i128
%mul15 = mul nuw i128 %conv14, %conv
%add17 = add i128 %mul15, %shr
%shr19 = lshr i128 %add17, 64
%conv20 = trunc i128 %shr19 to i64
- %bgep = getelementptr inbounds i64, i64* %b, i64 1
- %4 = load i64, i64* %bgep, align 8
+ %bgep = getelementptr inbounds i64, ptr %b, i64 1
+ %4 = load i64, ptr %bgep, align 8
%conv28 = zext i64 %4 to i128
%mul31 = mul nuw i128 %conv28, %conv2
%conv32 = and i128 %add17, 18446744073709551615
%add37 = add i64 %conv36, %conv20
%cmp38 = icmp ult i64 %add37, %conv36
%conv148 = zext i1 %cmp38 to i64
- store i64 %conv148, i64* %r, align 8
+ store i64 %conv148, ptr %r, align 8
ret void
}
entry:
%x = alloca [2048 x float], align 4
%y = alloca [2048 x float], align 4
- %0 = bitcast [2048 x float]* %x to i8*
- call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #2
- %1 = bitcast [2048 x float]* %y to i8*
- call void @llvm.lifetime.start.p0i8(i64 8192, i8* %1) #2
+ call void @llvm.lifetime.start.p0(i64 8192, ptr %x) #2
+ call void @llvm.lifetime.start.p0(i64 8192, ptr %y) #2
br label %for.body.i
; CHECK-LABEL: @foo
for.body.i: ; preds = %for.body.i.preheader, %for.body.i
%accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
%i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
- %arrayidx.i = getelementptr inbounds [2048 x float], [2048 x float]* %x, i64 0, i64 %i.08.i
- %v14 = load float, float* %arrayidx.i, align 4
+ %arrayidx.i = getelementptr inbounds [2048 x float], ptr %x, i64 0, i64 %i.08.i
+ %v14 = load float, ptr %arrayidx.i, align 4
%conv.i = fpext float %v14 to double
- %arrayidx1.i = getelementptr inbounds [2048 x float], [2048 x float]* %y, i64 0, i64 %i.08.i
- %v15 = load float, float* %arrayidx1.i, align 4
+ %arrayidx1.i = getelementptr inbounds [2048 x float], ptr %y, i64 0, i64 %i.08.i
+ %v15 = load float, ptr %arrayidx1.i, align 4
%conv2.i = fpext float %v15 to double
%mul.i = fmul double %conv.i, %conv2.i
%add.i = fadd double %accumulator.09.i, %mul.i
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
-declare void @bar(float*, float*)
+declare void @bar(ptr, ptr)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
attributes #0 = { nounwind readonly }
attributes #1 = { nounwind }
%a = alloca %struct.S0, align 8
%a.coerce.fca.0.extract = extractvalue [2 x i64] %a.coerce, 0
%a.coerce.fca.1.extract = extractvalue [2 x i64] %a.coerce, 1
- %a.0.a.0..sroa_cast = bitcast %struct.S0* %a to i64*
- store i64 %a.coerce.fca.0.extract, i64* %a.0.a.0..sroa_cast, align 8
+ store i64 %a.coerce.fca.0.extract, ptr %a, align 8
%tmp.sroa.2.0.extract.trunc = trunc i64 %a.coerce.fca.1.extract to i8
- %a.8.a.8..sroa_idx = getelementptr inbounds %struct.S0, %struct.S0* %a, i64 0, i32 1, i64 4
- store i8 %tmp.sroa.2.0.extract.trunc, i8* %a.8.a.8..sroa_idx, align 8
- %a.4.a.4..sroa_idx = getelementptr inbounds %struct.S0, %struct.S0* %a, i64 0, i32 1
- %a.4.a.4..sroa_cast = bitcast [5 x i8]* %a.4.a.4..sroa_idx to i40*
- %a.4.a.4.bf.load = load i40, i40* %a.4.a.4..sroa_cast, align 4
+ %a.8.a.8..sroa_idx = getelementptr inbounds %struct.S0, ptr %a, i64 0, i32 1, i64 4
+ store i8 %tmp.sroa.2.0.extract.trunc, ptr %a.8.a.8..sroa_idx, align 8
+ %a.4.a.4..sroa_idx = getelementptr inbounds %struct.S0, ptr %a, i64 0, i32 1
+ %a.4.a.4.bf.load = load i40, ptr %a.4.a.4..sroa_idx, align 4
%bf.lshr = lshr i40 %a.4.a.4.bf.load, 31
%bf.lshr.tr = trunc i40 %bf.lshr to i32
%bf.cast = and i32 %bf.lshr.tr, 127
%struct.X = type { [5 x i8] }
-define i32 @test1([4 x i32]* %P, i32 %i) {
+define i32 @test1(ptr %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.4 = getelementptr [4 x i32], [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = load i32, i32* %tmp.4 ; <i32> [#uses=1]
+ %tmp.4 = getelementptr [4 x i32], ptr %P, i32 %tmp.2, i32 1 ; <ptr> [#uses=1]
+ %tmp.5 = load i32, ptr %tmp.4 ; <i32> [#uses=1]
ret i32 %tmp.5
}
-define i32 @test2(%struct.X* %P, i32 %i) {
+define i32 @test2(ptr %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.5 = getelementptr %struct.X, %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp.6 = load i8, i8* %tmp.5 ; <i8> [#uses=1]
+ %tmp.5 = getelementptr %struct.X, ptr %P, i32 %tmp.2, i32 0, i32 1 ; <ptr> [#uses=1]
+ %tmp.6 = load i8, ptr %tmp.5 ; <i8> [#uses=1]
%tmp.7 = sext i8 %tmp.6 to i32 ; <i32> [#uses=1]
ret i32 %tmp.7
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-%struct.x = type { i64 (i8*, i64, i64, %struct._IO_FILE*)* }
-%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
+%struct.x = type { ptr }
+%struct._IO_FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%struct._IO_marker = type { ptr, ptr, i32 }
-@_ZL1y = internal constant %struct.x { i64 (i8*, i64, i64, %struct._IO_FILE*)* @fread }, align 8
+@_ZL1y = internal constant %struct.x { ptr @fread }, align 8
; Function Attrs: nounwind
-define %struct.x* @_Z3foov() #0 {
+define ptr @_Z3foov() #0 {
entry:
- ret %struct.x* @_ZL1y
+ ret ptr @_ZL1y
}
-declare i64 @fread(i8*, i64, i64, %struct._IO_FILE*) #1
+declare i64 @fread(ptr, i64, i64, ptr) #1
; CHECK: .section .data.rel.ro
; CHECK: .quad fread
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - | FileCheck %s
; Check that codegen for an addrspace cast succeeds without error.
-define <4 x i32 addrspace(1)*> @f (<4 x i32*> %x) {
- %1 = addrspacecast <4 x i32*> %x to <4 x i32 addrspace(1)*>
- ret <4 x i32 addrspace(1)*> %1
+define <4 x ptr addrspace(1)> @f (<4 x ptr> %x) {
+ %1 = addrspacecast <4 x ptr> %x to <4 x ptr addrspace(1)>
+ ret <4 x ptr addrspace(1)> %1
; CHECK-LABEL: @f
}
; Check that fairly complicated addrspace cast and operations succeed without error.
%struct = type opaque
-define void @g (%struct addrspace(10)** %x) {
- %1 = load %struct addrspace(10)*, %struct addrspace(10)** %x
- %2 = addrspacecast %struct addrspace(10)* %1 to %struct addrspace(11)*
- %3 = bitcast %struct addrspace(11)* %2 to i8 addrspace(11)*
- %4 = getelementptr i8, i8 addrspace(11)* %3, i64 16
- %5 = bitcast i8 addrspace(11)* %4 to %struct addrspace(10)* addrspace(11)*
- %6 = load %struct addrspace(10)*, %struct addrspace(10)* addrspace(11)* %5
- store %struct addrspace(10)* %6, %struct addrspace(10)** undef
+define void @g (ptr %x) {
+ %1 = load ptr addrspace(10), ptr %x
+ %2 = addrspacecast ptr addrspace(10) %1 to ptr addrspace(11)
+ %3 = getelementptr i8, ptr addrspace(11) %2, i64 16
+ %4 = load ptr addrspace(10), ptr addrspace(11) %3
+ store ptr addrspace(10) %4, ptr undef
ret void
; CHECK-LABEL: @g
}
; RUN: llc -verify-machineinstrs %s -mtriple=powerpc64-unknown-linux-gnu -O2 -o - -optimize-regalloc=false -regalloc=fast | FileCheck %s
-declare void @func(i8*, i64, i64)
+declare void @func(ptr, i64, i64)
-define void @test(i8* %context, i32** %elementArrayPtr, i32 %value) {
+define void @test(ptr %context, ptr %elementArrayPtr, i32 %value) {
entry:
%cmp = icmp eq i32 %value, 0
br i1 %cmp, label %lreturn, label %lnext
lnext:
- %elementArray = load i32*, i32** %elementArrayPtr, align 8
+ %elementArray = load ptr, ptr %elementArrayPtr, align 8
; CHECK: lwz [[LDREG:[0-9]+]], 140(1) # 4-byte Folded Reload
; CHECK: # implicit-def: $x[[TEMPREG:[0-9]+]]
- %element = load i32, i32* %elementArray, align 4
+ %element = load i32, ptr %elementArray, align 4
; CHECK: mr [[TEMPREG]], [[LDREG]]
; CHECK: clrldi 4, [[TEMPREG]], 32
%element.ext = zext i32 %element to i64
%value.ext = zext i32 %value to i64
- call void @func(i8* %context, i64 %value.ext, i64 %element.ext)
+ call void @func(ptr %context, i64 %value.ext, i64 %element.ext)
br label %lreturn
lreturn:
; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff < \
; RUN: %s | FileCheck %s
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @foo, i8* null }]
-@llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @bar, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @foo, ptr null }]
+@llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @bar, ptr null }]
define dso_local void @foo() {
entry:
; RUN: llvm-objdump --syms %t.o | FileCheck --check-prefix=SYM %s
@_MergedGlobals = global <{ i32, i32 }> <{ i32 1, i32 2 }>, align 4
-@var1 = alias i32, getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @_MergedGlobals, i32 0, i32 0)
-@var2 = alias i32, getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @_MergedGlobals, i32 0, i32 1)
-@var3 = alias i32, i32* @var2
+@var1 = alias i32, getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0)
+@var2 = alias i32, getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 1)
+@var3 = alias i32, ptr @var2
define void @foo(i32 %a1, i32 %a2, i32 %a3) {
- store i32 %a1, i32* getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @_MergedGlobals, i32 0, i32 0), align 4
- store i32 %a2, i32* getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @_MergedGlobals, i32 0, i32 1), align 4
+ store i32 %a1, ptr getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0), align 4
+ store i32 %a2, ptr getelementptr inbounds (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 1), align 4
ret void
}
; CHECK: ERROR: alias without a base object is not yet supported on AIX
@bar = global i32 42
-@test = alias i32, inttoptr(i32 42 to i32*)
+@test = alias i32, inttoptr(i32 42 to ptr)
; RUN: FileCheck --check-prefix=ASM %s
@var = global i32 42
-@var1 = alias i32, i32* @var
-@var2 = alias i32, i32* @var1
-@var_l = linkonce_odr alias i32, i32* @var
-@var_i = internal alias i32, i32* @var
-@var_h = hidden alias i32, i32* @var
-@var_p = protected alias i32, i32* @var
+@var1 = alias i32, ptr @var
+@var2 = alias i32, ptr @var1
+@var_l = linkonce_odr alias i32, ptr @var
+@var_i = internal alias i32, ptr @var
+@var_h = hidden alias i32, ptr @var
+@var_p = protected alias i32, ptr @var
@array = global [2 x i32] [i32 1, i32 2], align 4
-@x = global i32* bitcast (i8* getelementptr (i8, i8* bitcast ([2 x i32]* @array to i8*), i64 4) to i32*), align 4
-@bitcast_alias = alias i32*, i32** @x
+@x = global ptr getelementptr (i8, ptr @array, i64 4), align 4
+@bitcast_alias = alias ptr, ptr @x
define i32 @fun() {
ret i32 0
}
%FunTy = type i32()
-@fun_weak = weak alias %FunTy, %FunTy* @fun
-@fun_hidden = hidden alias %FunTy, %FunTy* @fun
-@fun_ptr = global i32()* @fun_weak
+@fun_weak = weak alias %FunTy, ptr @fun
+@fun_hidden = hidden alias %FunTy, ptr @fun
+@fun_ptr = global ptr @fun_weak
define i32 @test() {
entry:
- %tmp = load i32, i32* @var1
- %tmp1 = load i32, i32* @var2
- %tmp0 = load i32, i32* @var_i
+ %tmp = load i32, ptr @var1
+ %tmp1 = load i32, ptr @var2
+ %tmp0 = load i32, ptr @var_i
%tmp2 = call i32 @fun()
%tmp3 = add i32 %tmp, %tmp2
%tmp4 = call i32 @fun_weak()
%tmp5 = add i32 %tmp3, %tmp4
%tmp6 = add i32 %tmp1, %tmp5
%tmp7 = add i32 %tmp6, %tmp0
- %fun_ptr1 = alloca i32 ()*
- store i32 ()* @fun_weak, i32 ()** %fun_ptr1
- %callee.knr.cast = bitcast i32 ()** %fun_ptr1 to i32 ()*
- %tmp8 = call i32 %callee.knr.cast()
+ %fun_ptr1 = alloca ptr
+ store ptr @fun_weak, ptr %fun_ptr1
+ %tmp8 = call i32 %fun_ptr1()
%tmp9 = call i32 @fun_hidden()
%tmp10 = add i32 %tmp7, %tmp8
%tmp11 = add i32 %tmp10, %tmp9
; CHECK-ASM64-NEXT: .byte 0x1f # AllocaUsed
; CHECK-ASM64-NEXT: # -- End function
entry:
- %0 = load i32, i32* @var
+ %0 = load i32, ptr @var
%1 = alloca i8, i32 %0
- %2 = bitcast i8* %1 to i32*
- %3 = load i32, i32* %2
- ret i32 %3
+ %2 = load i32, ptr %1
+ ret i32 %2
}
define hidden i32 @fun() {
entry:
- %0 = load i32, i32* @var, align 4
+ %0 = load i32, ptr @var, align 4
ret i32 %0
}
define float @caller(float %f) {
%AlignedBuffer = alloca [32 x i32], align 32
- %Pointer = getelementptr inbounds [32 x i32], [32 x i32]* %AlignedBuffer, i64 0, i64 0
- call void @callee(i32* %Pointer)
+ call void @callee(ptr %AlignedBuffer)
ret float %f
}
-declare void @callee(i32*)
+declare void @callee(ptr)
; 32BIT-LABEL: .caller:
; 32BIT: stw 30, -16(1)
define void @test_i1(i1 %b) {
entry:
%frombool = zext i1 %b to i8
- store i8 %frombool, i8* @global_i1, align 1
+ store i8 %frombool, ptr @global_i1, align 1
ret void
}
define void @test_i1zext(i1 zeroext %b) {
entry:
%frombool = zext i1 %b to i8
- store i8 %frombool, i8 * @global_i1, align 1
+ store i8 %frombool, ptr @global_i1, align 1
ret void
}
define void @call_test_int_ptr() {
entry:
%b = alloca i32, align 4
- store i32 0, i32* %b, align 4
- call void @test_int_ptr(i32* %b)
+ store i32 0, ptr %b, align 4
+ call void @test_int_ptr(ptr %b)
ret void
}
; 64BIT: BL8_NOP <mcsymbol .test_int_ptr>, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x2, implicit-def $r1
; 64BIT: ADJCALLSTACKUP 112, 0, implicit-def dead $r1, implicit $r1
-define void @test_int_ptr(i32* %a) {
+define void @test_int_ptr(ptr %a) {
entry:
- %a.addr = alloca i32*, align 8
- store i32* %a, i32** %a.addr, align 8
+ %a.addr = alloca ptr, align 8
+ store ptr %a, ptr %a.addr, align 8
ret void
}
entry:
%i.addr = alloca i32, align 4
%b = alloca i8, align 1
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr %i.addr, align 4
%cmp = icmp ne i32 %0, 0
%frombool = zext i1 %cmp to i8
- store i8 %frombool, i8* %b, align 1
- %1 = load i8, i8* %b, align 1
+ store i8 %frombool, ptr %b, align 1
+ %1 = load i8, ptr %b, align 1
%tobool = trunc i8 %1 to i1
%call = call i32 @call_test_bool(i1 zeroext %tobool)
ret i32 %call
define void @call_test_floats() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
call float @test_floats(float %0, float %0, float %0)
ret void
}
define void @call_test_fpr_max() {
entry:
- %0 = load double, double* @d1, align 8
+ %0 = load double, ptr @d1, align 8
call double @test_fpr_max(double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0)
ret void
}
define void @call_test_mix() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load double, double* @d1, align 8
+ %0 = load float, ptr @f1, align 4
+ %1 = load double, ptr @d1, align 8
call i32 @test_mix(float %0, i32 1, double %1, i8 signext 97)
ret void
}
define void @call_test_vararg() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
%conv = fpext float %0 to double
- %1 = load double, double* @d1, align 8
+ %1 = load double, ptr @d1, align 8
call void (i32, ...) @test_vararg(i32 42, double %conv, double %1)
ret void
}
define void @call_test_vararg2() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
%conv = fpext float %0 to double
- %1 = load double, double* @d1, align 8
+ %1 = load double, ptr @d1, align 8
call void (i32, ...) @test_vararg(i32 42, double %conv, i32 42, double %1)
ret void
}
define void @call_test_vararg3() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
%conv = fpext float %0 to double
- %1 = load double, double* @d1, align 8
+ %1 = load double, ptr @d1, align 8
call void (i32, ...) @test_vararg(i32 42, double %conv, i64 42, double %1)
ret void
}
define void @call_test_vararg4() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
call void (i32, ...) @test_vararg(i32 42, float %0)
ret void
}
; Basic saving of integral type arguments to the parameter save area.
define void @call_test_stackarg_int() {
entry:
- %0 = load i8, i8* @c, align 1
- %1 = load i16, i16* @si, align 2
- %2 = load i32, i32* @i, align 4
- %3 = load i64, i64* @lli, align 8
- %4 = load i32, i32* @i, align 4
+ %0 = load i8, ptr @c, align 1
+ %1 = load i16, ptr @si, align 2
+ %2 = load i32, ptr @i, align 4
+ %3 = load i64, ptr @lli, align 8
+ %4 = load i32, ptr @i, align 4
call void @test_stackarg_int(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i8 zeroext %0, i16 signext %1, i32 %2, i64 %3, i32 %4)
ret void
}
; The float and double arguments will pass in both fpr as well as parameter save area.
define void @call_test_stackarg_float() {
entry:
- %0 = load float, float* @f, align 4
- %1 = load double, double* @d, align 8
+ %0 = load float, ptr @f, align 4
+ %1 = load double, ptr @d, align 8
call void @test_stackarg_float(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, float %0, double %1)
ret void
}
define void @call_test_stackarg_float2() {
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
call void (i32, i32, i32, i32, i32, i32, ...) @test_stackarg_float2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, double %0)
ret void
}
; A double arg will pass on the stack in PPC32 if there is only one available GPR.
define void @call_test_stackarg_float3() {
entry:
- %0 = load double, double* @d, align 8
- %1 = load float, float* @f, align 4
+ %0 = load double, ptr @d, align 8
+ %1 = load float, ptr @f, align 4
call void (i32, i32, i32, i32, i32, i32, i32, ...) @test_stackarg_float3(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, double %0, float %1)
ret void
}
define void @caller_ints_stack() {
entry:
- %0 = load i64, i64* @ll1, align 8
- %1 = load i16, i16* @si1, align 2
- %2 = load i8, i8* @ch, align 1
- %3 = load i32, i32* @ui, align 4
- %4 = load i32, i32* @sint, align 4
- %5 = load i64, i64* @ll2, align 8
- %6 = load i8, i8* @uc1, align 1
- %7 = load i32, i32* @i1, align 4
+ %0 = load i64, ptr @ll1, align 8
+ %1 = load i16, ptr @si1, align 2
+ %2 = load i8, ptr @ch, align 1
+ %3 = load i32, ptr @ui, align 4
+ %4 = load i32, ptr @sint, align 4
+ %5 = load i64, ptr @ll2, align 8
+ %6 = load i8, ptr @uc1, align 1
+ %7 = load i32, ptr @i1, align 4
%call = call i64 @test_ints_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i64 %0, i16 signext %1, i8 zeroext %2, i32 %3, i32 %4, i64 %5, i8 zeroext %6, i32 %7)
ret void
}
define void @test_i1_stack(i32 %a, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 zeroext %b) {
entry:
%frombool = zext i1 %b to i8
- store i8 %frombool, i8* @globali1, align 1
+ store i8 %frombool, ptr @globali1, align 1
ret void
}
define void @caller_fpr_stack() {
entry:
- %0 = load float, float* @f14, align 4
- %1 = load double, double* @d15, align 8
- %2 = load float, float* @f16, align 4
+ %0 = load float, ptr @f14, align 4
+ %1 = load double, ptr @d15, align 8
+ %2 = load float, ptr @f16, align 4
%call = call double @test_fpr_stack(double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 0x3FE6666666666666, double 8.000000e-01, double 9.000000e-01, double 1.000000e-01, double 1.100000e-01, double 1.200000e-01, double 1.300000e-01, float %0, double %1, float %2)
ret void
}
define void @call_test_vararg() {
entry:
- %0 = load float, float* @f1, align 4
+ %0 = load float, ptr @f1, align 4
%conv = fpext float %0 to double
call void (i32, ...) @test_vararg(i32 42, double %conv, float %0)
ret void
entry:
%s1 = alloca %struct.S, align 32
%agg.tmp = alloca %struct.S, align 32
- call void @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, %struct.S* byval(%struct.S) align 32 %agg.tmp)
+ call void @foo(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, ptr byval(%struct.S) align 32 %agg.tmp)
ret void
}
-declare void @foo(i32, i32, i32, i32, i32, i32, i32, i32, %struct.S* byval(%struct.S) align 32)
+declare void @foo(i32, i32, i32, i32, i32, i32, i32, i32, ptr byval(%struct.S) align 32)
; CHECK: LLVM ERROR: Pass-by-value arguments with alignment greater than register width are not supported.
define void @call_test_byval_mem1() {
entry:
- %call = call zeroext i8 @test_byval_mem1(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, %struct_S1* byval(%struct_S1) align 1 @gS1)
+ %call = call zeroext i8 @test_byval_mem1(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, ptr byval(%struct_S1) align 1 @gS1)
ret void
}
; ASM64BIT: bl .test_byval_mem1
; ASM64BIT: addi 1, 1, 128
-define zeroext i8 @test_byval_mem1(i32, i32, i32, i32, i32, i32, i32, i32, %struct_S1* byval(%struct_S1) align 1 %s) {
+define zeroext i8 @test_byval_mem1(i32, i32, i32, i32, i32, i32, i32, i32, ptr byval(%struct_S1) align 1 %s) {
entry:
- %gep = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0
- %load = load i8, i8* %gep, align 1
+ %load = load i8, ptr %s, align 1
ret i8 %load
}
define void @call_test_byval_mem2() {
entry:
- %call = call zeroext i8 @test_byval_mem2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, %struct_S256* byval(%struct_S256) align 1 @gS256)
+ %call = call zeroext i8 @test_byval_mem2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, ptr byval(%struct_S256) align 1 @gS256)
ret void
}
; ASM64BIT: addi 1, 1, 368
-define zeroext i8 @test_byval_mem2(i32, i32, i32, i32, i32, i32, i32, i32, %struct_S256* byval(%struct_S256) align 1 %s) {
+define zeroext i8 @test_byval_mem2(i32, i32, i32, i32, i32, i32, i32, i32, ptr byval(%struct_S256) align 1 %s) {
entry:
- %gep = getelementptr inbounds %struct_S256, %struct_S256* %s, i32 0, i32 0, i32 255
- %load = load i8, i8* %gep, align 1
+ %gep = getelementptr inbounds %struct_S256, ptr %s, i32 0, i32 0, i32 255
+ %load = load i8, ptr %gep, align 1
ret i8 %load
}
define void @call_test_byval_mem3() {
entry:
- call void @test_byval_mem3(i32 42, float 0x40091EB860000000, %struct_S57* byval(%struct_S57) align 1 @gS57)
+ call void @test_byval_mem3(i32 42, float 0x40091EB860000000, ptr byval(%struct_S57) align 1 @gS57)
ret void
}
; ASM64BIT: bl .test_byval_mem3
; ASM64BIT: addi 1, 1, 128
-define void @test_byval_mem3(i32, float, %struct_S57* byval(%struct_S57) align 1 %s) {
+define void @test_byval_mem3(i32, float, ptr byval(%struct_S57) align 1 %s) {
entry:
ret void
}
define void @call_test_byval_mem4() {
entry:
- call void @test_byval_mem4(i32 42, %struct_S31* byval(%struct_S31) align 1 @gS31, %struct_S256* byval(%struct_S256) align 1 @gS256)
+ call void @test_byval_mem4(i32 42, ptr byval(%struct_S31) align 1 @gS31, ptr byval(%struct_S256) align 1 @gS256)
ret void
}
; ASM64BIT: bl .test_byval_mem4
; ASM64BIT: addi 1, 1, 352
-define void @test_byval_mem4(i32, %struct_S31* byval(%struct_S31) align 1, %struct_S256* byval(%struct_S256) align 1 %s) {
+define void @test_byval_mem4(i32, ptr byval(%struct_S31) align 1, ptr byval(%struct_S256) align 1 %s) {
entry:
ret void
}
%struct.Spill = type { [12 x i64 ] }
@GS = external global %struct.Spill, align 4
-define i64 @test(%struct.Spill* byval(%struct.Spill) align 4 %s) {
+define i64 @test(ptr byval(%struct.Spill) align 4 %s) {
entry:
- %arrayidx_a = getelementptr inbounds %struct.Spill, %struct.Spill* %s, i32 0, i32 0, i32 2
- %arrayidx_b = getelementptr inbounds %struct.Spill, %struct.Spill* %s, i32 0, i32 0, i32 10
- %a = load i64, i64* %arrayidx_a
- %b = load i64, i64* %arrayidx_b
+ %arrayidx_a = getelementptr inbounds %struct.Spill, ptr %s, i32 0, i32 0, i32 2
+ %arrayidx_b = getelementptr inbounds %struct.Spill, ptr %s, i32 0, i32 0, i32 10
+ %a = load i64, ptr %arrayidx_a
+ %b = load i64, ptr %arrayidx_b
%add = add i64 %a, %b
ret i64 %add
}
define void @call_test_byval_1Byte() {
entry:
%s0 = alloca %struct.S0, align 8
- %call = call zeroext i8 @test_byval_1Byte(%struct.S0* byval(%struct.S0) align 1 %s0, %struct.S1* byval(%struct.S1) align 1 @gS1)
+ %call = call zeroext i8 @test_byval_1Byte(ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S1) align 1 @gS1)
ret void
}
; ASM64-NEXT: addi 1, 1, 128
-define zeroext i8 @test_byval_1Byte(%struct.S0* byval(%struct.S0) align 1 %s0, %struct.S1* byval(%struct.S1) align 1 %s) {
+define zeroext i8 @test_byval_1Byte(ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S1) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
define void @call_test_byval_2Byte() {
entry:
- %0 = load float, float* @f, align 4
- %call = call zeroext i8 @test_byval_2Byte(i32 signext 42, float %0, %struct.S2* byval(%struct.S2) align 1 @gS2, float %0, i32 signext 43)
+ %0 = load float, ptr @f, align 4
+ %call = call zeroext i8 @test_byval_2Byte(i32 signext 42, float %0, ptr byval(%struct.S2) align 1 @gS2, float %0, i32 signext 43)
ret void
}
; ASM64-NEXT: nop
; ASM64-NEXT: addi 1, 1, 112
-define zeroext i8 @test_byval_2Byte(i32, float, %struct.S2* byval(%struct.S2) align 1 %s, float, i32) {
+define zeroext i8 @test_byval_2Byte(i32, float, ptr byval(%struct.S2) align 1 %s, float, i32) {
entry:
- %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s, i32 0, i32 0, i32 1
- %4 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S2, ptr %s, i32 0, i32 0, i32 1
+ %4 = load i8, ptr %arrayidx, align 1
ret i8 %4
}
define void @call_test_byval_3Byte() {
entry:
- %call = call zeroext i16 @test_byval_3Byte(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, %struct.S3* byval(%struct.S3) align 1 @gS3, i32 42)
+ %call = call zeroext i16 @test_byval_3Byte(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, ptr byval(%struct.S3) align 1 @gS3, i32 42)
ret void
}
; ASM64-NEXT: nop
-define zeroext i16 @test_byval_3Byte(i32, i32, i32, i32, i32, i32, i32, %struct.S3* byval(%struct.S3) align 1 %s, i32) {
+define zeroext i16 @test_byval_3Byte(i32, i32, i32, i32, i32, i32, i32, ptr byval(%struct.S3) align 1 %s, i32) {
entry:
- %gep = getelementptr inbounds %struct.S3, %struct.S3* %s, i32 0, i32 1
- %8 = load i16, i16* %gep, align 1
+ %gep = getelementptr inbounds %struct.S3, ptr %s, i32 0, i32 1
+ %8 = load i16, ptr %gep, align 1
ret i16 %8
}
entry:
%s0 = alloca %struct.S0, align 8
%s4a = alloca %struct.S4A, align 4
- %call = call signext i32 @test_byval_4Byte(%struct.S4* byval(%struct.S4) align 1 @gS4, %struct.S0* byval(%struct.S0) align 1 %s0, %struct.S4A* byval(%struct.S4A) align 4 %s4a)
+ %call = call signext i32 @test_byval_4Byte(ptr byval(%struct.S4) align 1 @gS4, ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S4A) align 4 %s4a)
ret void
}
; ASM64-NEXT: addi 1, 1, 128
-define signext i32 @test_byval_4Byte(%struct.S4* byval(%struct.S4) align 1 %s, %struct.S0* byval(%struct.S0) align 1, %struct.S4A* byval(%struct.S4A) align 4 %s4a) {
+define signext i32 @test_byval_4Byte(ptr byval(%struct.S4) align 1 %s, ptr byval(%struct.S0) align 1, ptr byval(%struct.S4A) align 4 %s4a) {
entry:
- %arrayidx = getelementptr inbounds %struct.S4, %struct.S4* %s, i32 0, i32 0, i32 3
- %gep = getelementptr inbounds %struct.S4A, %struct.S4A* %s4a, i32 0, i32 0
- %1 = load i8, i8* %arrayidx, align 1
- %2 = load i32, i32* %gep, align 4
+ %arrayidx = getelementptr inbounds %struct.S4, ptr %s, i32 0, i32 0, i32 3
+ %1 = load i8, ptr %arrayidx, align 1
+ %2 = load i32, ptr %s4a, align 4
%conv = zext i8 %1 to i32
%add = add nsw i32 %2, %conv
ret i32 %add
define void @call_test_byval_5Byte() {
entry:
- %call = call zeroext i8 @test_byval_5Byte(%struct.S5* byval(%struct.S5) align 1 @gS5)
+ %call = call zeroext i8 @test_byval_5Byte(ptr byval(%struct.S5) align 1 @gS5)
ret void
}
-declare zeroext i8 @test_byval_5Byte(%struct.S5* byval(%struct.S5) align 1)
+declare zeroext i8 @test_byval_5Byte(ptr byval(%struct.S5) align 1)
; CHECK-LABEL: name: call_test_byval_5Byte{{.*}}
define void @call_test_byval_6Byte() {
entry:
- %call = call zeroext i8 @test_byval_6Byte(%struct.S6* byval(%struct.S6) align 1 @gS6)
+ %call = call zeroext i8 @test_byval_6Byte(ptr byval(%struct.S6) align 1 @gS6)
ret void
}
-declare zeroext i8 @test_byval_6Byte(%struct.S6* byval(%struct.S6) align 1)
+declare zeroext i8 @test_byval_6Byte(ptr byval(%struct.S6) align 1)
; CHECK-LABEL: name: call_test_byval_6Byte{{.*}}
define void @call_test_byval_7Byte() {
entry:
- %call = call zeroext i8 @test_byval_7Byte(%struct.S7* byval(%struct.S7) align 1 @gS7)
+ %call = call zeroext i8 @test_byval_7Byte(ptr byval(%struct.S7) align 1 @gS7)
ret void
}
-declare zeroext i8 @test_byval_7Byte(%struct.S7* byval(%struct.S7) align 1)
+declare zeroext i8 @test_byval_7Byte(ptr byval(%struct.S7) align 1)
; CHECK-LABEL: name: call_test_byval_7Byte{{.*}}
define void @call_test_byval_8Byte() {
entry:
- %call = call zeroext i8 @test_byval_8Byte(%struct.S8* byval(%struct.S8) align 1 @gS8)
+ %call = call zeroext i8 @test_byval_8Byte(ptr byval(%struct.S8) align 1 @gS8)
ret void
}
-declare zeroext i8 @test_byval_8Byte(%struct.S8* byval(%struct.S8) align 1)
+declare zeroext i8 @test_byval_8Byte(ptr byval(%struct.S8) align 1)
; CHECK-LABEL: name: call_test_byval_8Byte{{.*}}
define void @call_test_byval_32Byte() {
entry:
- %call = call zeroext i8 @test_byval_32Byte(%struct.S32* byval(%struct.S32) align 1 @gS32)
+ %call = call zeroext i8 @test_byval_32Byte(ptr byval(%struct.S32) align 1 @gS32)
ret void
}
; ASM64-NEXT: bl .test_byval_32Byte
; ASM64-NEXT: nop
-define zeroext i8 @test_byval_32Byte(%struct.S32* byval(%struct.S32) align 1 %s) {
+define zeroext i8 @test_byval_32Byte(ptr byval(%struct.S32) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S32, %struct.S32* %s, i32 0, i32 0, i32 21
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S32, ptr %s, i32 0, i32 0, i32 21
+ %0 = load i8, ptr %arrayidx, align 1
ret i8 %0
}
define void @call_test_byval_31Byte() {
entry:
- %call = call double @test_byval_31Byte(%struct.S31* byval(%struct.S31) align 1 @gS31)
+ %call = call double @test_byval_31Byte(ptr byval(%struct.S31) align 1 @gS31)
ret void
}
-define double @test_byval_31Byte(%struct.S31* byval(%struct.S31) align 1 %s) {
+define double @test_byval_31Byte(ptr byval(%struct.S31) align 1 %s) {
entry:
- %gep = getelementptr inbounds %struct.S31, %struct.S31* %s, i32 0, i32 3
- %load = load double, double* %gep, align 1
+ %gep = getelementptr inbounds %struct.S31, ptr %s, i32 0, i32 3
+ %load = load double, ptr %gep, align 1
ret double %load
}
define i32 @call_test_byval_homogeneous_float_struct() {
entry:
%s = alloca %struct.F, align 4
- %0 = bitcast %struct.F* %s to i8*
- call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 12, i1 false)
- %call = call i32 @test_byval_homogeneous_float_struct(%struct.F* byval(%struct.F) align 4 %s)
+ call void @llvm.memset.p0.i32(ptr align 4 %s, i8 0, i32 12, i1 false)
+ %call = call i32 @test_byval_homogeneous_float_struct(ptr byval(%struct.F) align 4 %s)
ret i32 %call
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
-declare i32 @test_byval_homogeneous_float_struct(%struct.F* byval(%struct.F) align 4)
+declare i32 @test_byval_homogeneous_float_struct(ptr byval(%struct.F) align 4)
; CHECK-LABEL: name: call_test_byval_homogeneous_float_struct{{.*}}
define dso_local { double, double } @dblCmplxRetCallee() {
entry:
%retval = alloca { double, double }, align 8
- %retval.realp = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
- store double 1.000000e+00, double* %retval.realp, align 8
- %retval.imagp = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
- store double 0.000000e+00, double* %retval.imagp, align 8
- %0 = load { double, double }, { double, double }* %retval, align 8
+ %retval.realp = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
+ store double 1.000000e+00, ptr %retval.realp, align 8
+ %retval.imagp = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1
+ store double 0.000000e+00, ptr %retval.imagp, align 8
+ %0 = load { double, double }, ptr %retval, align 8
ret { double, double } %0
}
%call = call { double, double } @dblCmplxRetCallee()
%0 = extractvalue { double, double } %call, 0
%1 = extractvalue { double, double } %call, 1
- store double %0, double* getelementptr inbounds ({ double, double }, { double, double }* @gcd, i32 0, i32 0), align 8
- store double %1, double* getelementptr inbounds ({ double, double }, { double, double }* @gcd, i32 0, i32 1), align 8
- call void bitcast (void (...)* @anchor to void ()*)()
+ store double %0, ptr getelementptr inbounds ({ double, double }, ptr @gcd, i32 0, i32 0), align 8
+ store double %1, ptr getelementptr inbounds ({ double, double }, ptr @gcd, i32 0, i32 1), align 8
+ call void @anchor()
ret void
}
define dso_local { float, float } @fltCmplxRetCallee() {
entry:
%retval = alloca { float, float }, align 4
- %retval.realp = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
- %retval.imagp = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
- store float 1.000000e+00, float* %retval.realp, align 4
- store float 0.000000e+00, float* %retval.imagp, align 4
- %0 = load { float, float }, { float, float }* %retval, align 4
+ %retval.realp = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+ %retval.imagp = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+ store float 1.000000e+00, ptr %retval.realp, align 4
+ store float 0.000000e+00, ptr %retval.imagp, align 4
+ %0 = load { float, float }, ptr %retval, align 4
ret { float, float } %0
}
%call = call { float, float } @fltCmplxRetCallee()
%0 = extractvalue { float, float } %call, 0
%1 = extractvalue { float, float } %call, 1
- store float %0, float* getelementptr inbounds ({ float, float }, { float, float }* @gcf, i32 0, i32 0), align 4
- store float %1, float* getelementptr inbounds ({ float, float }, { float, float }* @gcf, i32 0, i32 1), align 4
- call void bitcast (void (...)* @anchor to void ()*)()
+ store float %0, ptr getelementptr inbounds ({ float, float }, ptr @gcf, i32 0, i32 0), align 4
+ store float %1, ptr getelementptr inbounds ({ float, float }, ptr @gcf, i32 0, i32 1), align 4
+ call void @anchor()
ret void
}
define dso_local { ppc_fp128, ppc_fp128 } @fp128CmplxRetCallee() {
entry:
%retval = alloca { ppc_fp128, ppc_fp128 }, align 16
- %retval.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
- %retval.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
- store ppc_fp128 0xM7ffeffffffffffffffffffffffffffff, ppc_fp128* %retval.realp, align 16
- store ppc_fp128 0xM3ffefffffffffffffffffffffffffffe, ppc_fp128* %retval.imagp, align 16
- %0 = load { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, align 16
+ %retval.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %retval, i32 0, i32 0
+ %retval.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %retval, i32 0, i32 1
+ store ppc_fp128 0xM7ffeffffffffffffffffffffffffffff, ptr %retval.realp, align 16
+ store ppc_fp128 0xM3ffefffffffffffffffffffffffffffe, ptr %retval.imagp, align 16
+ %0 = load { ppc_fp128, ppc_fp128 }, ptr %retval, align 16
ret { ppc_fp128, ppc_fp128 } %0
}
%call = call { ppc_fp128, ppc_fp128 } @fp128CmplxRetCallee()
%0 = extractvalue { ppc_fp128, ppc_fp128 } %call, 0
%1 = extractvalue { ppc_fp128, ppc_fp128 } %call, 1
- store ppc_fp128 %0, ppc_fp128* getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* @gcfp128, i32 0, i32 0), align 16
- store ppc_fp128 %1, ppc_fp128* getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* @gcfp128, i32 0, i32 1), align 16
- call void bitcast (void (...)* @anchor to void ()*)()
+ store ppc_fp128 %0, ptr getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, ptr @gcfp128, i32 0, i32 0), align 16
+ store ppc_fp128 %1, ptr getelementptr inbounds ({ ppc_fp128, ppc_fp128 }, ptr @gcfp128, i32 0, i32 1), align 16
+ call void @anchor()
ret void
}
; RUN: -stop-after=machine-cp -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN: FileCheck %s --check-prefix=EXTABI
-define double @dbl_test(double %a, double* %b) local_unnamed_addr {
+define double @dbl_test(double %a, ptr %b) local_unnamed_addr {
entry:
- %0 = load volatile double, double* %b, align 4
+ %0 = load volatile double, ptr %b, align 4
%add = fadd double %0, %a
- store volatile double %add, double* %b, align 4
+ store volatile double %add, ptr %b, align 4
;; Clobbered all vector and floating point registers. In the default Altivec
;; ABI this forces a register spill since no registers are free to use.
tail call void asm sideeffect "nop", "~{v19},~{v18},~{v17},~{v16},~{v15},~{v14},~{v13},~{v12},~{v11},~{v10},~{v9},~{v8},~{v7},~{v6},~{v5},~{v4},~{v3},~{v2},~{v1},~{v0},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
%mul = fmul double %a, %a
- %1 = load volatile double, double* %b, align 4
+ %1 = load volatile double, ptr %b, align 4
%add1 = fadd double %mul, %1
- store volatile double %add1, double* %b, align 4
- %2 = load volatile double, double* %b, align 4
+ store volatile double %add1, ptr %b, align 4
+ %2 = load volatile double, ptr %b, align 4
ret double %2
}
-define <4 x i32> @vec_test(<4 x i32> %a, <4 x i32>* %b) local_unnamed_addr {
+define <4 x i32> @vec_test(<4 x i32> %a, ptr %b) local_unnamed_addr {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %b, align 4
+ %0 = load volatile <4 x i32>, ptr %b, align 4
%add = add <4 x i32> %0, %a
- store volatile <4 x i32> %add, <4 x i32>* %b, align 4
+ store volatile <4 x i32> %add, ptr %b, align 4
tail call void asm sideeffect "nop", "~{v19},~{v18},~{v17},~{v16},~{v15},~{v14},~{v13},~{v12},~{v11},~{v10},~{v9},~{v8},~{v7},~{v6},~{v5},~{v4},~{v3},~{v2},~{v1},~{v0},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
%mul = mul <4 x i32> %a, %a
- %1 = load volatile <4 x i32>, <4 x i32>* %b, align 4
+ %1 = load volatile <4 x i32>, ptr %b, align 4
%add1 = add <4 x i32> %mul, %1
- store volatile <4 x i32> %add1, <4 x i32>* %b, align 4
- %2 = load volatile <4 x i32>, <4 x i32>* %b, align 4
+ store volatile <4 x i32> %add1, ptr %b, align 4
+ %2 = load volatile <4 x i32>, ptr %b, align 4
ret <4 x i32> %2
}
define i32 @main() #0 !dbg !8 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
ret i32 0, !dbg !12
}
declare i32 @__xlcxx_personality_v1(...)
; Function Attrs: mustprogress noinline optnone
-define linkonce_odr void @func2() #1 align 2 personality i8* bitcast (i32 (...)* @__xlcxx_personality_v1 to i8*) {
+define linkonce_odr void @func2() #1 align 2 personality ptr @__xlcxx_personality_v1 {
entry:
- %0 = alloca i8*, align 8
+ %0 = alloca ptr, align 8
%1 = alloca i32, align 4
br label %2
to label %2 unwind label %lpad
lpad: ; preds = %3
- %5 = landingpad { i8*, i32 }
+ %5 = landingpad { ptr, i32 }
cleanup
- %6 = extractvalue { i8*, i32 } %5, 0
- store i8* %6, i8** %0, align 8
- %7 = extractvalue { i8*, i32 } %5, 1
- store i32 %7, i32* %1, align 4
+ %6 = extractvalue { ptr, i32 } %5, 0
+ store ptr %6, ptr %0, align 8
+ %7 = extractvalue { ptr, i32 } %5, 1
+ store i32 %7, ptr %1, align 4
br label %eh.resume
8: ; preds = %2
ret void
eh.resume: ; preds = %lpad
- %9 = load i8*, i8** %0, align 8
- %10 = load i32, i32* %1, align 4
- %11 = insertvalue { i8*, i32 } undef, i8* %9, 0
- %12 = insertvalue { i8*, i32 } %11, i32 %10, 1
- resume { i8*, i32 } %12
+ %9 = load ptr, ptr %0, align 8
+ %10 = load i32, ptr %1, align 4
+ %11 = insertvalue { ptr, i32 } undef, ptr %9, 0
+ %12 = insertvalue { ptr, i32 } %11, i32 %10, 1
+ resume { ptr, i32 } %12
}
attributes #0 = { nounwind }
%fvalue = alloca float, align 4
%taken = alloca i32, align 4
%data = alloca i32, align 4
- store float 1.000000e+00, float* %fvalue, align 4
- %0 = load float, float* %fvalue, align 4
+ store float 1.000000e+00, ptr %fvalue, align 4
+ %0 = load float, ptr %fvalue, align 4
%1 = call float asm "fneg $0,$1\0A\09", "=b,b,~{f31},~{f30},~{f29},~{f28},~{f27}"(float %0)
- store float %1, float* %fvalue, align 4
- store i32 123, i32* %data, align 4
- %2 = load i32, i32* %data, align 4
+ store float %1, ptr %fvalue, align 4
+ store i32 123, ptr %data, align 4
+ %2 = load i32, ptr %data, align 4
%3 = call i32 asm "cntlzw $0, $1\0A\09", "=b,b,~{r31},~{r30},~{r29},~{r28}"(i32 %2)
- store i32 %3, i32* %taken, align 4
- %4 = load i32, i32* %taken, align 4
+ store i32 %3, ptr %taken, align 4
+ %4 = load i32, ptr %taken, align 4
%conv = sitofp i32 %4 to float
- %5 = load float, float* %fvalue, align 4
+ %5 = load float, ptr %fvalue, align 4
%add = fadd float %conv, %5
ret float %add
}
entry:
%taken = alloca <4 x i32>, align 16
%data = alloca <4 x i32>, align 16
- store <4 x i32> <i32 123, i32 0, i32 0, i32 0>, <4 x i32>* %data, align 16
+ store <4 x i32> <i32 123, i32 0, i32 0, i32 0>, ptr %data, align 16
call void asm sideeffect "", "~{v31},~{v30},~{v29},~{v28}"()
- %0 = load <4 x i32>, <4 x i32>* %taken, align 16
+ %0 = load <4 x i32>, ptr %taken, align 16
ret <4 x i32> %0
}
; CHECK-LABEL: f:
; CHECK: __ssp_canary_word
; CHECK: TB_SSP_CANARY
-define i32 @f() #0 personality i8* bitcast (i32 (...)* @__xlcxx_personality_v1 to i8*) {
- invoke i32 undef(i8* undef)
+define i32 @f() #0 personality ptr @__xlcxx_personality_v1 {
+ invoke i32 undef(ptr undef)
to label %invoke unwind label %lpad
invoke:
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %gep = getelementptr inbounds i32, i32* %var, i32 1
- %ret = load i32, i32* %gep, align 4
+ store i32 0, ptr %var, align 4
+ %gep = getelementptr inbounds i32, ptr %var, i32 1
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
lpad:
- landingpad { i8*, i32 }
- catch i8* null
+ landingpad { ptr, i32 }
+ catch ptr null
unreachable
}
; CHECK-NOT: TB_SSP_CANARY
define i32 @f2() #0 {
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %gep = getelementptr inbounds i32, i32* %var, i32 1
- %ret = load i32, i32* %gep, align 4
+ store i32 0, ptr %var, align 4
+ %gep = getelementptr inbounds i32, ptr %var, i32 1
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
%vf.addr = alloca <4 x float>, align 16
%d1.addr = alloca double, align 8
%vc1.addr = alloca <16 x i8>, align 16
- store <4 x i32> %vi1, <4 x i32>* %vi1.addr, align 16
- store i32 %i1, i32* %i1.addr, align 4
- store i32 %i2, i32* %i2.addr, align 4
- store float %f1, float* %f1.addr, align 4
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- store double %d1, double* %d1.addr, align 8
- store <16 x i8> %vc1, <16 x i8>* %vc1.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- store <4 x float> %0, <4 x float>* %__a.addr.i, align 16
- %1 = load <4 x float>, <4 x float>* %__a.addr.i, align 16
- %2 = load <4 x float>, <4 x float>* %__a.addr.i, align 16
+ store <4 x i32> %vi1, ptr %vi1.addr, align 16
+ store i32 %i1, ptr %i1.addr, align 4
+ store i32 %i2, ptr %i2.addr, align 4
+ store float %f1, ptr %f1.addr, align 4
+ store <4 x float> %vf, ptr %vf.addr, align 16
+ store double %d1, ptr %d1.addr, align 8
+ store <16 x i8> %vc1, ptr %vc1.addr, align 16
+ %0 = load <4 x float>, ptr %vf.addr, align 16
+ store <4 x float> %0, ptr %__a.addr.i, align 16
+ %1 = load <4 x float>, ptr %__a.addr.i, align 16
+ %2 = load <4 x float>, ptr %__a.addr.i, align 16
%3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %2) #2
ret <4 x float> %3
}
%retval = alloca <4 x float>, align 16
%x.addr = alloca i32, align 4
%vf = alloca <4 x float>, align 16
- store i32 %x, i32* %x.addr, align 4
- store <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float>* %vf, align 16
- %0 = load i32, i32* %x.addr, align 4
+ store i32 %x, ptr %x.addr, align 4
+ store <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, ptr %vf, align 16
+ %0 = load i32, ptr %x.addr, align 4
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %1 = load <4 x float>, <4 x float>* %vf, align 16
- store <4 x float> %1, <4 x float>* %retval, align 16
+ %1 = load <4 x float>, ptr %vf, align 16
+ store <4 x float> %1, ptr %retval, align 16
br label %return
if.end: ; preds = %entry
- %2 = load <4 x float>, <4 x float>* %vf, align 16
- store <4 x float> %2, <4 x float>* %__a.addr.i, align 16
- %3 = load <4 x float>, <4 x float>* %__a.addr.i, align 16
+ %2 = load <4 x float>, ptr %vf, align 16
+ store <4 x float> %2, ptr %__a.addr.i, align 16
+ %3 = load <4 x float>, ptr %__a.addr.i, align 16
%4 = bitcast <4 x float> %3 to <4 x i32>
%and.i = and <4 x i32> %4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
- store <4 x i32> %and.i, <4 x i32>* %__res.i, align 16
- %5 = load <4 x i32>, <4 x i32>* %__res.i, align 16
+ store <4 x i32> %and.i, ptr %__res.i, align 16
+ %5 = load <4 x i32>, ptr %__res.i, align 16
%6 = bitcast <4 x i32> %5 to <4 x float>
- store <4 x float> %6, <4 x float>* %retval, align 16
+ store <4 x float> %6, ptr %retval, align 16
br label %return
return: ; preds = %if.end, %if.then
- %7 = load <4 x float>, <4 x float>* %retval, align 16
+ %7 = load <4 x float>, ptr %retval, align 16
ret <4 x float> %7
}
define void @f(<4 x float> %vf, ...) #0 {
entry:
%vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
+ store <4 x float> %vf, ptr %vf.addr, align 16
ret void
}
%struct.S = type { i32, i32 }
%struct.D = type { float, double }
-%struct.SD = type { %struct.S*, %struct.D }
+%struct.SD = type { ptr, %struct.D }
@__const.main.s = private unnamed_addr constant %struct.S { i32 10, i32 20 }, align 4
@__const.main.d = private unnamed_addr constant %struct.D { float 1.000000e+01, double 2.000000e+01 }, align 8
-define double @_Z10add_structifd1SP2SD1Di(i32 %value, float %fvalue, double %dvalue, %struct.S* byval(%struct.S) align 4 %s, %struct.SD* %dp, %struct.D* byval(%struct.D) align 4 %0, i32 %v2) #0 {
+define double @_Z10add_structifd1SP2SD1Di(i32 %value, float %fvalue, double %dvalue, ptr byval(%struct.S) align 4 %s, ptr %dp, ptr byval(%struct.D) align 4 %0, i32 %v2) #0 {
entry:
%d = alloca %struct.D, align 8
%value.addr = alloca i32, align 4
%fvalue.addr = alloca float, align 4
%dvalue.addr = alloca double, align 8
- %dp.addr = alloca %struct.SD*, align 4
+ %dp.addr = alloca ptr, align 4
%v2.addr = alloca i32, align 4
- %1 = bitcast %struct.D* %d to i8*
- %2 = bitcast %struct.D* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %1, i8* align 4 %2, i32 16, i1 false)
- store i32 %value, i32* %value.addr, align 4
- store float %fvalue, float* %fvalue.addr, align 4
- store double %dvalue, double* %dvalue.addr, align 8
- store %struct.SD* %dp, %struct.SD** %dp.addr, align 4
- store i32 %v2, i32* %v2.addr, align 4
- %3 = load double, double* %dvalue.addr, align 8
- %4 = load float, float* %fvalue.addr, align 4
- %conv = fpext float %4 to double
- %add = fadd double %3, %conv
- %5 = load i32, i32* %value.addr, align 4
- %conv1 = sitofp i32 %5 to double
+ call void @llvm.memcpy.p0.p0.i32(ptr align 8 %d, ptr align 4 %0, i32 16, i1 false)
+ store i32 %value, ptr %value.addr, align 4
+ store float %fvalue, ptr %fvalue.addr, align 4
+ store double %dvalue, ptr %dvalue.addr, align 8
+ store ptr %dp, ptr %dp.addr, align 4
+ store i32 %v2, ptr %v2.addr, align 4
+ %1 = load double, ptr %dvalue.addr, align 8
+ %2 = load float, ptr %fvalue.addr, align 4
+ %conv = fpext float %2 to double
+ %add = fadd double %1, %conv
+ %3 = load i32, ptr %value.addr, align 4
+ %conv1 = sitofp i32 %3 to double
%add2 = fadd double %add, %conv1
- %i1 = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 0
- %6 = load i32, i32* %i1, align 4
- %conv3 = sitofp i32 %6 to double
+ %4 = load i32, ptr %s, align 4
+ %conv3 = sitofp i32 %4 to double
%add4 = fadd double %add2, %conv3
- %7 = load %struct.SD*, %struct.SD** %dp.addr, align 4
- %d5 = getelementptr inbounds %struct.SD, %struct.SD* %7, i32 0, i32 1
- %d1 = getelementptr inbounds %struct.D, %struct.D* %d5, i32 0, i32 1
- %8 = load double, double* %d1, align 8
- %add6 = fadd double %add4, %8
- %f1 = getelementptr inbounds %struct.D, %struct.D* %d, i32 0, i32 0
- %9 = load float, float* %f1, align 8
- %conv7 = fpext float %9 to double
+ %5 = load ptr, ptr %dp.addr, align 4
+ %d5 = getelementptr inbounds %struct.SD, ptr %5, i32 0, i32 1
+ %d1 = getelementptr inbounds %struct.D, ptr %d5, i32 0, i32 1
+ %6 = load double, ptr %d1, align 8
+ %add6 = fadd double %add4, %6
+ %7 = load float, ptr %d, align 8
+ %conv7 = fpext float %7 to double
%add8 = fadd double %add6, %conv7
- %10 = load i32, i32* %v2.addr, align 4
- %conv9 = sitofp i32 %10 to double
+ %8 = load i32, ptr %v2.addr, align 4
+ %conv9 = sitofp i32 %8 to double
%add10 = fadd double %add8, %conv9
ret double %add10
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg) #1
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) #1
define i32 @main() {
entry:
%sd = alloca %struct.SD, align 8
%agg.tmp = alloca %struct.S, align 4
%agg.tmp4 = alloca %struct.D, align 8
- store i32 0, i32* %retval, align 4
- %0 = bitcast %struct.S* %s to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 bitcast (%struct.S* @__const.main.s to i8*), i32 8, i1 false)
- %1 = bitcast %struct.D* %d to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %1, i8* align 8 bitcast (%struct.D* @__const.main.d to i8*), i32 16, i1 false)
- %sp = getelementptr inbounds %struct.SD, %struct.SD* %sd, i32 0, i32 0
- store %struct.S* %s, %struct.S** %sp, align 8
- %d1 = getelementptr inbounds %struct.SD, %struct.SD* %sd, i32 0, i32 1
- %f1 = getelementptr inbounds %struct.D, %struct.D* %d1, i32 0, i32 0
- store float 1.000000e+02, float* %f1, align 8
- %d2 = getelementptr inbounds %struct.SD, %struct.SD* %sd, i32 0, i32 1
- %d13 = getelementptr inbounds %struct.D, %struct.D* %d2, i32 0, i32 1
- store double 2.000000e+02, double* %d13, align 8
- %2 = bitcast %struct.S* %agg.tmp to i8*
- %3 = bitcast %struct.S* %s to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %2, i8* align 4 %3, i32 8, i1 false)
- %4 = bitcast %struct.D* %agg.tmp4 to i8*
- %5 = bitcast %struct.D* %d to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %4, i8* align 8 %5, i32 16, i1 false)
- %call = call double @_Z10add_structifd1SP2SD1Di(i32 1, float 2.000000e+00, double 3.000000e+00, %struct.S* byval(%struct.S) align 4 %agg.tmp, %struct.SD* %sd, %struct.D* byval(%struct.D) align 4 %agg.tmp4, i32 7)
+ store i32 0, ptr %retval, align 4
+ call void @llvm.memcpy.p0.p0.i32(ptr align 4 %s, ptr align 4 @__const.main.s, i32 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr align 8 %d, ptr align 8 @__const.main.d, i32 16, i1 false)
+ store ptr %s, ptr %sd, align 8
+ %d1 = getelementptr inbounds %struct.SD, ptr %sd, i32 0, i32 1
+ store float 1.000000e+02, ptr %d1, align 8
+ %d2 = getelementptr inbounds %struct.SD, ptr %sd, i32 0, i32 1
+ %d13 = getelementptr inbounds %struct.D, ptr %d2, i32 0, i32 1
+ store double 2.000000e+02, ptr %d13, align 8
+ call void @llvm.memcpy.p0.p0.i32(ptr align 4 %agg.tmp, ptr align 4 %s, i32 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr align 8 %agg.tmp4, ptr align 8 %d, i32 16, i1 false)
+ %call = call double @_Z10add_structifd1SP2SD1Di(i32 1, float 2.000000e+00, double 3.000000e+00, ptr byval(%struct.S) align 4 %agg.tmp, ptr %sd, ptr byval(%struct.D) align 4 %agg.tmp4, i32 7)
%add = fadd double %call, 1.000000e+00
%conv = fptosi double %add to i32
ret i32 %conv
}
-define double @_Z7add_bari1SfdP2SD1Di(i32 %value, %struct.S* byval(%struct.S) align 4 %s, float %fvalue, double %dvalue, %struct.SD* %dp, %struct.D* byval(%struct.D) align 4 %0, i32 %v2) #0 {
+define double @_Z7add_bari1SfdP2SD1Di(i32 %value, ptr byval(%struct.S) align 4 %s, float %fvalue, double %dvalue, ptr %dp, ptr byval(%struct.D) align 4 %0, i32 %v2) #0 {
entry:
%d = alloca %struct.D, align 8
%value.addr = alloca i32, align 4
%fvalue.addr = alloca float, align 4
%dvalue.addr = alloca double, align 8
- %dp.addr = alloca %struct.SD*, align 4
+ %dp.addr = alloca ptr, align 4
%v2.addr = alloca i32, align 4
- %1 = bitcast %struct.D* %d to i8*
- %2 = bitcast %struct.D* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %1, i8* align 4 %2, i32 16, i1 false)
- store i32 %value, i32* %value.addr, align 4
- store float %fvalue, float* %fvalue.addr, align 4
- store double %dvalue, double* %dvalue.addr, align 8
- store %struct.SD* %dp, %struct.SD** %dp.addr, align 4
- store i32 %v2, i32* %v2.addr, align 4
- %3 = load double, double* %dvalue.addr, align 8
- %4 = load float, float* %fvalue.addr, align 4
- %conv = fpext float %4 to double
- %add = fadd double %3, %conv
- %5 = load i32, i32* %value.addr, align 4
- %conv1 = sitofp i32 %5 to double
+ call void @llvm.memcpy.p0.p0.i32(ptr align 8 %d, ptr align 4 %0, i32 16, i1 false)
+ store i32 %value, ptr %value.addr, align 4
+ store float %fvalue, ptr %fvalue.addr, align 4
+ store double %dvalue, ptr %dvalue.addr, align 8
+ store ptr %dp, ptr %dp.addr, align 4
+ store i32 %v2, ptr %v2.addr, align 4
+ %1 = load double, ptr %dvalue.addr, align 8
+ %2 = load float, ptr %fvalue.addr, align 4
+ %conv = fpext float %2 to double
+ %add = fadd double %1, %conv
+ %3 = load i32, ptr %value.addr, align 4
+ %conv1 = sitofp i32 %3 to double
%add2 = fadd double %add, %conv1
- %i1 = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 0
- %6 = load i32, i32* %i1, align 4
- %conv3 = sitofp i32 %6 to double
+ %4 = load i32, ptr %s, align 4
+ %conv3 = sitofp i32 %4 to double
%add4 = fadd double %add2, %conv3
- %7 = load %struct.SD*, %struct.SD** %dp.addr, align 4
- %d5 = getelementptr inbounds %struct.SD, %struct.SD* %7, i32 0, i32 1
- %d1 = getelementptr inbounds %struct.D, %struct.D* %d5, i32 0, i32 1
- %8 = load double, double* %d1, align 8
- %add6 = fadd double %add4, %8
- %f1 = getelementptr inbounds %struct.D, %struct.D* %d, i32 0, i32 0
- %9 = load float, float* %f1, align 8
- %conv7 = fpext float %9 to double
+ %5 = load ptr, ptr %dp.addr, align 4
+ %d5 = getelementptr inbounds %struct.SD, ptr %5, i32 0, i32 1
+ %d1 = getelementptr inbounds %struct.D, ptr %d5, i32 0, i32 1
+ %6 = load double, ptr %d1, align 8
+ %add6 = fadd double %add4, %6
+ %7 = load float, ptr %d, align 8
+ %conv7 = fpext float %7 to double
%add8 = fadd double %add6, %conv7
- %10 = load i32, i32* %v2.addr, align 4
- %conv9 = sitofp i32 %10 to double
+ %8 = load i32, ptr %v2.addr, align 4
+ %conv9 = sitofp i32 %8 to double
%add10 = fadd double %add8, %conv9
ret double %add10
}
define i32 @foo(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6, float %f7, float %f8, float %f9, float %f10, float %f11, float %f12, float %f13, float %f14, i32 %i8) {
entry:
%i1.addr = alloca i32, align 4
- store i32 %i1, i32* %i1.addr, align 4
+ store i32 %i1, ptr %i1.addr, align 4
ret i32 %i1
}
; RUN: -function-sections < %s | \
; RUN: FileCheck --check-prefixes=ASM,ASMFS,ASM64 %s
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
define void @_Z9throwFuncv() {
entry:
- %exception = call i8* @__cxa_allocate_exception(i32 4) #2
- %0 = bitcast i8* %exception to i32*
- store i32 1, i32* %0, align 16
- call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #3
+ %exception = call ptr @__cxa_allocate_exception(i32 4) #2
+ store i32 1, ptr %exception, align 16
+ call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #3
unreachable
}
; ASM: bl .__cxa_throw[PR]
; ASM: nop
-define i32 @_Z9catchFuncv() personality i8* bitcast (i32 (...)* @__xlcxx_personality_v1 to i8*) {
+define i32 @_Z9catchFuncv() personality ptr @__xlcxx_personality_v1 {
entry:
%retval = alloca i32, align 4
- %exn.slot = alloca i8*, align 4
+ %exn.slot = alloca ptr, align 4
%ehselector.slot = alloca i32, align 4
%0 = alloca i32, align 4
invoke void @_Z9throwFuncv()
br label %try.cont
lpad: ; preds = %entry
- %1 = landingpad { i8*, i32 }
- catch i8* bitcast (i8** @_ZTIi to i8*)
- %2 = extractvalue { i8*, i32 } %1, 0
- store i8* %2, i8** %exn.slot, align 4
- %3 = extractvalue { i8*, i32 } %1, 1
- store i32 %3, i32* %ehselector.slot, align 4
+ %1 = landingpad { ptr, i32 }
+ catch ptr @_ZTIi
+ %2 = extractvalue { ptr, i32 } %1, 0
+ store ptr %2, ptr %exn.slot, align 4
+ %3 = extractvalue { ptr, i32 } %1, 1
+ store i32 %3, ptr %ehselector.slot, align 4
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %sel = load i32, i32* %ehselector.slot, align 4
- %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #2
+ %sel = load i32, ptr %ehselector.slot, align 4
+ %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #2
%matches = icmp eq i32 %sel, %4
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn = load i8*, i8** %exn.slot, align 4
- %5 = call i8* @__cxa_begin_catch(i8* %exn) #2
- %6 = bitcast i8* %5 to i32*
- %7 = load i32, i32* %6, align 4
- store i32 %7, i32* %0, align 4
- store i32 2, i32* %retval, align 4
+ %exn = load ptr, ptr %exn.slot, align 4
+ %5 = call ptr @__cxa_begin_catch(ptr %exn) #2
+ %6 = load i32, ptr %5, align 4
+ store i32 %6, ptr %0, align 4
+ store i32 2, ptr %retval, align 4
call void @__cxa_end_catch() #2
br label %return
try.cont: ; preds = %invoke.cont
- store i32 1, i32* %retval, align 4
+ store i32 1, ptr %retval, align 4
br label %return
return: ; preds = %try.cont, %catch
- %8 = load i32, i32* %retval, align 4
- ret i32 %8
+ %7 = load i32, ptr %retval, align 4
+ ret i32 %7
eh.resume: ; preds = %catch.dispatch
- %exn1 = load i8*, i8** %exn.slot, align 4
- %sel2 = load i32, i32* %ehselector.slot, align 4
- %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn1, 0
- %lpad.val3 = insertvalue { i8*, i32 } %lpad.val, i32 %sel2, 1
- resume { i8*, i32 } %lpad.val3
+ %exn1 = load ptr, ptr %exn.slot, align 4
+ %sel2 = load i32, ptr %ehselector.slot, align 4
+ %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn1, 0
+ %lpad.val3 = insertvalue { ptr, i32 } %lpad.val, i32 %sel2, 1
+ resume { ptr, i32 } %lpad.val3
}
; ASMNFS: ._Z9catchFuncv:
; ASM: L..C1:
; ASM: .tc __ehinfo.1[TC],__ehinfo.1
-declare i8* @__cxa_allocate_exception(i32)
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare ptr @__cxa_allocate_exception(i32)
+declare void @__cxa_throw(ptr, ptr, ptr)
declare i32 @__xlcxx_personality_v1(...)
-declare i32 @llvm.eh.typeid.for(i8*)
-declare i8* @__cxa_begin_catch(i8*)
+declare i32 @llvm.eh.typeid.for(ptr)
+declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
; RUN: -filetype=obj -o %t64.o < %s
; RUN: llvm-readobj --symbols %t64.o | FileCheck --check-prefixes=CHECKSYM,CHECKSYM64 %s
-@foo_ext_weak_p = global void (...)* bitcast (void ()* @foo_ext_weak_ref to void (...)*)
+@foo_ext_weak_p = global ptr @foo_ext_weak_ref
@b_w = extern_weak global i32
declare extern_weak void @foo_ext_weak_ref()
define i32 @main() {
entry:
- %0 = load void (...)*, void (...)** @foo_ext_weak_p
- %callee.knr.cast = bitcast void (...)* %0 to void ()*
- call void %callee.knr.cast()
- call void @foo_ext_weak(i32* @b_w)
+ %0 = load ptr, ptr @foo_ext_weak_p
+ call void %0()
+ call void @foo_ext_weak(ptr @b_w)
ret i32 0
}
-declare extern_weak void @foo_ext_weak(i32*)
+declare extern_weak void @foo_ext_weak(ptr)
; COMMON: .globl main[DS] # -- Begin function main
; COMMON-NEXT: .globl .main
; RUN: -filetype=obj -o %t64.o < %s
; RUN: llvm-readobj --symbols %t64.o | FileCheck --check-prefixes=CHECKSYM,CHECKSYM64 %s
-@bar_p = global i32 (...)* @bar_ref, align 4
+@bar_p = global ptr @bar_ref, align 4
@b_e = external global i32, align 4
; Function Attrs: noinline nounwind optnone
; Function Attrs: noinline nounwind optnone
define i32 @main() {
entry:
- %call = call i32 @bar_extern(i32* @b_e)
+ %call = call i32 @bar_extern(ptr @b_e)
call void @foo()
- %0 = load i32 (...)*, i32 (...)** @bar_p, align 4
- %callee.knr.cast = bitcast i32 (...)* %0 to i32 ()*
- %call1 = call i32 %callee.knr.cast()
- %call2 = call i32 bitcast (i32 (...)* @bar_ref to i32 ()*)()
+ %0 = load ptr, ptr @bar_p, align 4
+ %call1 = call i32 %0()
+ %call2 = call i32 @bar_ref()
ret i32 0
}
-declare i32 @bar_extern(i32*)
+declare i32 @bar_extern(ptr)
; COMMON: .globl foo[DS] # -- Begin function foo
; RUN: -mtriple=powerpc64-ibm-aix-xcoff | \
; RUN: FileCheck %s -check-prefixes=AIX64
-declare void @clobber(i32*)
+declare void @clobber(ptr)
define dso_local float @frameptr_only(i32 %n, float %f) {
; AIX32-LABEL: frameptr_only:
; AIX64-NEXT: blr
entry:
%0 = alloca i32, i32 %n
- call void @clobber(i32* %0)
+ call void @clobber(ptr %0)
ret float %f
}
; AIX64-NEXT: ld 30, -16(1)
; AIX64-NEXT: blr
%ptr = alloca i32, i32 %n, align 64
- call void @clobber(i32* %ptr)
+ call void @clobber(ptr %ptr)
ret void
}
; RUN: -mattr=-altivec -data-sections=false -ignore-xcoff-visibility < %s | \
; RUN: FileCheck --check-prefix=IGNOREVISIBILITY-ASM %s
-@foo_p = global void ()* @zoo_extern_h, align 4
+@foo_p = global ptr @zoo_extern_h, align 4
@b = protected global i32 0, align 4
-define hidden void @foo_h(i32* %p) {
+define hidden void @foo_h(ptr %p) {
entry:
- %p.addr = alloca i32*, align 4
- store i32* %p, i32** %p.addr, align 4
- %0 = load i32*, i32** %p.addr, align 4
- %1 = load i32, i32* %0, align 4
+ %p.addr = alloca ptr, align 4
+ store ptr %p, ptr %p.addr, align 4
+ %0 = load ptr, ptr %p.addr, align 4
+ %1 = load i32, ptr %0, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* %0, align 4
+ store i32 %inc, ptr %0, align 4
ret void
}
define protected void @bar() {
entry:
- call void @foo_h(i32* @b)
- %0 = load void ()*, void ()** @foo_p, align 4
+ call void @foo_h(ptr @b)
+ %0 = load ptr, ptr @foo_p, align 4
call void %0()
ret void
}
; RUN: llvm-objdump -d %t | FileCheck \
; RUN: --check-prefixes=CHECKOBJ,ASMOBJ32,OBJ32 %s
-define signext i32 @callThroughPtr(i32 ()* nocapture) {
+define signext i32 @callThroughPtr(ptr nocapture) {
%2 = tail call signext i32 %0()
ret i32 %2
}
; OBJ32-NEXT: 80 41 00 14 lwz 2, 20(1)
; OBJ32-NEXT: addi 1, 1, 64
-define void @callThroughPtrWithArgs(void (i32, i16, i64)* nocapture) {
+define void @callThroughPtrWithArgs(ptr nocapture) {
tail call void %0(i32 signext 1, i16 zeroext 2, i64 3)
ret void
}
define void @bar() {
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4
+ %0 = load i32, ptr getelementptr inbounds (%struct.S, ptr @s, i32 0, i32 1), align 4
%1 = trunc i32 %0 to i8
- %2 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4
- call void @llvm.memset.p0i8.i32(i8* align 4 bitcast (%struct.S* @s to i8*), i8 %1, i32 %2, i1 false)
+ %2 = load i32, ptr getelementptr inbounds (%struct.S, ptr @s, i32 0, i32 1), align 4
+ call void @llvm.memset.p0.i32(ptr align 4 @s, i8 %1, i32 %2, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
; CHECK-LABEL: .bar:
; CHECK-NEXT: # %bb.0: # %entry
br label %__here
__here:
- store i64 ptrtoint (i8* blockaddress(@foo, %__here) to i64), i64* %tmp
+ store i64 ptrtoint (ptr blockaddress(@foo, %__here) to i64), ptr %tmp
ret void
}
- call void bitcast (void (...)* @foo to void ()*)()
+ call void @foo()
ret void
}
; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-define i8* @nest_receiver(i8* nest %arg) nounwind {
- ret i8* %arg
+define ptr @nest_receiver(ptr nest %arg) nounwind {
+ ret ptr %arg
}
-define i8* @nest_caller(i8* %arg) nounwind {
- %result = call i8* @nest_receiver(i8* nest %arg)
- ret i8* %result
+define ptr @nest_caller(ptr %arg) nounwind {
+ %result = call ptr @nest_receiver(ptr nest %arg)
+ ret ptr %result
}
; CHECK: LLVM ERROR: Nest arguments are unimplemented.
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec \
; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefixes=SYM,SYM64
-@__xlcxx_personality_v1 = alias i32 (), i32 ()* @__gxx_personality_v0
+@__xlcxx_personality_v1 = alias i32 (), ptr @__gxx_personality_v0
define i32 @__gxx_personality_v0() {
entry:
ret i32 1
}
-define dso_local signext i32 @_Z3foov() #0 personality i8* bitcast (i32 ()* @__xlcxx_personality_v1 to i8*) {
+define dso_local signext i32 @_Z3foov() #0 personality ptr @__xlcxx_personality_v1 {
entry:
%retval = alloca i32, align 4
- %exn.slot = alloca i8*, align 8
+ %exn.slot = alloca ptr, align 8
%ehselector.slot = alloca i32, align 4
invoke void @_Z3barv()
to label %invoke.cont unwind label %lpad
br label %try.cont
lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 }
- catch i8* null
- %1 = extractvalue { i8*, i32 } %0, 0
- store i8* %1, i8** %exn.slot, align 8
- %2 = extractvalue { i8*, i32 } %0, 1
- store i32 %2, i32* %ehselector.slot, align 4
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
+ %1 = extractvalue { ptr, i32 } %0, 0
+ store ptr %1, ptr %exn.slot, align 8
+ %2 = extractvalue { ptr, i32 } %0, 1
+ store i32 %2, ptr %ehselector.slot, align 4
br label %catch
catch: ; preds = %lpad
- %exn = load i8*, i8** %exn.slot, align 8
+ %exn = load ptr, ptr %exn.slot, align 8
br label %return
try.cont: ; preds = %invoke.cont
- store i32 2, i32* %retval, align 4
+ store i32 2, ptr %retval, align 4
br label %return
return: ; preds = %try.cont, %catch
; RUN: --relocation-model=pic -data-sections=false < %s | FileCheck --check-prefix=CHECK64 %s
@a = common global i32 0
-@b = constant i32* @a
+@b = constant ptr @a
;CHECK: .comm a[RW],4,2
;CHECK-NEXT: .csect .data[RW],2
; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
; RUN: -data-sections=false < %s | FileCheck --check-prefix=CHECK64 %s
-@foo_ptr = global void (...)* @foo
+@foo_ptr = global ptr @foo
declare void @foo(...)
-@bar_ptr1 = global void (...)* bitcast (void ()* @bar to void (...)*)
+@bar_ptr1 = global ptr @bar
define void @bar() {
entry:
ret void
; RUN: FileCheck --check-prefix=ASM %s
@__profc_main = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
-@__profd_main = private global { i64, i64, i64, i8*, i8*, i32, [4 x i16] } { i64 -2624081020897602054, i64 742261418966908927, i64 sub (i64 ptrtoint ([1 x i64]* @__profc_main to i64), i64 ptrtoint ({ i64, i64, i64, i8*, i8*, i32, [4 x i16] }* @__profd_main to i64)), i8* bitcast (i32 ()* @main to i8*), i8* null, i32 1, [4 x i16] zeroinitializer }, section "__llvm_prf_data", align 8
+@__profd_main = private global { i64, i64, i64, ptr, ptr, i32, [4 x i16] } { i64 -2624081020897602054, i64 742261418966908927, i64 sub (i64 ptrtoint (ptr @__profc_main to i64), i64 ptrtoint (ptr @__profd_main to i64)), ptr @main, ptr null, i32 1, [4 x i16] zeroinitializer }, section "__llvm_prf_data", align 8
; Test fallback of using sub expr for lowerRelativeReference
define signext i32 @main() {
; ASM: L..__profd_main:
; ASM: .vbyte 8, L..__profc_main-L..__profd_main
entry:
- %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_main, i64 0, i64 0), align 8
+ %pgocount = load i64, ptr @__profc_main, align 8
%0 = add i64 %pgocount, 1
- store i64 %0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_main, i64 0, i64 0), align 8
+ store i64 %0, ptr @__profc_main, align 8
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
ret i32 0
}
define void @test1() {
entry:
%s = alloca %struct.S, align 4
- call void @foo(%struct.S* sret(%struct.S) %s)
+ call void @foo(ptr sret(%struct.S) %s)
ret void
}
define void @test2() {
entry:
%t = alloca %struct.T, align 8
- call void @bar(%struct.T* sret(%struct.T) %t)
+ call void @bar(ptr sret(%struct.T) %t)
ret void
}
-declare void @foo(%struct.S* sret(%struct.S))
-declare void @bar(%struct.T* sret(%struct.T))
+declare void @foo(ptr sret(%struct.S))
+declare void @bar(ptr sret(%struct.T))
; MIR: name: test1
; MIR: stack:
; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck %s
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s
-@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @init1, i8* null }, { i32, void ()*, i8* } { i32 65535, void ()* @init2, i8* null }]
-@llvm.global_dtors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @destruct1, i8* null }, { i32, void ()*, i8* } { i32 65535, void ()* @destruct2, i8* null }]
+@llvm.global_ctors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @init1, ptr null }, { i32, ptr, ptr } { i32 65535, ptr @init2, ptr null }]
+@llvm.global_dtors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @destruct1, ptr null }, { i32, ptr, ptr } { i32 65535, ptr @destruct2, ptr null }]
define i32 @extFunc() {
entry:
@v = global i8 0
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @foo, i8* @v}]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @foo, ptr @v}]
define void @foo() {
ret void
; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s | FileCheck %s
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @foo, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @foo, ptr null }]
define internal void @foo() {
ret void
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -verify-machineinstrs < %s | FileCheck %s
-@llvm.global_ctors = appending global [5 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* bitcast (i32 (i32)* @cf1 to void ()*), i8* null }, { i32, void ()*, i8* } { i32 21, void ()* @cf2, i8* null }, { i32, void ()*, i8* } { i32 81, void ()* @cf3, i8* null }, { i32, void ()*, i8* } { i32 1125, void ()* @cf4, i8* null }, { i32, void ()*, i8* } { i32 64512, void ()* @cf5, i8* null }]
-@llvm.global_dtors = appending global [5 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 20, void ()* bitcast (i32 (i32)* @df1 to void ()*), i8* null }, { i32, void ()*, i8* } { i32 80, void ()* @df2, i8* null }, { i32, void ()*, i8* } { i32 1124, void ()* @df3, i8* null }, { i32, void ()*, i8* } { i32 64511, void ()* @df4, i8* null }, { i32, void ()*, i8* } { i32 65535, void ()* @df5, i8* null }]
+@llvm.global_ctors = appending global [5 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @cf1, ptr null }, { i32, ptr, ptr } { i32 21, ptr @cf2, ptr null }, { i32, ptr, ptr } { i32 81, ptr @cf3, ptr null }, { i32, ptr, ptr } { i32 1125, ptr @cf4, ptr null }, { i32, ptr, ptr } { i32 64512, ptr @cf5, ptr null }]
+@llvm.global_dtors = appending global [5 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 20, ptr @df1, ptr null }, { i32, ptr, ptr } { i32 80, ptr @df2, ptr null }, { i32, ptr, ptr } { i32 1124, ptr @df3, ptr null }, { i32, ptr, ptr } { i32 64511, ptr @df4, ptr null }, { i32, ptr, ptr } { i32 65535, ptr @df5, ptr null }]
define i32 @cf1(i32 %a) {
ret i32 %a
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store double %Val, double* @TGUninit, align 8
+ store double %Val, ptr @TGUninit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store double %Val, double* @TGInit, align 8
+ store double %Val, ptr @TGInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store double %Val, double* @TIInit, align 8
+ store double %Val, ptr @TIInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store double %Val, double* @TWInit, align 8
+ store double %Val, ptr @TWInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load double, double* @TGUninit, align 8
- %1 = load double, double* @GInit, align 8
+ %0 = load double, ptr @TGUninit, align 8
+ %1 = load double, ptr @GInit, align 8
%add = fadd double %0, %1
ret double %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load double, double* @TGInit, align 8
- %1 = load double, double* @GInit, align 8
+ %0 = load double, ptr @TGInit, align 8
+ %1 = load double, ptr @GInit, align 8
%add = fadd double %0, %1
ret double %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load double, double* @TIInit, align 8
- %1 = load double, double* @GInit, align 8
+ %0 = load double, ptr @TIInit, align 8
+ %1 = load double, ptr @GInit, align 8
%add = fadd double %0, %1
ret double %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load double, double* @TWInit, align 8
- %1 = load double, double* @GInit, align 8
+ %0 = load double, ptr @TWInit, align 8
+ %1 = load double, ptr @GInit, align 8
%add = fadd double %0, %1
ret double %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i32 %Val, i32* @TGUninit, align 4
+ store i32 %Val, ptr @TGUninit, align 4
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i32 %Val, i32* @TGInit, align 4
+ store i32 %Val, ptr @TGInit, align 4
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i32 %Val, i32* @TIUninit, align 4
+ store i32 %Val, ptr @TIUninit, align 4
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i32 %Val, i32* @TWUninit, align 4
+ store i32 %Val, ptr @TWUninit, align 4
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i32, i32* @TGUninit, align 4
- %1 = load i32, i32* @GInit, align 4
+ %0 = load i32, ptr @TGUninit, align 4
+ %1 = load i32, ptr @GInit, align 4
%add = add nsw i32 %1, %0
ret i32 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i32, i32* @TGInit, align 4
- %1 = load i32, i32* @GInit, align 4
+ %0 = load i32, ptr @TGInit, align 4
+ %1 = load i32, ptr @GInit, align 4
%add = add nsw i32 %1, %0
ret i32 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i32, i32* @TIUninit, align 4
- %1 = load i32, i32* @GInit, align 4
+ %0 = load i32, ptr @TIUninit, align 4
+ %1 = load i32, ptr @GInit, align 4
%add = add nsw i32 %1, %0
ret i32 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i32, i32* @TWUninit, align 4
- %1 = load i32, i32* @GInit, align 4
+ %0 = load i32, ptr @TWUninit, align 4
+ %1 = load i32, ptr @GInit, align 4
%add = add nsw i32 %1, %0
ret i32 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i64 %Val, i64* @TGInit, align 8
+ store i64 %Val, ptr @TGInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i64 %Val, i64* @TIUninit, align 8
+ store i64 %Val, ptr @TIUninit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i64 %Val, i64* @TIInit, align 8
+ store i64 %Val, ptr @TIInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- store i64 %Val, i64* @TWInit, align 8
+ store i64 %Val, ptr @TWInit, align 8
ret void
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i64, i64* @TGInit, align 8
- %1 = load i64, i64* @GInit, align 8
+ %0 = load i64, ptr @TGInit, align 8
+ %1 = load i64, ptr @GInit, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i64, i64* @TIUninit, align 8
- %1 = load i64, i64* @GInit, align 8
+ %0 = load i64, ptr @TIUninit, align 8
+ %1 = load i64, ptr @GInit, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i64, i64* @TIInit, align 8
- %1 = load i64, i64* @GInit, align 8
+ %0 = load i64, ptr @TIInit, align 8
+ %1 = load i64, ptr @GInit, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
; LARGE64-NEXT: mtlr 0
; LARGE64-NEXT: blr
entry:
- %0 = load i64, i64* @TWInit, align 8
- %1 = load i64, i64* @GInit, align 8
+ %0 = load i64, ptr @TWInit, align 8
+ %1 = load i64, ptr @GInit, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
define void @storesTIInit(i64 %Val) #0 {
entry:
- store i64 %Val, i64* @TIInit, align 8
+ store i64 %Val, ptr @TIInit, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly willreturn
define double @loadsTWInit() #1 {
entry:
- %0 = load double, double* @TWInit, align 8
- %1 = load double, double* @GInit, align 8
+ %0 = load double, ptr @TWInit, align 8
+ %1 = load double, ptr @GInit, align 8
%add = fadd double %0, %1
ret double %add
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
define void @storesTIUninit(i32 %Val) #0 {
entry:
- store i32 %Val, i32* @TIUninit, align 4
+ store i32 %Val, ptr @TIUninit, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly willreturn
define i32 @loadsTGInit() #1 {
entry:
- %0 = load i32, i32* @TGInit, align 4
- %1 = load i32, i32* @GInit, align 4
+ %0 = load i32, ptr @TGInit, align 4
+ %1 = load i32, ptr @GInit, align 4
%add = add nsw i32 %1, %0
ret i32 %add
}
@tls_global_long_long_internal_zero_initialized = internal thread_local global i64 0, align 8
@tls_global_long_long_weak_val_initialized = weak thread_local global i64 1, align 8
@tls_global_long_long_weak_zero_initialized = weak thread_local global i64 0, align 8
-@tls_global_alias_int_external_val_initialized = thread_local alias i32, i32* @tls_global_int_external_val_initialized
+@tls_global_alias_int_external_val_initialized = thread_local alias i32, ptr @tls_global_int_external_val_initialized
@const_ivar = constant i32 6, align 4
; RUN: llc -verify-machineinstrs -mtriple=powerpc-ibm-aix-xcoff < %s | \
; RUN: FileCheck --check-prefix=AIX-32 %s
-%0 = type { i8*, i8*, i8*, i8*, i8*, i32, i32, i32, i16, i16, [4 x i64] }
+%0 = type { ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i16, i16, [4 x i64] }
%1 = type { [167 x i64] }
%2 = type { [179 x i64] }
-%3 = type { i64, void (i32, %3*)*, i64, i64 }
+%3 = type { i64, ptr, i64, i64 }
-declare i32 @wibble(%1*) local_unnamed_addr #0
+declare i32 @wibble(ptr) local_unnamed_addr #0
-declare hidden fastcc i32 @spam(%1*, %2*, %3*) unnamed_addr #0
+declare hidden fastcc i32 @spam(ptr, ptr, ptr) unnamed_addr #0
; Function Attrs: nounwind
-define void @baz(%3* %0) local_unnamed_addr #2 {
+define void @baz(ptr %0) local_unnamed_addr #2 {
; AIX-64: std 31
; AIX-64: .byte 0x01 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 1
; AIX-32: stw 31
; AIX-32: .byte 0x01 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 1
- %2 = call signext i32 @wibble(%1* nonnull undef) #2
- %3 = call fastcc zeroext i32 @spam(%1* nonnull undef, %2* nonnull undef, %3* nonnull %0)
+ %2 = call signext i32 @wibble(ptr nonnull undef) #2
+ %3 = call fastcc zeroext i32 @spam(ptr nonnull undef, ptr nonnull undef, ptr nonnull %0)
unreachable
}
; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX.
-define void @create_trampoline(i8* %buffer, i8* %nval) nounwind {
+define void @create_trampoline(ptr %buffer, ptr %nval) nounwind {
entry:
- call void @llvm.init.trampoline(i8* %buffer, i8* bitcast (i32 (i32)* @nested to i8*) , i8* %nval)
+ call void @llvm.init.trampoline(ptr %buffer, ptr @nested , ptr %nval)
ret void
}
declare i32 @nested(i32);
-declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
define void @bar() {
entry:
- call void bitcast (void (...)* @foo to void ()*)()
- call void bitcast (void (...)* @long_undef_name to void ()*)()
+ call void @foo()
+ call void @long_undef_name()
ret void
}
; "memcpy" ExternalSymbol's, we pick up the user-defined version, even if this
; may lead to some undefined behavior.
-define dso_local signext i32 @memcpy(i8* %destination, i32 signext %num) {
+define dso_local signext i32 @memcpy(ptr %destination, i32 signext %num) {
entry:
ret i32 3
}
-define void @call_memcpy(i8* %p, i8* %q, i32 %n) {
+define void @call_memcpy(ptr %p, ptr %q, i32 %n) {
entry:
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1)
; This test check
; 1. The symbol table for .o file to verify .memcpy is a defined external label.
%struct.Test = type { double, double, double, double }
-define double @test(i32 signext %r3, i32 signext %r4, double %fpr1, double %fpr2, <2 x double> %v2, <2 x double> %v3, <2 x double> %v4, <2 x double> %v5, <2 x double> %v6, <2 x double> %v7, <2 x double> %v8, <2 x double> %v9, <2 x double> %v10, <2 x double> %v11, <2 x double> %v12, <2 x double> %v13, <2 x double> %vSpill, double %fpr3, double %fpr4, double %fpr5, double %fpr6, double %fpr7, double %fpr8, double %fpr9, double %fpr10, double %fpr11, double %fpr12, double %fpr13, i32 signext %gprSpill, %struct.Test* nocapture readonly byval(%struct.Test) align 4 %t) {
+define double @test(i32 signext %r3, i32 signext %r4, double %fpr1, double %fpr2, <2 x double> %v2, <2 x double> %v3, <2 x double> %v4, <2 x double> %v5, <2 x double> %v6, <2 x double> %v7, <2 x double> %v8, <2 x double> %v9, <2 x double> %v10, <2 x double> %v11, <2 x double> %v12, <2 x double> %v13, <2 x double> %vSpill, double %fpr3, double %fpr4, double %fpr5, double %fpr6, double %fpr7, double %fpr8, double %fpr9, double %fpr10, double %fpr11, double %fpr12, double %fpr13, i32 signext %gprSpill, ptr nocapture readonly byval(%struct.Test) align 4 %t) {
entry:
%vecext = extractelement <2 x double> %vSpill, i32 0
- %x = getelementptr inbounds %struct.Test, %struct.Test* %t, i32 0, i32 0
- %0 = load double, double* %x, align 4
+ %0 = load double, ptr %t, align 4
%add = fadd double %vecext, %0
ret double %add
}
; MIR64-NEXT: ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
; MIR64-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $f1
entry:
- %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
+ %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, ptr nonnull byval(%struct.Test) align 4 @__const.caller.t)
ret double %call
}
-declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, %struct.Test* byval(%struct.Test) align 8)
+declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, ptr byval(%struct.Test) align 8)
; 64BIT-NEXT: blr
entry:
- %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
+ %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, ptr nonnull byval(%struct.Test) align 4 @__const.caller.t)
ret double %call
}
-declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, %struct.Test* byval(%struct.Test) align 8)
+declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, ptr byval(%struct.Test) align 8)
ret <4 x float> %vecins
}
-define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat2(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-64-LABEL: testFloat2:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lwz 6, 0(3)
; CHECK-32-P10-NEXT: vinswlx 2, 4, 3
; CHECK-32-P10-NEXT: blr
entry:
- %0 = bitcast i8* %b to float*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
- %1 = bitcast i8* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+ %0 = load float, ptr %b, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
ret <4 x float> %vecins2
}
-define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat3(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-64-LABEL: testFloat3:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lis 6, 1
; CHECK-32-P10-NEXT: vinswlx 2, 4, 3
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
- %0 = bitcast i8* %add.ptr to float*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
- %1 = bitcast i8* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+ %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+ %0 = load float, ptr %add.ptr, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
ret <4 x float> %vecins2
}
ret <4 x float> %vecins1
}
-define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm2(<4 x float> %a, ptr %b) {
; CHECK-64-LABEL: testFloatImm2:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lwz 4, 0(3)
; CHECK-32-P10-NEXT: vinsw 2, 3, 8
; CHECK-32-P10-NEXT: blr
entry:
- %0 = bitcast i32* %b to float*
- %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 1
- %1 = bitcast i32* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 0
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+ %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 1
+ %0 = load float, ptr %b, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 0
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
ret <4 x float> %vecins2
}
-define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm3(<4 x float> %a, ptr %b) {
; CHECK-64-LABEL: testFloatImm3:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lis 4, 4
; CHECK-32-P10-NEXT: vinsw 2, 3, 8
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
- %0 = bitcast i32* %add.ptr to float*
- %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 68719476736
- %1 = bitcast i32* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 0
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 68719476736
+ %0 = load float, ptr %add.ptr, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 0
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
ret <4 x float> %vecins2
}
ret <2 x double> %vecins
}
-define <2 x double> @testDouble2(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble2(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-64-LABEL: testDouble2:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: ld 6, 0(3)
; CHECK-32-P10-NEXT: lxv 34, -16(1)
; CHECK-32-P10-NEXT: blr
entry:
- %0 = bitcast i8* %b to double*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
- %1 = bitcast i8* %add.ptr1 to double*
- %2 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
- %3 = load double, double* %1, align 8
- %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+ %0 = load double, ptr %b, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+ %1 = load double, ptr %add.ptr1, align 8
+ %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
ret <2 x double> %vecins2
}
-define <2 x double> @testDouble3(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble3(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-64-LABEL: testDouble3:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lis 6, 1
; CHECK-32-P10-NEXT: lxv 34, -16(1)
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
- %0 = bitcast i8* %add.ptr to double*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
- %1 = bitcast i8* %add.ptr1 to double*
- %2 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
- %3 = load double, double* %1, align 8
- %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+ %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+ %1 = load double, ptr %add.ptr1, align 8
+ %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
ret <2 x double> %vecins2
}
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm2(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm2(<2 x double> %a, ptr %b) {
; CHECK-64-LABEL: testDoubleImm2:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lfd 0, 0(3)
; CHECK-32-P10-NEXT: xxpermdi 34, 0, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
- %0 = bitcast i32* %b to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %0 = load double, ptr %b, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm3(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm3(<2 x double> %a, ptr %b) {
; CHECK-64-LABEL: testDoubleImm3:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lfd 0, 4(3)
; CHECK-32-P10-NEXT: xxpermdi 34, 0, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 1
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 1
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm4(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm4(<2 x double> %a, ptr %b) {
; CHECK-64-LABEL: testDoubleImm4:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: lis 4, 4
; CHECK-32-P10-NEXT: xxpermdi 34, 0, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm5(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm5(<2 x double> %a, ptr %b) {
; CHECK-64-LABEL: testDoubleImm5:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: li 4, 1
; CHECK-32-P10-NEXT: xxpermdi 34, 0, 34, 1
; CHECK-32-P10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 68719476736
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 68719476736
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
; LITERAL-NEXT: .tc L..CPI0_1[TC],L..CPI0_1
entry:
- %call = call i32 bitcast (i32 (...)* @vec_callee_stack to i32 (<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>)*)(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 49, i32 50, i32 51, i32 52>, <4 x i32> <i32 53, i32 54, i32 55, i32 56>)
+ %call = call i32 @vec_callee_stack(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 49, i32 50, i32 51, i32 52>, <4 x i32> <i32 53, i32 54, i32 55, i32 56>)
ret i32 %call
}
%i9 = mul nsw i32 %i8, %i7
%i10 = insertelement <8 x i16> %i6, i16 0, i32 2
%i11 = insertelement <8 x i16> %i10, i16 0, i32 3
- %i12 = load i32, i32* undef, align 4
+ %i12 = load i32, ptr undef, align 4
%i13 = ashr i32 %i12, 1
%i14 = mul i32 %i9, %i13
%i15 = trunc i32 %i14 to i16
%i18 = insertelement <8 x i16> %i17, i16 0, i32 5
%i19 = bitcast <8 x i16> %i18 to <16 x i8>
%i20 = shufflevector <16 x i8> %i19, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
- store <16 x i8> %i20, <16 x i8>* null, align 16
+ store <16 x i8> %i20, ptr null, align 16
%i21 = shufflevector <16 x i8> %i19, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9, i32 8, i32 9>
- store <16 x i8> %i21, <16 x i8>* undef, align 16
+ store <16 x i8> %i21, ptr undef, align 16
unreachable
bb22: ; preds = %bb
define void @bar() {
entry:
- call void bitcast (void (...)* @foo to void ()*)()
+ call void @foo()
ret void
}
; RUN: -mattr=-altivec -data-sections=false -filetype=obj -o %t64.o < %s
; RUN: llvm-readobj --symbols %t64.o | FileCheck --check-prefixes=CHECKSYM,CHECKSYM64 %s
-@foo_weak_p = global void (...)* bitcast (void ()* @foo_ref_weak to void (...)*), align 4
+@foo_weak_p = global ptr @foo_ref_weak, align 4
@b = weak global i32 0, align 4
-define weak void @foo_weak(i32* %p) {
+define weak void @foo_weak(ptr %p) {
entry:
- %p.addr = alloca i32*, align 4
- store i32* %p, i32** %p.addr, align 4
- %0 = load i32*, i32** %p.addr, align 4
- %1 = load i32, i32* %0, align 4
+ %p.addr = alloca ptr, align 4
+ store ptr %p, ptr %p.addr, align 4
+ %0 = load ptr, ptr %p.addr, align 4
+ %1 = load i32, ptr %0, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* %0, align 4
+ store i32 %inc, ptr %0, align 4
ret void
}
define i32 @main() {
entry:
- %0 = load void (...)*, void (...)** @foo_weak_p, align 4
- %callee.knr.cast = bitcast void (...)* %0 to void ()*
- call void %callee.knr.cast()
- call void @foo_weak(i32* @b)
+ %0 = load ptr, ptr @foo_weak_p, align 4
+ call void %0()
+ call void @foo_weak(ptr @b)
call void @foo_ref_weak()
ret i32 0
}
@c = external global i16, align 2
@globa = common global i32 0, align 4
-@ptr = internal global void (...)* null, align 4
+@ptr = internal global ptr null, align 4
; CHECK-NOT: .toc
; SYMS-NOT: Name: TOC
@f = common local_unnamed_addr global i32 0, align 4
@.str = private unnamed_addr constant [9 x i8] c"abcdefgh\00", align 1
-@p = global i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), align 4
+@p = global ptr @.str, align 4
define i8 @foo() {
entry:
- %0 = load i8*, i8** @p, align 4
- %1 = load i8, i8* %0, align 1
+ %0 = load ptr, ptr @p, align 4
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
define i32 @bar() {
entry:
- %0 = load i32, i32* @ivar, align 4
- %1 = load i32, i32* @const_ivar, align 4
+ %0 = load i32, ptr @ivar, align 4
+ %1 = load i32, ptr @const_ivar, align 4
%add = add nsw i32 %0, %1
- %2 = load i32, i32* @a, align 4
+ %2 = load i32, ptr @a, align 4
%add1 = add nsw i32 %add, %2
- %3 = load i32, i32* @f, align 4
+ %3 = load i32, ptr @f, align 4
%add2 = add nsw i32 %add1, %3
ret i32 %add2
}
define dso_local signext i32 @ext_fun() section ".ext_fun_sec" {
entry:
- %0 = load i32, i32* @ext_const, align 4
- %1 = load i32, i32* @ext_var, align 4
+ %0 = load i32, ptr @ext_const, align 4
+ %1 = load i32, ptr @ext_var, align 4
%add = add nsw i32 %0, %1
- %2 = load i32, i32* @ext_zvar, align 4
+ %2 = load i32, ptr @ext_zvar, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
define void @foo() {
entry:
- store i32 1, i32* @La, align 4
- call i32 bitcast (i32 (...)* @Lb to i32 ()*)()
+ store i32 1, ptr @La, align 4
+ call i32 @Lb()
ret void
}
;; Since overflow section is not supported yet, we will emit an error instead of
;; generating an invalid binary for now.
; RUN: grep -v RUN: %s | \
-; RUN: sed >%t.overflow.ll 's/SIZE/65535/;s/MACRO/#/;s/#/################/g;s/#/################/g;s/#/################/g;s/#/################/g;s/#/#_/g;s/_#_\([^#]\)/\1/;s/_/, /g;s/#/i8* @c/g;'
+; RUN: sed >%t.overflow.ll 's/SIZE/65535/;s/MACRO/#/;s/#/################/g;s/#/################/g;s/#/################/g;s/#/################/g;s/#/#_/g;s/_#_\([^#]\)/\1/;s/_/, /g;s/#/ptr @c/g;'
; RUN: not --crash llc -verify-machineinstrs -mtriple powerpc-ibm-aix-xcoff \
; RUN: -mcpu=pwr4 -mattr=-altivec -filetype=obj -o %t.o %t.overflow.ll 2>&1 | \
; RUN: FileCheck --check-prefix=OVERFLOW %s
;; This test generates 65534 relocation entries, an overflow section should
;; not be generated.
; RUN: grep -v RUN: %s | \
-; RUN: sed >%t.ll 's/SIZE/65534/;s/MACRO/#/;s/#/################/g;s/#/################/g;s/#/################/g;s/#/################/g;s/#/#_/g;s/_#_#_\([^#]\)/\1/;s/_/, /g;s/#/i8* @c/g;'
+; RUN: sed >%t.ll 's/SIZE/65534/;s/MACRO/#/;s/#/################/g;s/#/################/g;s/#/################/g;s/#/################/g;s/#/#_/g;s/_#_#_\([^#]\)/\1/;s/_/, /g;s/#/ptr @c/g;'
; RUN: llc -verify-machineinstrs -mtriple powerpc-ibm-aix-xcoff \
; RUN: -mcpu=pwr4 -mattr=-altivec -filetype=obj -o %t.o %t.ll
; RUN: llvm-readobj --section-headers %t.o | FileCheck --check-prefix=XCOFF32 %s
; RUN: llvm-readobj --section-headers %t64.o | FileCheck --check-prefix=XCOFF64 %s
@c = external global i8, align 1
-@arr = global [SIZE x i8*] [MACRO], align 8
+@arr = global [SIZE x ptr] [MACRO], align 8
; XCOFF32-NOT: Name: .ovrflo
; XCOFF32-NOT: Type: STYP_OVRFLO
; RUN: llvm-readobj -r --expand-relocs --syms %t64.o | FileCheck --check-prefixes=RELOC,SYM,RELOC64,SYM64 %s
@common = common global i32 0, align 4
-@pointer = global i32* @common, align 4
+@pointer = global ptr @common, align 4
; CHECK: .comm common[RW],4,2
@magic32 = private unnamed_addr constant [4 x i32] [i32 464, i32 472, i32 413, i32 0], align 4
@strA = private unnamed_addr constant [14 x i8] c"hello world!\0A\00", align 1
@.str = private unnamed_addr constant [9 x i8] c"abcdefgh\00", align 1
-@p = global i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), align 4
+@p = global ptr @.str, align 4
; Function Attrs: noinline nounwind optnone
define i8 @foo() #0 {
entry:
- %0 = load i8*, i8** @p, align 4
- %1 = load i8, i8* %0, align 1
+ %0 = load ptr, ptr @p, align 4
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
define i32 @foo() {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%add = add nsw i32 %0, %1
- %2 = load i32, i32* @c, align 4
+ %2 = load i32, ptr @c, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
@globalA = global i32 1, align 4
@globalB = global i32 2, align 4
@arr = global <{ i32, [9 x i32] }> <{ i32 3, [9 x i32] zeroinitializer }>, align 4
-@p = global i32* bitcast (i8* getelementptr (i8, i8* bitcast (<{ i32, [9 x i32] }>* @arr to i8*), i64 16) to i32*), align 4
+@p = global ptr getelementptr (i8, ptr @arr, i64 16), align 4
define i32 @foo() {
entry:
%call = call i32 @bar(i32 1)
- %0 = load i32, i32* @globalA, align 4
+ %0 = load i32, ptr @globalA, align 4
%add = add nsw i32 %call, %0
- %1 = load i32, i32* @globalB, align 4
+ %1 = load i32, ptr @globalB, align 4
%add1 = add nsw i32 %add, %1
ret i32 %add1
}
define internal i32 @f$o() {
entry:
- %call = call i32 bitcast (i32 (...)* @"f\40o" to i32 ()*)()
+ %call = call i32 @"f\40o"()
ret i32 %call
}
define i32 @"f\26o"() {
entry:
%tmp = call i32 @f$o()
- %tmp1 = load i32, i32* @"f\3do"
+ %tmp1 = load i32, ptr @"f\3do"
%tmp2 = add i32 %tmp, %tmp1
ret i32 %tmp2
}
; This is f&_o
-define i32 (...)* @"f\26_o"() {
+define ptr @"f\26_o"() {
entry:
- ret i32 (...)* @"f\40o"
+ ret ptr @"f\40o"
}
; This is f@o
@c = external global i16, align 2
@globa = common global i32 0, align 4
-@ptr = internal global void (...)* null, align 4
+@ptr = internal global ptr null, align 4
declare void @foo()
define void @bar() {
- %1 = alloca i8*, align 8
- %2 = alloca i8*, align 8
- store i32 0, i32* @a, align 4
- store i64 0, i64* @b, align 8
- store i16 0, i16* @c, align 2
- store i32 0, i32* @globa, align 4
- store void (...)* bitcast (void ()* @bar to void (...)*), void (...)** @ptr, align 4
- store i8* bitcast (void ()* @foo to i8*), i8** %1, align 8
- store i8* bitcast (void ()* @foobar to i8*), i8** %2, align 8
+ %1 = alloca ptr, align 8
+ %2 = alloca ptr, align 8
+ store i32 0, ptr @a, align 4
+ store i64 0, ptr @b, align 8
+ store i16 0, ptr @c, align 2
+ store i32 0, ptr @globa, align 4
+ store ptr @bar, ptr @ptr, align 4
+ store ptr @foo, ptr %1, align 8
+ store ptr @foobar, ptr %2, align 8
ret void
}
; We initialize a csect when we first reference an external global, so make sure we don't run into problems when we see it again.
define void @bar2() {
- store i32 0, i32* @a, align 4
- store i64 0, i64* @b, align 8
- store i16 0, i16* @c, align 2
+ store i32 0, ptr @a, align 4
+ store i64 0, ptr @b, align 8
+ store i16 0, ptr @c, align 2
ret void
}
@keep_this = internal global i32 2, align 4
@keep_this2 = internal global i32 3, align 4
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32* @keep_this to i8*)], section "llvm.metadata"
-@llvm.compiler.used = appending global [1 x i8*] [i8* bitcast (i32* @keep_this2 to i8*)], section "llvm.metadata"
+@llvm.used = appending global [1 x ptr] [ptr @keep_this], section "llvm.metadata"
+@llvm.compiler.used = appending global [1 x ptr] [ptr @keep_this2], section "llvm.metadata"
; CHECK-NOT: llvm.metadata
; CHECK-NOT: llvm.used
ret void
}
-define hidden void @foo_h(i32* %ip) {
+define hidden void @foo_h(ptr %ip) {
entry:
ret void
}
-define dllexport void @foo_e(i32* %ip) {
+define dllexport void @foo_e(ptr %ip) {
entry:
ret void
}
-define protected void @foo_protected(i32* %ip) {
+define protected void @foo_protected(ptr %ip) {
entry:
ret void
}
ret void
}
-@foo_p = global void ()* @zoo_weak_extern_h, align 4
+@foo_p = global ptr @zoo_weak_extern_h, align 4
declare extern_weak hidden void @zoo_weak_extern_h()
declare extern_weak dllexport void @zoo_weak_extern_e()
define i32 @main() {
entry:
- %call1= call i32 @bar_h(i32* @b_h)
+ %call1= call i32 @bar_h(ptr @b_h)
call void @foo_weak_h()
- %0 = load void ()*, void ()** @foo_p, align 4
+ %0 = load ptr, ptr @foo_p, align 4
call void %0()
ret i32 0
}
-declare hidden i32 @bar_h(i32*)
-declare dllexport i32 @bar_e(i32*)
+declare hidden i32 @bar_h(ptr)
+declare dllexport i32 @bar_e(ptr)
; CHECK: .globl foo[DS]{{[[:space:]]*([#].*)?$}}
; CHECK: .globl .foo{{[[:space:]]*([#].*)?$}}
; CHECK-NEXT: lxvdsx 34, 0, 3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%splat.splatinsert = insertelement <2 x double> undef, double %0, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %splat.splat
%struct.S5 = type { [5 x i8] }
-define zeroext i8 @test_byval_5Byte(%struct.S5* byval(%struct.S5) align 1 %s) {
+define zeroext i8 @test_byval_5Byte(ptr byval(%struct.S5) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S5, %struct.S5* %s, i32 0, i32 0, i32 4
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S5, ptr %s, i32 0, i32 0, i32 4
+ %0 = load i8, ptr %arrayidx, align 1
ret i8 %0
}
%struct.S6 = type { [6 x i8] }
-define zeroext i8 @test_byval_6Byte(%struct.S6* byval(%struct.S6) align 1 %s) {
+define zeroext i8 @test_byval_6Byte(ptr byval(%struct.S6) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S6, %struct.S6* %s, i32 0, i32 0, i32 5
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S6, ptr %s, i32 0, i32 0, i32 5
+ %0 = load i8, ptr %arrayidx, align 1
ret i8 %0
}
%struct.S7 = type { [7 x i8] }
-define zeroext i8 @test_byval_7Byte(%struct.S7* byval(%struct.S7) align 1 %s) {
+define zeroext i8 @test_byval_7Byte(ptr byval(%struct.S7) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S7, %struct.S7* %s, i32 0, i32 0, i32 6
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S7, ptr %s, i32 0, i32 0, i32 6
+ %0 = load i8, ptr %arrayidx, align 1
ret i8 %0
}
%struct.S8 = type { [8 x i8] }
-define zeroext i8 @test_byval_8Byte(%struct.S8* byval(%struct.S8) align 1 %s) {
+define zeroext i8 @test_byval_8Byte(ptr byval(%struct.S8) align 1 %s) {
entry:
- %arrayidx = getelementptr inbounds %struct.S8, %struct.S8* %s, i32 0, i32 0, i32 7
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds %struct.S8, ptr %s, i32 0, i32 0, i32 7
+ %0 = load i8, ptr %arrayidx, align 1
ret i8 %0
}
define void @call_test_byval_64Byte() {
entry:
- call void @test_byval_64Byte(%struct.S64* byval(%struct.S64) align 1 @gS64)
+ call void @test_byval_64Byte(ptr byval(%struct.S64) align 1 @gS64)
ret void
}
-declare void @test_byval_64Byte(%struct.S64* byval(%struct.S64) align 1)
+declare void @test_byval_64Byte(ptr byval(%struct.S64) align 1)
; CHECK-LABEL: name: call_test_byval_64Byte{{.*}}
store ptr %b, ptr %b.addr, align 8
%0 = load ptr, ptr %b.addr, align 8
%vtable = load ptr, ptr %0, align 8
- %vfn = getelementptr inbounds ptr, ptr %vtable, i64 0
- %1 = load ptr, ptr %vfn, align 8
+ %1 = load ptr, ptr %vtable, align 8
%call = call noundef signext i32 %1(ptr noundef nonnull align 8 dereferenceable(8) %0)
ret i32 %call
}
%f8 = type <8 x float>
%i4 = type <4 x i32>
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
+define void @test_f2(ptr %P, ptr %Q, ptr %S) {
; AIX-P8-64-LABEL: test_f2:
; AIX-P8-64: # %bb.0:
; AIX-P8-64-NEXT: lfdx f0, 0, r3
; AIX-P9-32-NEXT: stfs f1, 0(r5)
; AIX-P9-32-NEXT: stfs f0, 4(r5)
; AIX-P9-32-NEXT: blr
- %p = load %f2, %f2* %P
- %q = load %f2, %f2* %Q
+ %p = load %f2, ptr %P
+ %q = load %f2, ptr %Q
%R = fadd %f2 %p, %q
- store %f2 %R, %f2* %S
+ store %f2 %R, ptr %S
ret void
}
; RUN: llc -relocation-model=static -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK --check-prefix=LARGE %s
@foo = dso_local global i32 42
-@fooa = dso_local alias i32, i32* @foo
+@fooa = dso_local alias i32, ptr @foo
@foo2 = dso_local global i64 42
-@foo2a = dso_local alias i64, i64* @foo2
+@foo2a = dso_local alias i64, ptr @foo2
; CHECK-LABEL: bar:
define dso_local i32 @bar() {
; MEDIUM: addis 3, 2, fooa@toc@ha
; LARGE: addis 3, 2, .L[[L0:.*]]@toc@ha
- %a = load i32, i32* @fooa
+ %a = load i32, ptr @fooa
ret i32 %a
}
; MEDIUM: addis 3, 2, foo2a@toc@ha
; MEDIUM: addi 3, 3, foo2a@toc@l
; LARGE: addis 3, 2, .L[[L1:.*]]@toc@ha
- %a = load i64, i64* @foo2a
+ %a = load i64, ptr @foo2a
ret i64 %a
}
%conv = sext i32 %n to i64
%0 = alloca double, i64 %conv, align 16
tail call void asm sideeffect "", "~{cr2}"()
- %call = call signext i32 @do_something(double* nonnull %0)
+ %call = call signext i32 @do_something(ptr nonnull %0)
ret i32 %call
}
-declare signext i32 @do_something(double*)
+declare signext i32 @do_something(ptr)
; CHECK: name: test
; CHECK: alignment: 16
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep rlwin
-define void @test(i8* %P) {
- %W = load i8, i8* %P
+define void @test(ptr %P) {
+ %W = load i8, ptr %P
%X = shl i8 %W, 1
%Y = add i8 %X, 2
%Z = and i8 %Y, 254 ; dead and
- store i8 %Z, i8* %P
+ store i8 %Z, ptr %P
ret void
}
; The bug is triggered by passing a byval structure after an anonymous
; aggregate.
-%tarray = type { i64, i8* }
+%tarray = type { i64, ptr }
-define i8* @func1({ i64, i8* } %array, i8* %ptr) {
+define ptr @func1({ i64, ptr } %array, ptr %ptr) {
entry:
- %array_ptr = extractvalue {i64, i8* } %array, 1
- %cond = icmp eq i8* %array_ptr, %ptr
+ %array_ptr = extractvalue {i64, ptr } %array, 1
+ %cond = icmp eq ptr %array_ptr, %ptr
br i1 %cond, label %equal, label %unequal
equal:
- ret i8* %array_ptr
+ ret ptr %array_ptr
unequal:
- ret i8* %ptr
+ ret ptr %ptr
}
; CHECK-LABEL: func1:
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
-define i8* @func2({ i64, i8* } %array1, %tarray* byval(%tarray) %array2) {
+define ptr @func2({ i64, ptr } %array1, ptr byval(%tarray) %array2) {
entry:
- %array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8*, i8** %tmp
- %cond = icmp eq i8* %array1_ptr, %array2_ptr
+ %array1_ptr = extractvalue {i64, ptr } %array1, 1
+ %tmp = getelementptr inbounds %tarray, ptr %array2, i32 0, i32 1
+ %array2_ptr = load ptr, ptr %tmp
+ %cond = icmp eq ptr %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
- ret i8* %array1_ptr
+ ret ptr %array1_ptr
unequal:
- ret i8* %array2_ptr
+ ret ptr %array2_ptr
}
; CHECK-LABEL: func2:
; CHECK-DAG: cmpld {{([0-9]+,)?}}4, 3
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
-define i8* @func3({ i64, i8* }* byval({ i64, i8* }) %array1, %tarray* byval(%tarray) %array2) {
+define ptr @func3(ptr byval({ i64, ptr }) %array1, ptr byval(%tarray) %array2) {
entry:
- %tmp1 = getelementptr inbounds { i64, i8* }, { i64, i8* }* %array1, i32 0, i32 1
- %array1_ptr = load i8*, i8** %tmp1
- %tmp2 = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8*, i8** %tmp2
- %cond = icmp eq i8* %array1_ptr, %array2_ptr
+ %tmp1 = getelementptr inbounds { i64, ptr }, ptr %array1, i32 0, i32 1
+ %array1_ptr = load ptr, ptr %tmp1
+ %tmp2 = getelementptr inbounds %tarray, ptr %array2, i32 0, i32 1
+ %array2_ptr = load ptr, ptr %tmp2
+ %cond = icmp eq ptr %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
- ret i8* %array1_ptr
+ ret ptr %array1_ptr
unequal:
- ret i8* %array2_ptr
+ ret ptr %array2_ptr
}
; CHECK-LABEL: func3:
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
-define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
+define ptr @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
i64 %p5, i64 %p6, i64 %p7, i64 %p8,
- { i64, i8* } %array1, %tarray* byval(%tarray) %array2) {
+ { i64, ptr } %array1, ptr byval(%tarray) %array2) {
entry:
- %array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
- %array2_ptr = load i8*, i8** %tmp
- %cond = icmp eq i8* %array1_ptr, %array2_ptr
+ %array1_ptr = extractvalue {i64, ptr } %array1, 1
+ %tmp = getelementptr inbounds %tarray, ptr %array2, i32 0, i32 1
+ %array2_ptr = load ptr, ptr %tmp
+ %cond = icmp eq ptr %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
equal:
- ret i8* %array1_ptr
+ ret ptr %array1_ptr
unequal:
- ret i8* %array2_ptr
+ ret ptr %array2_ptr
}
; CHECK-LABEL: func4:
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr8 < %s | FileCheck %s
-%class.PB2 = type { [1 x i32], %class.PB1* }
+%class.PB2 = type { [1 x i32], ptr }
%class.PB1 = type { [1 x i32], i64, i64, i32 }
; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @foo(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr {
+define zeroext i1 @foo(ptr %s_a, ptr %s_b) local_unnamed_addr {
entry:
- %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
- %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+ %0 = load i32, ptr %s_a, align 8, !tbaa !1
%and.i = and i32 %0, 8
%cmp.i = icmp ne i32 %and.i, 0
- %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
- %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+ %1 = load i32, ptr %s_b, align 8, !tbaa !1
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ne i32 %and.i4, 0
%cmp = xor i1 %cmp.i, %cmp.i5
; Test to check that we do not promote arguments when the
; type size is greater than 128 bits.
-define internal fastcc void @print_acc(<512 x i1>* nocapture readonly %a) nounwind {
+define internal fastcc void @print_acc(ptr nocapture readonly %a) nounwind {
; CHECK-LABEL: @print_acc(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <512 x i1>, <512 x i1>* [[A:%.*]], align 64
+; CHECK-NEXT: [[TMP0:%.*]] = load <512 x i1>, ptr [[A:%.*]], align 64
; CHECK-NEXT: [[TMP1:%.*]] = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> [[TMP0]])
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP1]], 0
; CHECK-NEXT: ret void
;
entry:
- %0 = load <512 x i1>, <512 x i1>* %a, align 64
+ %0 = load <512 x i1>, ptr %a, align 64
%1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %0)
%2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
ret void
declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1>) nounwind
-define dso_local void @test(<512 x i1>* nocapture %a, <16 x i8> %ac) {
+define dso_local void @test(ptr nocapture %a, <16 x i8> %ac) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = tail call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> [[AC:%.*]], <16 x i8> [[AC]])
-; CHECK-NEXT: store <512 x i1> [[TMP0]], <512 x i1>* [[A:%.*]], align 64
-; CHECK-NEXT: tail call fastcc void @print_acc(<512 x i1>* nonnull [[A]])
+; CHECK-NEXT: store <512 x i1> [[TMP0]], ptr [[A:%.*]], align 64
+; CHECK-NEXT: tail call fastcc void @print_acc(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry:
%0 = tail call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %ac, <16 x i8> %ac)
- store <512 x i1> %0, <512 x i1>* %a, align 64
- tail call fastcc void @print_acc(<512 x i1>* nonnull %a)
+ store <512 x i1> %0, ptr %a, align 64
+ tail call fastcc void @print_acc(ptr nonnull %a)
ret void
}
define internal fastcc void @printWideVec(<16 x i32> %ptr.val) nounwind {
; CHECK-LABEL: @printWideVec(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0))
+; CHECK-NEXT: [[CALL:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
; CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[PTR_VAL:%.*]], i32 0
-; CHECK-NEXT: [[CALL1:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT]])
+; CHECK-NEXT: [[CALL1:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT]])
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 1
-; CHECK-NEXT: [[CALL1_1:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_1]])
+; CHECK-NEXT: [[CALL1_1:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_1]])
; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 2
-; CHECK-NEXT: [[CALL1_2:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_2]])
+; CHECK-NEXT: [[CALL1_2:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_2]])
; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 3
-; CHECK-NEXT: [[CALL1_3:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_3]])
+; CHECK-NEXT: [[CALL1_3:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_3]])
; CHECK-NEXT: [[VECEXT_4:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 4
-; CHECK-NEXT: [[CALL1_4:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_4]])
+; CHECK-NEXT: [[CALL1_4:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_4]])
; CHECK-NEXT: [[VECEXT_5:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 5
-; CHECK-NEXT: [[CALL1_5:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_5]])
+; CHECK-NEXT: [[CALL1_5:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_5]])
; CHECK-NEXT: [[VECEXT_6:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 6
-; CHECK-NEXT: [[CALL1_6:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext [[VECEXT_6]])
+; CHECK-NEXT: [[CALL1_6:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext [[VECEXT_6]])
; CHECK-NEXT: [[VECEXT2:%.*]] = extractelement <16 x i32> [[PTR_VAL]], i32 7
-; CHECK-NEXT: [[CALL3:%.*]] = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([6 x i8], [6 x i8]* @.str.2, i64 0, i64 0), i32 signext [[VECEXT2]])
+; CHECK-NEXT: [[CALL3:%.*]] = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.2, i32 signext [[VECEXT2]])
; CHECK-NEXT: ret void
;
entry:
- %call = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0))
+ %call = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
%vecext = extractelement <16 x i32> %ptr.val, i32 0
- %call1 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext)
+ %call1 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext)
%vecext.1 = extractelement <16 x i32> %ptr.val, i32 1
- %call1.1 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.1)
+ %call1.1 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.1)
%vecext.2 = extractelement <16 x i32> %ptr.val, i32 2
- %call1.2 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.2)
+ %call1.2 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.2)
%vecext.3 = extractelement <16 x i32> %ptr.val, i32 3
- %call1.3 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.3)
+ %call1.3 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.3)
%vecext.4 = extractelement <16 x i32> %ptr.val, i32 4
- %call1.4 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.4)
+ %call1.4 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.4)
%vecext.5 = extractelement <16 x i32> %ptr.val, i32 5
- %call1.5 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.5)
+ %call1.5 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.5)
%vecext.6 = extractelement <16 x i32> %ptr.val, i32 6
- %call1.6 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i64 0, i64 0), i32 signext %vecext.6)
+ %call1.6 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.1, i32 signext %vecext.6)
%vecext2 = extractelement <16 x i32> %ptr.val, i32 7
- %call3 = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([6 x i8], [6 x i8]* @.str.2, i64 0, i64 0), i32 signext %vecext2)
+ %call3 = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.2, i32 signext %vecext2)
ret void
}
-declare noundef signext i32 @printf(i8* nocapture noundef readonly, ...) nounwind
+declare noundef signext i32 @printf(ptr nocapture noundef readonly, ...) nounwind
define dso_local void @test1(<4 x i32> %a, <4 x i32> %b) nounwind {
; CHECK-LABEL: @test1(
; : "m"(*addr) : "memory", "cr0");
; }
-define void @foo(i32 signext %result, i8* %addr) #0 {
+define void @foo(i32 signext %result, ptr %addr) #0 {
; CHECK-LABEL: @foo
; CHECK: ld [[REG:[0-9]+]], 0(4)
entry:
%result.addr = alloca i32, align 4
- %addr.addr = alloca i8*, align 8
- store i32 %result, i32* %result.addr, align 4
- store i8* %addr, i8** %addr.addr, align 8
- %0 = load i8*, i8** %addr.addr, align 8
- %1 = call i32 asm sideeffect "ld${1:U}${1:X} $0,$1\0Acmpw $0,$0\0Abne- 1f\0A1: isync\0A", "=r,*m,~{memory},~{cr0}"(i8* elementtype(i8) %0) #1, !srcloc !0
- store i32 %1, i32* %result.addr, align 4
+ %addr.addr = alloca ptr, align 8
+ store i32 %result, ptr %result.addr, align 4
+ store ptr %addr, ptr %addr.addr, align 8
+ %0 = load ptr, ptr %addr.addr, align 8
+ %1 = call i32 asm sideeffect "ld${1:U}${1:X} $0,$1\0Acmpw $0,$0\0Abne- 1f\0A1: isync\0A", "=r,*m,~{memory},~{cr0}"(ptr elementtype(i8) %0) #1, !srcloc !0
+ store i32 %1, ptr %result.addr, align 4
ret void
}
entry:
%x.addr = alloca double, align 8
%result = alloca i64, align 8
- store double %x, double* %x.addr, align 8
- %0 = load double, double* %x.addr, align 8
+ store double %x, ptr %x.addr, align 8
+ %0 = load double, ptr %x.addr, align 8
%1 = call i64 asm sideeffect "fctid $0, $1", "=d,d"(double %0) #1, !srcloc !1
- store i64 %1, i64* %result, align 8
- %2 = load i64, i64* %result, align 8
+ store i64 %1, ptr %result, align 8
+ %2 = load i64, ptr %result, align 8
%conv = trunc i64 %2 to i32
ret i32 %conv
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
-@TestA = alias void (), void ()* @TestC
-@TestB = alias void (), void ()* @TestC
-@TestC = alias void (), void ()* @TestD
+@TestA = alias void (), ptr @TestC
+@TestB = alias void (), ptr @TestC
+@TestC = alias void (), ptr @TestD
define void @TestD() {
entry:
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-- | FileCheck %s
; https://bugs.llvm.org/show_bug.cgi?id=50608
-define dso_local signext i32 @main(i32 signext %argc, i8** %argv) {
+define dso_local signext i32 @main(i32 signext %argc, ptr %argv) {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, -4(1)
declare double @gen_random(double) #0
-declare void @benchmark_heapsort(i32 signext, double* nocapture) #0
+declare void @benchmark_heapsort(i32 signext, ptr nocapture) #0
-define signext i32 @main(i32 signext %argc, i8** nocapture %argv) #0 {
+define signext i32 @main(i32 signext %argc, ptr nocapture %argv) #0 {
entry:
br i1 undef, label %cond.true, label %cond.end
%add = add i32 %cond, 1
%conv = sext i32 %add to i64
%mul = shl nsw i64 %conv, 3
- %call1 = tail call noalias i8* @malloc(i64 %mul) #1
+ %call1 = tail call noalias ptr @malloc(i64 %mul) #1
br i1 undef, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %cond.end
ret i32 0
}
-declare noalias i8* @malloc(i64) #0
+declare noalias ptr @malloc(i64) #0
-declare signext i32 @printf(i8* nocapture, ...) #0
+declare signext i32 @printf(ptr nocapture, ...) #0
-declare void @free(i8* nocapture) #0
+declare void @free(ptr nocapture) #0
-declare i64 @strtol(i8*, i8** nocapture, i32 signext) #0
+declare i64 @strtol(ptr, ptr nocapture, i32 signext) #0
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
-define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
+define i32 @exchange_and_add(ptr %mem, i32 %val) nounwind {
; CHECK-LABEL: exchange_and_add:
; CHECK: lwarx {{[0-9]+}}, 0, {{[0-9]+}}
- %tmp = atomicrmw add i32* %mem, i32 %val monotonic
+ %tmp = atomicrmw add ptr %mem, i32 %val monotonic
; CHECK: stwcx. {{[0-9]+}}, 0, {{[0-9]+}}
ret i32 %tmp
}
-define i32 @exchange_and_cmp(i32* %mem) nounwind {
+define i32 @exchange_and_cmp(ptr %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: lwarx
- %tmppair = cmpxchg i32* %mem, i32 0, i32 1 monotonic monotonic
+ %tmppair = cmpxchg ptr %mem, i32 0, i32 1 monotonic monotonic
%tmp = extractvalue { i32, i1 } %tmppair, 0
; CHECK: stwcx.
ret i32 %tmp
}
-define i32 @exchange(i32* %mem, i32 %val) nounwind {
+define i32 @exchange(ptr %mem, i32 %val) nounwind {
; CHECK-LABEL: exchange:
; CHECK: lwarx
- %tmp = atomicrmw xchg i32* %mem, i32 1 monotonic
+ %tmp = atomicrmw xchg ptr %mem, i32 1 monotonic
; CHECK: stwcx.
ret i32 %tmp
}
; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr7 | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -ppc-asm-full-reg-names -mtriple=ppc64-- -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-P8U
-define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
+define i64 @exchange_and_add(ptr %mem, i64 %val) nounwind {
; CHECK-LABEL: exchange_and_add:
; CHECK: ldarx
- %tmp = atomicrmw add i64* %mem, i64 %val monotonic
+ %tmp = atomicrmw add ptr %mem, i64 %val monotonic
; CHECK: stdcx.
ret i64 %tmp
}
-define i8 @exchange_and_add8(i8* %mem, i8 %val) nounwind {
+define i8 @exchange_and_add8(ptr %mem, i8 %val) nounwind {
; CHECK-LABEL: exchange_and_add8:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lbarx
- %tmp = atomicrmw add i8* %mem, i8 %val monotonic
+ %tmp = atomicrmw add ptr %mem, i8 %val monotonic
; CHECK-P8U: stbcx.
ret i8 %tmp
}
-define i16 @exchange_and_add16(i16* %mem, i16 %val) nounwind {
+define i16 @exchange_and_add16(ptr %mem, i16 %val) nounwind {
; CHECK-LABEL: exchange_and_add16:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lharx
- %tmp = atomicrmw add i16* %mem, i16 %val monotonic
+ %tmp = atomicrmw add ptr %mem, i16 %val monotonic
; CHECK-P8U: sthcx.
ret i16 %tmp
}
-define i64 @exchange_and_cmp(i64* %mem) nounwind {
+define i64 @exchange_and_cmp(ptr %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: ldarx
- %tmppair = cmpxchg i64* %mem, i64 0, i64 1 monotonic monotonic
+ %tmppair = cmpxchg ptr %mem, i64 0, i64 1 monotonic monotonic
%tmp = extractvalue { i64, i1 } %tmppair, 0
; CHECK: stdcx.
ret i64 %tmp
}
-define i8 @exchange_and_cmp8(i8* %mem) nounwind {
+define i8 @exchange_and_cmp8(ptr %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp8:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lbarx
- %tmppair = cmpxchg i8* %mem, i8 0, i8 1 monotonic monotonic
+ %tmppair = cmpxchg ptr %mem, i8 0, i8 1 monotonic monotonic
%tmp = extractvalue { i8, i1 } %tmppair, 0
; CHECK-P8U: stbcx.
ret i8 %tmp
}
-define i16 @exchange_and_cmp16(i16* %mem) nounwind {
+define i16 @exchange_and_cmp16(ptr %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp16:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lharx
- %tmppair = cmpxchg i16* %mem, i16 0, i16 1 monotonic monotonic
+ %tmppair = cmpxchg ptr %mem, i16 0, i16 1 monotonic monotonic
%tmp = extractvalue { i16, i1 } %tmppair, 0
; CHECK-P8U: sthcx.
ret i16 %tmp
}
-define i64 @exchange(i64* %mem, i64 %val) nounwind {
+define i64 @exchange(ptr %mem, i64 %val) nounwind {
; CHECK-LABEL: exchange:
; CHECK: ldarx
- %tmp = atomicrmw xchg i64* %mem, i64 1 monotonic
+ %tmp = atomicrmw xchg ptr %mem, i64 1 monotonic
; CHECK: stdcx.
ret i64 %tmp
}
-define i8 @exchange8(i8* %mem, i8 %val) nounwind {
+define i8 @exchange8(ptr %mem, i8 %val) nounwind {
; CHECK-LABEL: exchange8:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lbarx
- %tmp = atomicrmw xchg i8* %mem, i8 1 monotonic
+ %tmp = atomicrmw xchg ptr %mem, i8 1 monotonic
; CHECK-P8U: stbcx.
ret i8 %tmp
}
-define i16 @exchange16(i16* %mem, i16 %val) nounwind {
+define i16 @exchange16(ptr %mem, i16 %val) nounwind {
; CHECK-LABEL: exchange16:
; CHECK-BE: xori
; CHECK-LE-NOT: xori
; CHECK-P8U: lharx
- %tmp = atomicrmw xchg i16* %mem, i16 1 monotonic
+ %tmp = atomicrmw xchg ptr %mem, i16 1 monotonic
; CHECK-P8U: sthcx.
ret i16 %tmp
}
-define void @atomic_store(i64* %mem, i64 %val) nounwind {
+define void @atomic_store(ptr %mem, i64 %val) nounwind {
entry:
; CHECK: @atomic_store
- store atomic i64 %val, i64* %mem release, align 64
+ store atomic i64 %val, ptr %mem release, align 64
; CHECK: lwsync
; CHECK-NOT: stdcx
; CHECK: std
ret void
}
-define i64 @atomic_load(i64* %mem) nounwind {
+define i64 @atomic_load(ptr %mem) nounwind {
entry:
; CHECK: @atomic_load
- %tmp = load atomic i64, i64* %mem acquire, align 64
+ %tmp = load atomic i64, ptr %mem acquire, align 64
; CHECK-NOT: ldarx
; CHECK: ld [[VAL:r[0-9]+]]
; CHECK: cmpd [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; RUN: llc -verify-machineinstrs -mtriple=powerpc-unknown-unknown \
; RUN: < %s | FileCheck --check-prefix=CHECK-32 %s
-define float @test_add(float* %ptr, float %incr) {
+define float @test_add(ptr %ptr, float %incr) {
; CHECK-64-LABEL: test_add:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: sync
; CHECK-32-NEXT: addi 1, 1, 32
; CHECK-32-NEXT: blr
entry:
- %r = atomicrmw fadd float* %ptr, float %incr seq_cst
+ %r = atomicrmw fadd ptr %ptr, float %incr seq_cst
ret float %r
}
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define void @a32min(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+define void @a32min(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-LABEL: a32min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB0_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i32* %minimum, i32 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i32 %val monotonic
ret void
}
-define void @a32max(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+define void @a32max(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-LABEL: a32max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB1_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i32* %minimum, i32 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i32 %val monotonic
ret void
}
-define void @a32umin(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+define void @a32umin(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-LABEL: a32umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB2_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i32* %minimum, i32 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i32 %val monotonic
ret void
}
-define void @a32umax(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+define void @a32umax(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-LABEL: a32umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB3_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i32* %minimum, i32 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i32 %val monotonic
ret void
}
-define void @a16min(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+define void @a16min(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-LABEL: a16min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: extsh 4, 4
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i16 %val monotonic
ret void
}
-define void @a16max(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+define void @a16max(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-LABEL: a16max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: extsh 4, 4
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i16 %val monotonic
ret void
}
-define void @a16umin(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+define void @a16umin(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-LABEL: a16umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB6_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i16 %val monotonic
ret void
}
-define void @a16umax(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+define void @a16umax(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-LABEL: a16umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB7_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i16 %val monotonic
ret void
}
-define void @a8min(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+define void @a8min(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-LABEL: a8min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: extsb 4, 4
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i8 %val monotonic
ret void
}
-define void @a8max(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+define void @a8max(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-LABEL: a8max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: extsb 4, 4
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i8 %val monotonic
ret void
}
-define void @a8umin(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+define void @a8umin(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-LABEL: a8umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB10_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i8 %val monotonic
ret void
}
-define void @a8umax(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+define void @a8umax(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-LABEL: a8umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB11_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i8 %val monotonic
ret void
}
-define void @a64min(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+define void @a64min(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-LABEL: a64min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB12_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i64* %minimum, i64 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i64 %val monotonic
ret void
}
-define void @a64max(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+define void @a64max(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-LABEL: a64max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB13_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i64* %minimum, i64 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i64 %val monotonic
ret void
}
-define void @a64umin(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+define void @a64umin(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-LABEL: a64umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB14_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i64* %minimum, i64 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i64 %val monotonic
ret void
}
-define void @a64umax(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+define void @a64umax(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-LABEL: a64umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB15_1: # %entry
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i64* %minimum, i64 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i64 %val monotonic
ret void
}
-define void @ae16min(i16* nocapture dereferenceable(4) %minimum, i16 %val) #0 {
+define void @ae16min(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-LABEL: ae16min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i16 %val monotonic
ret void
}
-define void @ae16max(i16* nocapture dereferenceable(4) %minimum, i16 %val) #0 {
+define void @ae16max(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-LABEL: ae16max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i16 %val monotonic
ret void
}
-define void @ae16umin(i16* nocapture dereferenceable(4) %minimum, i16 %val) #0 {
+define void @ae16umin(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-LABEL: ae16umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i16 %val monotonic
ret void
}
-define void @ae16umax(i16* nocapture dereferenceable(4) %minimum, i16 %val) #0 {
+define void @ae16umax(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-LABEL: ae16umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i16* %minimum, i16 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i16 %val monotonic
ret void
}
-define void @ae8min(i8* nocapture dereferenceable(4) %minimum, i8 %val) #0 {
+define void @ae8min(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-LABEL: ae8min:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rlwinm 5, 3, 3, 27, 28
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw min i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw min ptr %minimum, i8 %val monotonic
ret void
}
-define void @ae8max(i8* nocapture dereferenceable(4) %minimum, i8 %val) #0 {
+define void @ae8max(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-LABEL: ae8max:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rlwinm 5, 3, 3, 27, 28
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw max i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw max ptr %minimum, i8 %val monotonic
ret void
}
-define void @ae8umin(i8* nocapture dereferenceable(4) %minimum, i8 %val) #0 {
+define void @ae8umin(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-LABEL: ae8umin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rlwinm 6, 3, 3, 27, 28
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umin i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw umin ptr %minimum, i8 %val monotonic
ret void
}
-define void @ae8umax(i8* nocapture dereferenceable(4) %minimum, i8 %val) #0 {
+define void @ae8umax(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-LABEL: ae8umax:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rlwinm 6, 3, 3, 27, 28
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
- %0 = atomicrmw umax i8* %minimum, i8 %val monotonic
+ %0 = atomicrmw umax ptr %minimum, i8 %val monotonic
ret void
}
; CHECK-NEXT: isync
; CHECK-NEXT: blr
entry:
- %value = load atomic i64, i64* @a acquire, align 8
+ %value = load atomic i64, ptr @a acquire, align 8
ret i64 %value
}
; RUN: -ppc-quadword-atomics -ppc-asm-full-reg-names -ppc-track-subreg-liveness < %s \
; RUN: | FileCheck --check-prefix=PPC-PWR8 %s
-define dso_local i128 @lq_unordered(i128* %src) {
+define dso_local i128 @lq_unordered(ptr %src) {
; P8-LABEL: lq_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: lq r4, 0(r3)
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = load atomic i128, i128* %src unordered, align 16
+ %0 = load atomic i128, ptr %src unordered, align 16
ret i128 %0
}
-define dso_local i128 @lqx_unordered(i128* %src, i64 %idx) {
+define dso_local i128 @lqx_unordered(ptr %src, i64 %idx) {
; P8-LABEL: lqx_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: sldi r4, r4, 4
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = getelementptr i128, i128* %src, i64 %idx
- %1 = load atomic i128, i128* %0 unordered, align 16
+ %0 = getelementptr i128, ptr %src, i64 %idx
+ %1 = load atomic i128, ptr %0 unordered, align 16
ret i128 %1
}
-define dso_local i128 @lq_big_offset_unordered(i128* %src) {
+define dso_local i128 @lq_big_offset_unordered(ptr %src) {
; P8-LABEL: lq_big_offset_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: lis r4, 32
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = getelementptr i128, i128* %src, i64 131072
- %1 = load atomic i128, i128* %0 unordered, align 16
+ %0 = getelementptr i128, ptr %src, i64 131072
+ %1 = load atomic i128, ptr %0 unordered, align 16
ret i128 %1
}
-define dso_local i128 @lq_monotonic(i128* %src) {
+define dso_local i128 @lq_monotonic(ptr %src) {
; P8-LABEL: lq_monotonic:
; P8: # %bb.0: # %entry
; P8-NEXT: lq r4, 0(r3)
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = load atomic i128, i128* %src monotonic, align 16
+ %0 = load atomic i128, ptr %src monotonic, align 16
ret i128 %0
}
-define dso_local i128 @lq_acquire(i128* %src) {
+define dso_local i128 @lq_acquire(ptr %src) {
; P8-LABEL: lq_acquire:
; P8: # %bb.0: # %entry
; P8-NEXT: lq r4, 0(r3)
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = load atomic i128, i128* %src acquire, align 16
+ %0 = load atomic i128, ptr %src acquire, align 16
ret i128 %0
}
-define dso_local i128 @lq_seqcst(i128* %src) {
+define dso_local i128 @lq_seqcst(ptr %src) {
; P8-LABEL: lq_seqcst:
; P8: # %bb.0: # %entry
; P8-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = load atomic i128, i128* %src seq_cst, align 16
+ %0 = load atomic i128, ptr %src seq_cst, align 16
ret i128 %0
}
-define dso_local void @stq_unordered(i128 %val, i128* %dst) {
+define dso_local void @stq_unordered(i128 %val, ptr %dst) {
; P8-LABEL: stq_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: mr r7, r4
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- store atomic i128 %val, i128* %dst unordered, align 16
+ store atomic i128 %val, ptr %dst unordered, align 16
ret void
}
-define dso_local void @stqx_unordered(i128 %val, i128* %dst, i64 %idx) {
+define dso_local void @stqx_unordered(i128 %val, ptr %dst, i64 %idx) {
; P8-LABEL: stqx_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: sldi r6, r6, 4
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = getelementptr i128, i128* %dst, i64 %idx
- store atomic i128 %val, i128* %0 unordered, align 16
+ %0 = getelementptr i128, ptr %dst, i64 %idx
+ store atomic i128 %val, ptr %0 unordered, align 16
ret void
}
-define dso_local void @stq_big_offset_unordered(i128 %val, i128* %dst) {
+define dso_local void @stq_big_offset_unordered(i128 %val, ptr %dst) {
; P8-LABEL: stq_big_offset_unordered:
; P8: # %bb.0: # %entry
; P8-NEXT: lis r6, 32
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = getelementptr i128, i128* %dst, i64 131072
- store atomic i128 %val, i128* %0 unordered, align 16
+ %0 = getelementptr i128, ptr %dst, i64 131072
+ store atomic i128 %val, ptr %0 unordered, align 16
ret void
}
-define dso_local void @stq_monotonic(i128 %val, i128* %dst) {
+define dso_local void @stq_monotonic(i128 %val, ptr %dst) {
; P8-LABEL: stq_monotonic:
; P8: # %bb.0: # %entry
; P8-NEXT: mr r7, r4
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- store atomic i128 %val, i128* %dst monotonic, align 16
+ store atomic i128 %val, ptr %dst monotonic, align 16
ret void
}
-define dso_local void @stq_release(i128 %val, i128* %dst) {
+define dso_local void @stq_release(i128 %val, ptr %dst) {
; P8-LABEL: stq_release:
; P8: # %bb.0: # %entry
; P8-NEXT: lwsync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- store atomic i128 %val, i128* %dst release, align 16
+ store atomic i128 %val, ptr %dst release, align 16
ret void
}
-define dso_local void @stq_seqcst(i128 %val, i128* %dst) {
+define dso_local void @stq_seqcst(i128 %val, ptr %dst) {
; P8-LABEL: stq_seqcst:
; P8: # %bb.0: # %entry
; P8-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- store atomic i128 %val, i128* %dst seq_cst, align 16
+ store atomic i128 %val, ptr %dst seq_cst, align 16
ret void
}
; RUN: | FileCheck --check-prefix=PPC-PWR8 %s
-define i128 @swap(i128* %a, i128 %x) {
+define i128 @swap(ptr %a, i128 %x) {
; CHECK-LABEL: swap:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw xchg i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw xchg ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @add(i128* %a, i128 %x) {
+define i128 @add(ptr %a, i128 %x) {
; CHECK-LABEL: add:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw add i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw add ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @sub(i128* %a, i128 %x) {
+define i128 @sub(ptr %a, i128 %x) {
; CHECK-LABEL: sub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw sub i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw sub ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @and(i128* %a, i128 %x) {
+define i128 @and(ptr %a, i128 %x) {
; CHECK-LABEL: and:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw and i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw and ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @or(i128* %a, i128 %x) {
+define i128 @or(ptr %a, i128 %x) {
; CHECK-LABEL: or:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw or i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw or ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @xor(i128* %a, i128 %x) {
+define i128 @xor(ptr %a, i128 %x) {
; CHECK-LABEL: xor:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw xor i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw xor ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
-define i128 @nand(i128* %a, i128 %x) {
+define i128 @nand(ptr %a, i128 %x) {
; CHECK-LABEL: nand:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = atomicrmw nand i128* %a, i128 %x seq_cst, align 16
+ %0 = atomicrmw nand ptr %a, i128 %x seq_cst, align 16
ret i128 %0
}
;; CmpXchg
-define i128 @cas_weak_acquire_acquire(i128* %a, i128 %cmp, i128 %new) {
+define i128 @cas_weak_acquire_acquire(ptr %a, i128 %cmp, i128 %new) {
; CHECK-LABEL: cas_weak_acquire_acquire:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .LBB7_1: # %entry
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = cmpxchg weak i128* %a, i128 %cmp, i128 %new acquire acquire
+ %0 = cmpxchg weak ptr %a, i128 %cmp, i128 %new acquire acquire
%1 = extractvalue { i128, i1 } %0, 0
ret i128 %1
}
-define i128 @cas_weak_release_monotonic(i128* %a, i128 %cmp, i128 %new) {
+define i128 @cas_weak_release_monotonic(ptr %a, i128 %cmp, i128 %new) {
; CHECK-LABEL: cas_weak_release_monotonic:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwsync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = cmpxchg weak i128* %a, i128 %cmp, i128 %new release monotonic
+ %0 = cmpxchg weak ptr %a, i128 %cmp, i128 %new release monotonic
%1 = extractvalue { i128, i1 } %0, 0
ret i128 %1
}
-define i128 @cas_sc_sc(i128* %a, i128 %cmp, i128 %new) {
+define i128 @cas_sc_sc(ptr %a, i128 %cmp, i128 %new) {
; CHECK-LABEL: cas_sc_sc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = cmpxchg i128* %a, i128 %cmp, i128 %new seq_cst seq_cst
+ %0 = cmpxchg ptr %a, i128 %cmp, i128 %new seq_cst seq_cst
%1 = extractvalue { i128, i1 } %0, 0
ret i128 %1
}
-define i128 @cas_acqrel_acquire(i128* %a, i128 %cmp, i128 %new) {
+define i128 @cas_acqrel_acquire(ptr %a, i128 %cmp, i128 %new) {
; CHECK-LABEL: cas_acqrel_acquire:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwsync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = cmpxchg i128* %a, i128 %cmp, i128 %new acq_rel acquire
+ %0 = cmpxchg ptr %a, i128 %cmp, i128 %new acq_rel acquire
%1 = extractvalue { i128, i1 } %0, 0
ret i128 %1
}
-define i1 @cas_acqrel_acquire_check_succ(i128* %a, i128 %cmp, i128 %new) {
+define i1 @cas_acqrel_acquire_check_succ(ptr %a, i128 %cmp, i128 %new) {
; CHECK-LABEL: cas_acqrel_acquire_check_succ:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwsync
; PPC-PWR8-NEXT: mtlr r0
; PPC-PWR8-NEXT: blr
entry:
- %0 = cmpxchg i128* %a, i128 %cmp, i128 %new acq_rel acquire
+ %0 = cmpxchg ptr %a, i128 %cmp, i128 %new acq_rel acquire
%1 = extractvalue { i128, i1 } %0, 1
ret i1 %1
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- int8_t: atomic monotonic byte load at ptr+%off (register offset -> lbzx), sign-extended; -/+ lines migrate i8* to opaque ptr.
-define dso_local signext i16 @ld_reg_int16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint16_t: atomic monotonic i16 load at ptr+8; opaque-ptr form drops the i8*->i16* bitcast, so values renumber (%1 -> %0).
-define dso_local signext i16 @ld_align16_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint16_t: atomic monotonic i16 load at ptr+99999000; opaque-ptr form drops the bitcast and renumbers values.
-define dso_local signext i16 @ld_align32_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint16_t: atomic monotonic i16 load at ptr+1000000000000; opaque-ptr form drops the bitcast and renumbers values.
-define dso_local signext i16 @ld_align64_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint16_t: atomic monotonic i16 load at ptr+%off (register offset -> lhzx); opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_reg_int16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
ret i16 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
ret i16 %1
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i16 %0
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i16 %0
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i16 %0
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint32_t: atomic monotonic i32 load at ptr+8, truncated to i16; opaque-ptr form drops the bitcast and renumbers values.
-define dso_local signext i16 @ld_align16_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint32_t: atomic monotonic i32 load at ptr+99999000, truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_align32_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint32_t: atomic monotonic i32 load at ptr+1000000000000, truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_align64_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint32_t: atomic monotonic i32 load at ptr+%off (register offset -> lwzx), truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_reg_int16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = trunc i32 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint64_t: atomic monotonic i64 load at ptr+8, truncated to i16; opaque-ptr form drops the bitcast and renumbers values.
-define dso_local signext i16 @ld_align16_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint64_t: atomic monotonic i64 load at ptr+99999000, truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_align32_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint64_t: atomic monotonic i64 load at ptr+1000000000000, truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_align64_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; int16_t <- uint64_t: atomic monotonic i64 load at ptr+%off (register offset -> ldx), truncated to i16; opaque-ptr form drops the bitcast.
-define dso_local signext i16 @ld_reg_int16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- uint8_t: atomic monotonic byte load at ptr+8, zero-extended to i16; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- uint8_t: atomic monotonic byte load at ptr+99999000, zero-extended to i16; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- uint8_t: atomic monotonic byte load at ptr+1000000000000, zero-extended to i16; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- uint8_t: atomic monotonic byte load at ptr+%off (register offset -> lbzx), zero-extended to i16; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- int8_t: atomic monotonic byte load at ptr+8, sign-extended in IR then returned zeroext (hence clrldi); -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- int8_t: atomic monotonic byte load at ptr+99999000, sign-extended in IR then returned zeroext; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- int8_t: atomic monotonic byte load at ptr+1000000000000, sign-extended in IR then returned zeroext; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- int8_t: atomic monotonic byte load at ptr+%off (register offset -> lbzx), sign-extended in IR then returned zeroext; -/+ lines migrate i8* to opaque ptr.
-define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; uint16_t <- uint16_t: atomic monotonic i16 load at ptr+8, returned directly; opaque-ptr form drops the bitcast and renumbers values.
-define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ ret i16 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
ret i16 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
ret i16 %1
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i16 %0
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i16 %0
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i16 %0
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = trunc i32 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint16_t_uint8_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint8_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i16 %str to i8
- store atomic i8 %conv1, i8* %0 monotonic, align 1
+ store atomic i8 %conv1, ptr %0 monotonic, align 1
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 8
+ store atomic i8 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 16
+ store atomic i8 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 4096
+ store atomic i8 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ store atomic i8 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ store atomic i8 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ store atomic i8 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store atomic i16 %str, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store atomic i16 %str, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store atomic i16 %str, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint16_t_uint16_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint16_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store atomic i16 %str, ptr %add.ptr monotonic, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store atomic i16 %str, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store atomic i16 %str, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store atomic i16 %str, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ store atomic i16 %str, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: sth r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store atomic i16 %str, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ store atomic i16 %str, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: sth r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store atomic i16 %str, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ store atomic i16 %str, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i16 %str to i32
- store atomic i32 %conv1, i32* %0 monotonic, align 4
+ store atomic i32 %conv1, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 8
+ store atomic i32 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 16
+ store atomic i32 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4096
+ store atomic i32 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a zext'd u16 at ptr+8; small offset fits the DS-form std.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a zext'd u16 at ptr+99999000; P10 folds the 34-bit offset into a prefixed pstd, earlier CPUs materialize it and use stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a zext'd u16 at ptr+10^12; the offset is too wide even for prefixed forms, so it is materialized into a register (pli shown) and stored via stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a zext'd u16 at ptr+off (register offset); expects a single indexed stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i16 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
; Monotonic atomic i32 store of a sext'd i16 at ptr+8; small offset fits the D-form stw.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Monotonic atomic i32 store of a sext'd i16 at ptr+99999000; P10 uses prefixed pstw, earlier CPUs use indexed stwx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Monotonic atomic i32 store of a sext'd i16 at ptr+10^12; offset is materialized into a register (pli shown) and stored via stwx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Monotonic atomic i32 store of a sext'd i16 at ptr+off (register offset); expects a single indexed stwx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_int16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i16 %str to i32
- store atomic i32 %conv1, i32* %0 monotonic, align 4
+ store atomic i32 %conv1, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 8
+ store atomic i32 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 16
+ store atomic i32 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4096
+ store atomic i32 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a sext'd i16 at ptr+8; small offset fits the DS-form std.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a sext'd i16 at ptr+99999000; P10 uses prefixed pstd, earlier CPUs use indexed stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a sext'd i16 at ptr+10^12; offset is materialized into a register (pli shown) and stored via stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Monotonic atomic i64 store of a sext'd i16 at ptr+off (register offset); expects a single indexed stdx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_int16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i16 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+8, zero-extended to i32; lbz zero-extends natively so no extra instruction is expected.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+99999000, zero-extended to i32; P10 uses prefixed plbz, earlier CPUs use indexed lbzx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+10^12, zero-extended to i32; offset is materialized (pli shown) and loaded via lbzx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+off (register offset), zero-extended to i32; expects a single indexed lbzx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+8, sign-extended to i32; lbz zero-extends, so an explicit extsb is expected.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+99999000, sign-extended to i32; P10 uses prefixed plbz, and an extsb supplies the sign extension.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+10^12, sign-extended to i32; offset materialized (pli shown), then extsb for the sign extension.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Monotonic atomic i8 load at ptr+off (register offset), sign-extended to i32; lbzx followed by extsb.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
; Monotonic atomic i16 load at ptr+8, zero-extended to i32 (lhz); the opaque-pointer migration also folds away the intermediate bitcast and renumbers the values.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Monotonic atomic i16 load at ptr+99999000, zero-extended to i32; P10 uses prefixed plhz, earlier CPUs use indexed lhzx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Monotonic atomic i16 load at ptr+10^12, zero-extended to i32; offset is materialized (pli shown) and loaded via lhzx.
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = zext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = sext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwa r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
ret i32 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
ret i32 %1
}
; CHECK-NEXT: lwa r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i32 %0
}
; CHECK-NEXT: lwa r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i32 %0
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i32 %0
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = zext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhax r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = sext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ ret i32 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
ret i32 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
ret i32 %1
}
; CHECK-NEXT: lwz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i32 %0
}
; CHECK-NEXT: lwz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i32 %0
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i32 %0
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint32_t_uint8_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint8_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i32 %str to i8
- store atomic i8 %conv1, i8* %0 monotonic, align 1
+ store atomic i8 %conv1, ptr %0 monotonic, align 1
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 8
+ store atomic i8 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 16
+ store atomic i8 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 4096
+ store atomic i8 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ store atomic i8 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ store atomic i8 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ store atomic i8 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint32_t_uint16_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint16_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i32 %str to i16
- store atomic i16 %conv1, i16* %0 monotonic, align 2
+ store atomic i16 %conv1, ptr %0 monotonic, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 8
+ store atomic i16 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 16
+ store atomic i16 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 4096
+ store atomic i16 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ store atomic i16 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ store atomic i16 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ store atomic i16 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store atomic i32 %str, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store atomic i32 %str, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store atomic i32 %str, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint32_t_uint32_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint32_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store atomic i32 %str, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store atomic i32 %str, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store atomic i32 %str, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store atomic i32 %str, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %str, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: stw r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store atomic i32 %str, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %str, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: stw r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store atomic i32 %str, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %str, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i32 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-LABEL: st_align16_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align32_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align64_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_int32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 signext %str) {
; CHECK-LABEL: st_reg_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i32 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 zext load at 32-bit offset 99999000: P10 uses prefixed plhz with the
; full displacement; pre-P10 materializes the offset and uses indexed lhzx.
-define dso_local i64 @ld_align32_int64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 zext load at 64-bit offset 1000000000000 (needs pli/materialized
; constant on P10, indexed lhzx on pre-P10).
-define dso_local i64 @ld_align64_int64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 zext load at a runtime register offset %off; expected indexed lhzx.
-define dso_local i64 @ld_reg_int64_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = zext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: atomic i16 load at offset 8, sign-extended to i64
; (lhz + extsh, since PPC has no displacement-form sign-extending halfword load).
-define dso_local i64 @ld_align16_int64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 sext load at 32-bit offset 99999000 (plhz on P10; lhzx+extsh pre-P10).
-define dso_local i64 @ld_align32_int64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 sext load at 64-bit offset 1000000000000 (pli-materialized constant).
-define dso_local i64 @ld_align64_int64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i16 sext load at a register offset; expected lhzx + extsh.
-define dso_local i64 @ld_reg_int64_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = sext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i32 load at offset 8, zero-extended to i64; single lwz expected.
-define dso_local i64 @ld_align16_int64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 zext load at 32-bit offset 99999000 (plwz on P10; indexed lwzx pre-P10).
-define dso_local i64 @ld_align32_int64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 zext load at 64-bit offset 1000000000000 (pli-materialized constant).
-define dso_local i64 @ld_align64_int64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 zext load at a register offset; expected indexed lwzx.
-define dso_local i64 @ld_reg_int64_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = zext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: atomic i32 load at offset 8, sign-extended to i64; a single
; lwa (load word algebraic) covers load + sext.
-define dso_local i64 @ld_align16_int64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwa r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 sext load at 32-bit offset 99999000 (plwz on P10; indexed lwax pre-P10).
-define dso_local i64 @ld_align32_int64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 sext load at 64-bit offset 1000000000000 (pli constant + lwax pre-P10).
-define dso_local i64 @ld_align64_int64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i32 sext load at a register offset; expected indexed lwax.
-define dso_local i64 @ld_reg_int64_t_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_int32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = sext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i64 load at offset 8, returned directly; single ld expected.
; No extension needed, so the load result is the return value.
-define dso_local i64 @ld_align16_int64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i64 load at 32-bit offset 99999000 (prefixed pld on P10; indexed ldx pre-P10).
-define dso_local i64 @ld_align32_int64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i64 load at 64-bit offset 1000000000000 (pli constant + ldx pre-P10).
-define dso_local i64 @ld_align64_int64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i64 load at a register offset; expected indexed ldx.
-define dso_local i64 @ld_reg_int64_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
ret i64 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
ret i64 %1
}
; CHECK-NEXT: ld r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i64 %0
}
; CHECK-NEXT: ld r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret i64 %0
}
; CHECK-PREP10-NEXT: ld r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i64 %0
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i8 load at offset 8, zero-extended to i64; single lbz expected.
; i8 loads never had a bitcast, so only the pointer spelling changes (i8* -> ptr)
; and value numbering is unchanged.
-define dso_local i64 @ld_align16_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i8 zext load at 32-bit offset 99999000 (plbz on P10; indexed lbzx pre-P10).
-define dso_local i64 @ld_align32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i8 zext load at 64-bit offset 1000000000000 (pli constant + lbzx pre-P10).
-define dso_local i64 @ld_align64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Atomic i8 zext load at a register offset; expected indexed lbzx.
-define dso_local i64 @ld_reg_uint64_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = zext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv1 = sext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = zext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = sext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsh r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsh r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = zext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwa r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = sext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ ret i64 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
ret i64 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
ret i64 %1
}
; CHECK-NEXT: ld r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i64 %0
}
; CHECK-NEXT: ld r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret i64 %0
}
; CHECK-PREP10-NEXT: ld r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i64 %0
}
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint64_t_uint8_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint8_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %add.ptr monotonic, align 1
+ store atomic i8 %conv, ptr %add.ptr monotonic, align 1
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i64 %str to i8
- store atomic i8 %conv1, i8* %0 monotonic, align 1
+ store atomic i8 %conv1, ptr %0 monotonic, align 1
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 8
+ store atomic i8 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 16
+ store atomic i8 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 1
+ store atomic i8 %conv, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* %0 monotonic, align 4096
+ store atomic i8 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ store atomic i8 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ store atomic i8 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store atomic i8 %conv, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ store atomic i8 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint64_t_uint16_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint16_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i64 %str to i16
- store atomic i16 %conv1, i16* %0 monotonic, align 2
+ store atomic i16 %conv1, ptr %0 monotonic, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 8
+ store atomic i16 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 16
+ store atomic i16 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 4096
+ store atomic i16 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ store atomic i16 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ store atomic i16 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ store atomic i16 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
+ %0 = inttoptr i64 %ptr to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint64_t_uint32_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint32_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv1 = trunc i64 %str to i32
- store atomic i32 %conv1, i32* %0 monotonic, align 4
+ store atomic i32 %conv1, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 8
+ store atomic i32 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 16
+ store atomic i32 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4096
+ store atomic i32 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store atomic i64 %str, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store atomic i64 %str, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store atomic i64 %str, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_uint64_t_uint64_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint64_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store atomic i64 %str, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store atomic i64 %str, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store atomic i64 %str, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: std r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store atomic i64 %str, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %str, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: std r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store atomic i64 %str, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %str, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: std r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store atomic i64 %str, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %str, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
ret i8 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
ret i8 %1
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i8 %0
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i8 %0
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i8 %0
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = trunc i16 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = trunc i32 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load atomic i8, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i8, ptr %add.ptr monotonic, align 1
ret i8 %0
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 8
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 16
ret i8 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load atomic i8, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i8, ptr %0 monotonic, align 4096
ret i8 %1
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ %0 = load atomic i8, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret i8 %0
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ %0 = load atomic i8, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret i8 %0
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i8, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ %0 = load atomic i8, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret i8 %0
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i16, ptr %add.ptr monotonic, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv1 = trunc i16 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 8
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 16
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load atomic i16, i16* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i16, ptr %0 monotonic, align 4096
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ %0 = load atomic i16, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ %0 = load atomic i16, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i16, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ %0 = load atomic i16, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i32, ptr %add.ptr monotonic, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv1 = trunc i32 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 8
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 16
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load atomic i32, i32* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i32, ptr %0 monotonic, align 4096
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ %0 = load atomic i32, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ %0 = load atomic i32, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i32, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ %0 = load atomic i32, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i64 load at %ptr + 8; returns the low byte.
; Expected codegen: single D-form ld 8(r3) followed by clrldi.
-define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i64 load at %ptr + 99999000; returns the low byte.
; On P10 the offset fits a prefixed pld; pre-P10 checks (elided here) differ.
-define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i64 load at %ptr + 1000000000000; returns the low byte.
; Offset needs materialization (P10: pli r4, 244140625 then shift — 244140625
; * 4096 == 1000000000000).
-define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i64 load at %ptr + %off (register offset); returns the low
; byte. Expected codegen: X-form ldx + clrldi.
-define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load atomic i64, ptr %add.ptr monotonic, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv1 = trunc i64 %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 16
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load atomic i64, i64* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load atomic i64, ptr %0 monotonic, align 4096
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ %0 = load atomic i64, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: clrldi r3, r3, 56
; CHECK-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ %0 = load atomic i64, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 56
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load atomic i64, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ %0 = load atomic i64, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- store atomic i8 %str, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i8 store of %str to %ptr + 8. Expected codegen: stb 8(r3).
-define dso_local void @st_align16_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store atomic i8 %str, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store atomic i8 %str, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i8 store of %str to %ptr + 99999000.
; P10 uses prefixed pstb; pre-P10 materializes the offset and uses stbx.
-define dso_local void @st_align32_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store atomic i8 %str, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store atomic i8 %str, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i8 store of %str to %ptr + 1000000000000; the large offset
; is materialized into a register (P10: pli r5, 244140625 then shift).
-define dso_local void @st_align64_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store atomic i8 %str, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store atomic i8 %str, ptr %add.ptr monotonic, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Monotonic atomic i8 store of %str to %ptr + %off (register offset): stbx.
-define dso_local void @st_reg_uint8_t_uint8_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint8_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store atomic i8 %str, i8* %add.ptr monotonic, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store atomic i8 %str, ptr %add.ptr monotonic, align 1
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 1
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 8
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 16
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 1
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 1
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store atomic i8 %str, i8* %0 monotonic, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store atomic i8 %str, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: stb r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store atomic i8 %str, i8* inttoptr (i64 4080 to i8*) monotonic, align 16
+ store atomic i8 %str, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: stb r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store atomic i8 %str, i8* inttoptr (i64 9999900 to i8*) monotonic, align 4
+ store atomic i8 %str, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: stb r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store atomic i8 %str, i8* inttoptr (i64 1000000000000 to i8*) monotonic, align 4096
+ store atomic i8 %str, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i16 and stores it atomically (monotonic) at %ptr + 8.
; Expected codegen: sth 8(r3).
-define dso_local void @st_align16_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i16 and stores it atomically (monotonic) at
; %ptr + 99999000. P10: prefixed psth; pre-P10: sthx with materialized offset.
-define dso_local void @st_align32_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i16 and stores it atomically (monotonic) at
; %ptr + 1000000000000 (offset materialized; P10 starts with pli r5, 244140625).
-define dso_local void @st_align64_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i16 and stores it atomically (monotonic) at %ptr + %off
; (register offset). Expected codegen: sthx.
-define dso_local void @st_reg_uint8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i8 %str to i16
- store atomic i16 %conv1, i16* %0 monotonic, align 2
+ store atomic i16 %conv1, ptr %0 monotonic, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 8
+ store atomic i16 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 16
+ store atomic i16 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 4096
+ store atomic i16 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ store atomic i16 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ store atomic i16 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ store atomic i16 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i32 and stores it atomically (monotonic) at %ptr + 8.
; Expected codegen: stw 8(r3).
-define dso_local void @st_align16_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i32 and stores it atomically (monotonic) at
; %ptr + 99999000. P10: prefixed pstw; pre-P10: stwx with materialized offset.
-define dso_local void @st_align32_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i32 and stores it atomically (monotonic) at
; %ptr + 1000000000000 (offset materialized; P10 starts with pli r5, 244140625).
-define dso_local void @st_align64_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i32 and stores it atomically (monotonic) at %ptr + %off
; (register offset). Expected codegen: stwx.
-define dso_local void @st_reg_uint8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i8 %str to i32
- store atomic i32 %conv1, i32* %0 monotonic, align 4
+ store atomic i32 %conv1, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 8
+ store atomic i32 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 16
+ store atomic i32 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4096
+ store atomic i32 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i64 and stores it atomically (monotonic) at %ptr + 8.
; Expected codegen: std 8(r3).
-define dso_local void @st_align16_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i64 and stores it atomically (monotonic) at
; %ptr + 99999000. P10: prefixed pstd; pre-P10: stdx with materialized offset.
-define dso_local void @st_align32_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i64 and stores it atomically (monotonic) at
; %ptr + 1000000000000 (offset materialized; P10 starts with pli r5, 244140625).
-define dso_local void @st_align64_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Zero-extends %str to i64 and stores it atomically (monotonic) at %ptr + %off
; (register offset). Expected codegen: stdx.
-define dso_local void @st_reg_uint8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = zext i8 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: sign-extends %str (i8) to i16 and stores it atomically
; (monotonic) at %ptr + 8. Expected codegen: sth 8(r3).
-define dso_local void @st_align16_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: sign-extends %str to i16 and stores it atomically (monotonic)
; at %ptr + 99999000. P10: prefixed psth; pre-P10: sthx.
-define dso_local void @st_align32_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: sign-extends %str to i16 and stores it atomically (monotonic)
; at %ptr + 1000000000000 (offset materialized; P10: pli r5, 244140625).
-define dso_local void @st_align64_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
; Signed variant: sign-extends %str to i16 and stores it atomically (monotonic)
; at %ptr + %off (register offset). Expected codegen: sthx.
-define dso_local void @st_reg_int8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %add.ptr monotonic, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i8 %str to i16
- store atomic i16 %conv1, i16* %0 monotonic, align 2
+ store atomic i16 %conv1, ptr %0 monotonic, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 8
+ store atomic i16 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 16
+ store atomic i16 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 2
+ store atomic i16 %conv, ptr %0 monotonic, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* %0 monotonic, align 4096
+ store atomic i16 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 4080 to i16*) monotonic, align 16
+ store atomic i16 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 9999900 to i16*) monotonic, align 4
+ store atomic i16 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store atomic i16 %conv, i16* inttoptr (i64 1000000000000 to i16*) monotonic, align 4096
+ store atomic i16 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_int8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %add.ptr monotonic, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i8 %str to i32
- store atomic i32 %conv1, i32* %0 monotonic, align 4
+ store atomic i32 %conv1, ptr %0 monotonic, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 8
+ store atomic i32 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 16
+ store atomic i32 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4
+ store atomic i32 %conv, ptr %0 monotonic, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* %0 monotonic, align 4096
+ store atomic i32 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 4080 to i32*) monotonic, align 16
+ store atomic i32 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 9999900 to i32*) monotonic, align 4
+ store atomic i32 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store atomic i32 %conv, i32* inttoptr (i64 1000000000000 to i32*) monotonic, align 4096
+ store atomic i32 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
+ %0 = inttoptr i64 %ptr to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align16_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align32_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_align64_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn
-define dso_local void @st_reg_int8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %add.ptr monotonic, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv1 = sext i8 %str to i64
- store atomic i64 %conv1, i64* %0 monotonic, align 8
+ store atomic i64 %conv1, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 16
+ store atomic i64 %conv, ptr %0 monotonic, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 8
+ store atomic i64 %conv, ptr %0 monotonic, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
+ %0 = inttoptr i64 %or to ptr
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* %0 monotonic, align 4096
+ store atomic i64 %conv, ptr %0 monotonic, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 4080 to i64*) monotonic, align 16
+ store atomic i64 %conv, ptr inttoptr (i64 4080 to ptr) monotonic, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 9999900 to i64*) monotonic, align 8
+ store atomic i64 %conv, ptr inttoptr (i64 9999900 to ptr) monotonic, align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store atomic i64 %conv, i64* inttoptr (i64 1000000000000 to i64*) monotonic, align 4096
+ store atomic i64 %conv, ptr inttoptr (i64 1000000000000 to ptr) monotonic, align 4096
ret void
}
; versions of the instructions.
; Indexed version of loads
-define i8 @load_x_i8_seq_cst([100000 x i8]* %mem) {
+define i8 @load_x_i8_seq_cst(ptr %mem) {
; PPC32-LABEL: load_x_i8_seq_cst:
; PPC32: # %bb.0:
; PPC32-NEXT: lis r4, 1
; PPC64-NEXT: bne- cr7, .+4
; PPC64-NEXT: isync
; PPC64-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
- %val = load atomic i8, i8* %ptr seq_cst, align 1
+ %ptr = getelementptr inbounds [100000 x i8], ptr %mem, i64 0, i64 90000
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
ret i8 %val
}
-define i16 @load_x_i16_acquire([100000 x i16]* %mem) {
+define i16 @load_x_i16_acquire(ptr %mem) {
; PPC32-LABEL: load_x_i16_acquire:
; PPC32: # %bb.0:
; PPC32-NEXT: lis r4, 2
; PPC64-NEXT: bne- cr7, .+4
; PPC64-NEXT: isync
; PPC64-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
- %val = load atomic i16, i16* %ptr acquire, align 2
+ %ptr = getelementptr inbounds [100000 x i16], ptr %mem, i64 0, i64 90000
+ %val = load atomic i16, ptr %ptr acquire, align 2
ret i16 %val
}
-define i32 @load_x_i32_monotonic([100000 x i32]* %mem) {
+define i32 @load_x_i32_monotonic(ptr %mem) {
; CHECK-LABEL: load_x_i32_monotonic:
; CHECK: # %bb.0:
; CHECK-NEXT: lis r4, 5
; CHECK-NEXT: ori r4, r4, 32320
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
- %val = load atomic i32, i32* %ptr monotonic, align 4
+ %ptr = getelementptr inbounds [100000 x i32], ptr %mem, i64 0, i64 90000
+ %val = load atomic i32, ptr %ptr monotonic, align 4
ret i32 %val
}
-define i64 @load_x_i64_unordered([100000 x i64]* %mem) {
+define i64 @load_x_i64_unordered(ptr %mem) {
; PPC32-LABEL: load_x_i64_unordered:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: ori r4, r4, 64640
; PPC64-NEXT: ldx r3, r3, r4
; PPC64-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
- %val = load atomic i64, i64* %ptr unordered, align 8
+ %ptr = getelementptr inbounds [100000 x i64], ptr %mem, i64 0, i64 90000
+ %val = load atomic i64, ptr %ptr unordered, align 8
ret i64 %val
}
; Indexed version of stores
-define void @store_x_i8_seq_cst([100000 x i8]* %mem) {
+define void @store_x_i8_seq_cst(ptr %mem) {
; CHECK-LABEL: store_x_i8_seq_cst:
; CHECK: # %bb.0:
; CHECK-NEXT: lis r4, 1
; CHECK-NEXT: sync
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
- store atomic i8 42, i8* %ptr seq_cst, align 1
+ %ptr = getelementptr inbounds [100000 x i8], ptr %mem, i64 0, i64 90000
+ store atomic i8 42, ptr %ptr seq_cst, align 1
ret void
}
-define void @store_x_i16_release([100000 x i16]* %mem) {
+define void @store_x_i16_release(ptr %mem) {
; CHECK-LABEL: store_x_i16_release:
; CHECK: # %bb.0:
; CHECK-NEXT: lis r4, 2
; CHECK-NEXT: lwsync
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
- store atomic i16 42, i16* %ptr release, align 2
+ %ptr = getelementptr inbounds [100000 x i16], ptr %mem, i64 0, i64 90000
+ store atomic i16 42, ptr %ptr release, align 2
ret void
}
-define void @store_x_i32_monotonic([100000 x i32]* %mem) {
+define void @store_x_i32_monotonic(ptr %mem) {
; CHECK-LABEL: store_x_i32_monotonic:
; CHECK: # %bb.0:
; CHECK-NEXT: lis r4, 5
; CHECK-NEXT: li r5, 42
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
- store atomic i32 42, i32* %ptr monotonic, align 4
+ %ptr = getelementptr inbounds [100000 x i32], ptr %mem, i64 0, i64 90000
+ store atomic i32 42, ptr %ptr monotonic, align 4
ret void
}
-define void @store_x_i64_unordered([100000 x i64]* %mem) {
+define void @store_x_i64_unordered(ptr %mem) {
; PPC32-LABEL: store_x_i64_unordered:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: li r5, 42
; PPC64-NEXT: stdx r5, r3, r4
; PPC64-NEXT: blr
- %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
- store atomic i64 42, i64* %ptr unordered, align 8
+ %ptr = getelementptr inbounds [100000 x i64], ptr %mem, i64 0, i64 90000
+ store atomic i64 42, ptr %ptr unordered, align 8
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64le-linux-gnu < %s | FileCheck %s -check-prefix=PPC64LE
-define i8 @test0(i8* %ptr) {
+define i8 @test0(ptr %ptr) {
; PPC64LE-LABEL: test0:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i8, i8* %ptr unordered, align 1
+ %val = load atomic i8, ptr %ptr unordered, align 1
ret i8 %val
}
-define i8 @test1(i8* %ptr) {
+define i8 @test1(ptr %ptr) {
; PPC64LE-LABEL: test1:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i8, i8* %ptr monotonic, align 1
+ %val = load atomic i8, ptr %ptr monotonic, align 1
ret i8 %val
}
-define i8 @test2(i8* %ptr) {
+define i8 @test2(ptr %ptr) {
; PPC64LE-LABEL: test2:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lbz 3, 0(3)
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i8, i8* %ptr acquire, align 1
+ %val = load atomic i8, ptr %ptr acquire, align 1
ret i8 %val
}
-define i8 @test3(i8* %ptr) {
+define i8 @test3(ptr %ptr) {
; PPC64LE-LABEL: test3:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i8, i8* %ptr seq_cst, align 1
+ %val = load atomic i8, ptr %ptr seq_cst, align 1
ret i8 %val
}
-define i16 @test4(i16* %ptr) {
+define i16 @test4(ptr %ptr) {
; PPC64LE-LABEL: test4:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i16, i16* %ptr unordered, align 2
+ %val = load atomic i16, ptr %ptr unordered, align 2
ret i16 %val
}
-define i16 @test5(i16* %ptr) {
+define i16 @test5(ptr %ptr) {
; PPC64LE-LABEL: test5:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i16, i16* %ptr monotonic, align 2
+ %val = load atomic i16, ptr %ptr monotonic, align 2
ret i16 %val
}
-define i16 @test6(i16* %ptr) {
+define i16 @test6(ptr %ptr) {
; PPC64LE-LABEL: test6:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lhz 3, 0(3)
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i16, i16* %ptr acquire, align 2
+ %val = load atomic i16, ptr %ptr acquire, align 2
ret i16 %val
}
-define i16 @test7(i16* %ptr) {
+define i16 @test7(ptr %ptr) {
; PPC64LE-LABEL: test7:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i16, i16* %ptr seq_cst, align 2
+ %val = load atomic i16, ptr %ptr seq_cst, align 2
ret i16 %val
}
-define i32 @test8(i32* %ptr) {
+define i32 @test8(ptr %ptr) {
; PPC64LE-LABEL: test8:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i32, i32* %ptr unordered, align 4
+ %val = load atomic i32, ptr %ptr unordered, align 4
ret i32 %val
}
-define i32 @test9(i32* %ptr) {
+define i32 @test9(ptr %ptr) {
; PPC64LE-LABEL: test9:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i32, i32* %ptr monotonic, align 4
+ %val = load atomic i32, ptr %ptr monotonic, align 4
ret i32 %val
}
-define i32 @test10(i32* %ptr) {
+define i32 @test10(ptr %ptr) {
; PPC64LE-LABEL: test10:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i32, i32* %ptr acquire, align 4
+ %val = load atomic i32, ptr %ptr acquire, align 4
ret i32 %val
}
-define i32 @test11(i32* %ptr) {
+define i32 @test11(ptr %ptr) {
; PPC64LE-LABEL: test11:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i32, i32* %ptr seq_cst, align 4
+ %val = load atomic i32, ptr %ptr seq_cst, align 4
ret i32 %val
}
-define i64 @test12(i64* %ptr) {
+define i64 @test12(ptr %ptr) {
; PPC64LE-LABEL: test12:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i64, i64* %ptr unordered, align 8
+ %val = load atomic i64, ptr %ptr unordered, align 8
ret i64 %val
}
-define i64 @test13(i64* %ptr) {
+define i64 @test13(ptr %ptr) {
; PPC64LE-LABEL: test13:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: blr
- %val = load atomic i64, i64* %ptr monotonic, align 8
+ %val = load atomic i64, ptr %ptr monotonic, align 8
ret i64 %val
}
-define i64 @test14(i64* %ptr) {
+define i64 @test14(ptr %ptr) {
; PPC64LE-LABEL: test14:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: ld 3, 0(3)
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i64, i64* %ptr acquire, align 8
+ %val = load atomic i64, ptr %ptr acquire, align 8
ret i64 %val
}
-define i64 @test15(i64* %ptr) {
+define i64 @test15(ptr %ptr) {
; PPC64LE-LABEL: test15:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: bne- 7, .+4
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: blr
- %val = load atomic i64, i64* %ptr seq_cst, align 8
+ %val = load atomic i64, ptr %ptr seq_cst, align 8
ret i64 %val
}
-define void @test16(i8* %ptr, i8 %val) {
+define void @test16(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test16:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i8 %val, i8* %ptr unordered, align 1
+ store atomic i8 %val, ptr %ptr unordered, align 1
ret void
}
-define void @test17(i8* %ptr, i8 %val) {
+define void @test17(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test17:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i8 %val, i8* %ptr monotonic, align 1
+ store atomic i8 %val, ptr %ptr monotonic, align 1
ret void
}
-define void @test18(i8* %ptr, i8 %val) {
+define void @test18(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test18:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i8 %val, i8* %ptr release, align 1
+ store atomic i8 %val, ptr %ptr release, align 1
ret void
}
-define void @test19(i8* %ptr, i8 %val) {
+define void @test19(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test19:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: stb 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i8 %val, i8* %ptr seq_cst, align 1
+ store atomic i8 %val, ptr %ptr seq_cst, align 1
ret void
}
-define void @test20(i16* %ptr, i16 %val) {
+define void @test20(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test20:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i16 %val, i16* %ptr unordered, align 2
+ store atomic i16 %val, ptr %ptr unordered, align 2
ret void
}
-define void @test21(i16* %ptr, i16 %val) {
+define void @test21(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test21:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i16 %val, i16* %ptr monotonic, align 2
+ store atomic i16 %val, ptr %ptr monotonic, align 2
ret void
}
-define void @test22(i16* %ptr, i16 %val) {
+define void @test22(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test22:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i16 %val, i16* %ptr release, align 2
+ store atomic i16 %val, ptr %ptr release, align 2
ret void
}
-define void @test23(i16* %ptr, i16 %val) {
+define void @test23(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test23:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: sth 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i16 %val, i16* %ptr seq_cst, align 2
+ store atomic i16 %val, ptr %ptr seq_cst, align 2
ret void
}
-define void @test24(i32* %ptr, i32 %val) {
+define void @test24(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test24:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i32 %val, i32* %ptr unordered, align 4
+ store atomic i32 %val, ptr %ptr unordered, align 4
ret void
}
-define void @test25(i32* %ptr, i32 %val) {
+define void @test25(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test25:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i32 %val, i32* %ptr monotonic, align 4
+ store atomic i32 %val, ptr %ptr monotonic, align 4
ret void
}
-define void @test26(i32* %ptr, i32 %val) {
+define void @test26(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test26:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i32 %val, i32* %ptr release, align 4
+ store atomic i32 %val, ptr %ptr release, align 4
ret void
}
-define void @test27(i32* %ptr, i32 %val) {
+define void @test27(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test27:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: stw 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i32 %val, i32* %ptr seq_cst, align 4
+ store atomic i32 %val, ptr %ptr seq_cst, align 4
ret void
}
-define void @test28(i64* %ptr, i64 %val) {
+define void @test28(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test28:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i64 %val, i64* %ptr unordered, align 8
+ store atomic i64 %val, ptr %ptr unordered, align 8
ret void
}
-define void @test29(i64* %ptr, i64 %val) {
+define void @test29(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test29:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i64 %val, i64* %ptr monotonic, align 8
+ store atomic i64 %val, ptr %ptr monotonic, align 8
ret void
}
-define void @test30(i64* %ptr, i64 %val) {
+define void @test30(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test30:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i64 %val, i64* %ptr release, align 8
+ store atomic i64 %val, ptr %ptr release, align 8
ret void
}
-define void @test31(i64* %ptr, i64 %val) {
+define void @test31(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test31:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: blr
- store atomic i64 %val, i64* %ptr seq_cst, align 8
+ store atomic i64 %val, ptr %ptr seq_cst, align 8
ret void
}
ret void
}
-define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test40(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test40:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: bne 0, .LBB40_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
ret void
}
-define void @test41(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test41(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test41:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB41_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire monotonic
ret void
}
-define void @test42(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test42(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test42:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB42_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire acquire
ret void
}
-define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test43(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test43:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: bne 0, .LBB43_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val release monotonic
ret void
}
-define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test44(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test44:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB44_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val release acquire
ret void
}
-define void @test45(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test45(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test45:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB45_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acq_rel monotonic
ret void
}
-define void @test46(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test46(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test46:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB46_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acq_rel acquire
ret void
}
-define void @test47(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test47(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test47:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB47_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val seq_cst monotonic
ret void
}
-define void @test48(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test48(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test48:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB48_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val seq_cst acquire
ret void
}
-define void @test49(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test49(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test49:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB49_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val seq_cst seq_cst
ret void
}
-define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test50(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test50:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: bne 0, .LBB50_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
ret void
}
-define void @test51(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test51(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test51:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB51_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire monotonic
ret void
}
-define void @test52(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test52(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test52:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB52_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire acquire
ret void
}
-define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test53(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test53:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: bne 0, .LBB53_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val release monotonic
ret void
}
-define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test54(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test54:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB54_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val release acquire
ret void
}
-define void @test55(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test55(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test55:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB55_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acq_rel monotonic
ret void
}
-define void @test56(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test56(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test56:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB56_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acq_rel acquire
ret void
}
-define void @test57(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test57(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test57:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB57_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val seq_cst monotonic
ret void
}
-define void @test58(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test58(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test58:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB58_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val seq_cst acquire
ret void
}
-define void @test59(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test59(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test59:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB59_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val seq_cst seq_cst
ret void
}
-define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test60(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test60:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB60_1:
; PPC64LE-NEXT: bne 0, .LBB60_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
ret void
}
-define void @test61(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test61(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test61:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB61_1:
; PPC64LE-NEXT: .LBB61_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire monotonic
ret void
}
-define void @test62(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test62(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test62:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB62_1:
; PPC64LE-NEXT: .LBB62_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire acquire
ret void
}
-define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test63(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test63:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: bne 0, .LBB63_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val release monotonic
ret void
}
-define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test64(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test64:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB64_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val release acquire
ret void
}
-define void @test65(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test65(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test65:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB65_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acq_rel monotonic
ret void
}
-define void @test66(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test66(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test66:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB66_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acq_rel acquire
ret void
}
-define void @test67(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test67(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test67:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB67_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val seq_cst monotonic
ret void
}
-define void @test68(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test68(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test68:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB68_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val seq_cst acquire
ret void
}
-define void @test69(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test69(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test69:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB69_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val seq_cst seq_cst
ret void
}
-define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test70(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test70:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB70_1:
; PPC64LE-NEXT: bne 0, .LBB70_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
ret void
}
-define void @test71(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test71(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test71:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB71_1:
; PPC64LE-NEXT: .LBB71_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire monotonic
ret void
}
-define void @test72(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test72(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test72:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB72_1:
; PPC64LE-NEXT: .LBB72_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire acquire
ret void
}
-define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test73(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test73:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: bne 0, .LBB73_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val release monotonic
ret void
}
-define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test74(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test74:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB74_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val release acquire
ret void
}
-define void @test75(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test75(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test75:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB75_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acq_rel monotonic
ret void
}
-define void @test76(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test76(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test76:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB76_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acq_rel acquire
ret void
}
-define void @test77(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test77(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test77:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB77_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val seq_cst monotonic
ret void
}
-define void @test78(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test78(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test78:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB78_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val seq_cst acquire
ret void
}
-define void @test79(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test79(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test79:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB79_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val seq_cst seq_cst
ret void
}
-define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test80(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test80:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: bne 0, .LBB80_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic
ret void
}
-define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test81(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test81:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB81_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire monotonic
ret void
}
-define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test82(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test82:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB82_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") acquire acquire
ret void
}
-define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test83(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test83:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: bne 0, .LBB83_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic
ret void
}
-define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test84(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test84:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB84_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire
ret void
}
-define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test85(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test85:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB85_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel monotonic
ret void
}
-define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test86(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test86:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB86_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") acq_rel acquire
ret void
}
-define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test87(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test87:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB87_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst monotonic
ret void
}
-define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test88(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test88:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB88_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst acquire
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst acquire
ret void
}
-define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test89(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test89:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: .LBB89_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
-define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test90(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test90:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: bne 0, .LBB90_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic
ret void
}
-define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test91(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test91:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB91_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire monotonic
ret void
}
-define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test92(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test92:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB92_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") acquire acquire
ret void
}
-define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test93(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test93:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: bne 0, .LBB93_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic
ret void
}
-define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test94(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test94:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB94_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire
ret void
}
-define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test95(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test95:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB95_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel monotonic
ret void
}
-define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test96(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test96:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB96_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") acq_rel acquire
ret void
}
-define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test97(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test97:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB97_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst monotonic
ret void
}
-define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test98(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test98:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB98_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst acquire
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst acquire
ret void
}
-define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
+define void @test99(ptr %ptr, i16 %cmp, i16 %val) {
; PPC64LE-LABEL: test99:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 16
; PPC64LE-NEXT: .LBB99_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
-define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test100(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test100:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB100_1:
; PPC64LE-NEXT: bne 0, .LBB100_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic
ret void
}
-define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test101(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test101:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB101_1:
; PPC64LE-NEXT: .LBB101_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire monotonic
ret void
}
-define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test102(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test102:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB102_1:
; PPC64LE-NEXT: .LBB102_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") acquire acquire
ret void
}
-define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test103(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test103:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: bne 0, .LBB103_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic
ret void
}
-define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test104(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test104:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB104_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire
ret void
}
-define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test105(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test105:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB105_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel monotonic
ret void
}
-define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test106(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test106:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB106_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") acq_rel acquire
ret void
}
-define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test107(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test107:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB107_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst monotonic
ret void
}
-define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test108(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test108:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB108_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst acquire
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst acquire
ret void
}
-define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
+define void @test109(ptr %ptr, i32 %cmp, i32 %val) {
; PPC64LE-LABEL: test109:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB109_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
-define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test110(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test110:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB110_1:
; PPC64LE-NEXT: bne 0, .LBB110_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic
ret void
}
-define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test111(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test111:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB111_1:
; PPC64LE-NEXT: .LBB111_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire monotonic
ret void
}
-define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test112(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test112:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB112_1:
; PPC64LE-NEXT: .LBB112_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") acquire acquire
ret void
}
-define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test113(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test113:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: bne 0, .LBB113_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic
ret void
}
-define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test114(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test114:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB114_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire
ret void
}
-define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test115(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test115:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB115_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel monotonic
ret void
}
-define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test116(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test116:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB116_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") acq_rel acquire
ret void
}
-define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test117(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test117:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB117_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst monotonic
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst monotonic
ret void
}
-define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test118(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test118:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB118_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst acquire
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst acquire
ret void
}
-define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
+define void @test119(ptr %ptr, i64 %cmp, i64 %val) {
; PPC64LE-LABEL: test119:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: .LBB119_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst seq_cst
+ %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val syncscope("singlethread") seq_cst seq_cst
ret void
}
-define i8 @test120(i8* %ptr, i8 %val) {
+define i8 @test120(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test120:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB120_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw xchg ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test121(i8* %ptr, i8 %val) {
+define i8 @test121(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test121:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val acquire
+ %ret = atomicrmw xchg ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test122(i8* %ptr, i8 %val) {
+define i8 @test122(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test122:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val release
+ %ret = atomicrmw xchg ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test123(i8* %ptr, i8 %val) {
+define i8 @test123(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test123:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test124(i8* %ptr, i8 %val) {
+define i8 @test124(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test124:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test125(i16* %ptr, i16 %val) {
+define i16 @test125(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test125:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB125_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw xchg ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test126(i16* %ptr, i16 %val) {
+define i16 @test126(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test126:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val acquire
+ %ret = atomicrmw xchg ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test127(i16* %ptr, i16 %val) {
+define i16 @test127(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test127:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val release
+ %ret = atomicrmw xchg ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test128(i16* %ptr, i16 %val) {
+define i16 @test128(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test128:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test129(i16* %ptr, i16 %val) {
+define i16 @test129(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test129:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test130(i32* %ptr, i32 %val) {
+define i32 @test130(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test130:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB130_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw xchg ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test131(i32* %ptr, i32 %val) {
+define i32 @test131(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test131:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val acquire
+ %ret = atomicrmw xchg ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test132(i32* %ptr, i32 %val) {
+define i32 @test132(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test132:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val release
+ %ret = atomicrmw xchg ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test133(i32* %ptr, i32 %val) {
+define i32 @test133(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test133:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test134(i32* %ptr, i32 %val) {
+define i32 @test134(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test134:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test135(i64* %ptr, i64 %val) {
+define i64 @test135(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test135:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB135_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw xchg ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test136(i64* %ptr, i64 %val) {
+define i64 @test136(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test136:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val acquire
+ %ret = atomicrmw xchg ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test137(i64* %ptr, i64 %val) {
+define i64 @test137(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test137:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val release
+ %ret = atomicrmw xchg ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test138(i64* %ptr, i64 %val) {
+define i64 @test138(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test138:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test139(i64* %ptr, i64 %val) {
+define i64 @test139(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test139:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test140(i8* %ptr, i8 %val) {
+define i8 @test140(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test140:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB140_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw add ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test141(i8* %ptr, i8 %val) {
+define i8 @test141(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test141:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val acquire
+ %ret = atomicrmw add ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test142(i8* %ptr, i8 %val) {
+define i8 @test142(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test142:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val release
+ %ret = atomicrmw add ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test143(i8* %ptr, i8 %val) {
+define i8 @test143(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test143:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw add ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test144(i8* %ptr, i8 %val) {
+define i8 @test144(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test144:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw add ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test145(i16* %ptr, i16 %val) {
+define i16 @test145(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test145:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB145_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw add ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test146(i16* %ptr, i16 %val) {
+define i16 @test146(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test146:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val acquire
+ %ret = atomicrmw add ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test147(i16* %ptr, i16 %val) {
+define i16 @test147(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test147:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val release
+ %ret = atomicrmw add ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test148(i16* %ptr, i16 %val) {
+define i16 @test148(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test148:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw add ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test149(i16* %ptr, i16 %val) {
+define i16 @test149(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test149:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw add ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test150(i32* %ptr, i32 %val) {
+define i32 @test150(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test150:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB150_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw add ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test151(i32* %ptr, i32 %val) {
+define i32 @test151(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test151:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val acquire
+ %ret = atomicrmw add ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test152(i32* %ptr, i32 %val) {
+define i32 @test152(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test152:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val release
+ %ret = atomicrmw add ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test153(i32* %ptr, i32 %val) {
+define i32 @test153(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test153:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw add ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test154(i32* %ptr, i32 %val) {
+define i32 @test154(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test154:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw add ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test155(i64* %ptr, i64 %val) {
+define i64 @test155(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test155:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB155_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw add ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test156(i64* %ptr, i64 %val) {
+define i64 @test156(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test156:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val acquire
+ %ret = atomicrmw add ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test157(i64* %ptr, i64 %val) {
+define i64 @test157(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test157:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val release
+ %ret = atomicrmw add ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test158(i64* %ptr, i64 %val) {
+define i64 @test158(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test158:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw add ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test159(i64* %ptr, i64 %val) {
+define i64 @test159(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test159:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw add ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test160(i8* %ptr, i8 %val) {
+define i8 @test160(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test160:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB160_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw sub ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test161(i8* %ptr, i8 %val) {
+define i8 @test161(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test161:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val acquire
+ %ret = atomicrmw sub ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test162(i8* %ptr, i8 %val) {
+define i8 @test162(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test162:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val release
+ %ret = atomicrmw sub ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test163(i8* %ptr, i8 %val) {
+define i8 @test163(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test163:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw sub ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test164(i8* %ptr, i8 %val) {
+define i8 @test164(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test164:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw sub ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test165(i16* %ptr, i16 %val) {
+define i16 @test165(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test165:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB165_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw sub ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test166(i16* %ptr, i16 %val) {
+define i16 @test166(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test166:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val acquire
+ %ret = atomicrmw sub ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test167(i16* %ptr, i16 %val) {
+define i16 @test167(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test167:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val release
+ %ret = atomicrmw sub ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test168(i16* %ptr, i16 %val) {
+define i16 @test168(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test168:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw sub ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test169(i16* %ptr, i16 %val) {
+define i16 @test169(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test169:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw sub ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test170(i32* %ptr, i32 %val) {
+define i32 @test170(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test170:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB170_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw sub ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test171(i32* %ptr, i32 %val) {
+define i32 @test171(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test171:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val acquire
+ %ret = atomicrmw sub ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test172(i32* %ptr, i32 %val) {
+define i32 @test172(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test172:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val release
+ %ret = atomicrmw sub ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test173(i32* %ptr, i32 %val) {
+define i32 @test173(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test173:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw sub ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test174(i32* %ptr, i32 %val) {
+define i32 @test174(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test174:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw sub ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test175(i64* %ptr, i64 %val) {
+define i64 @test175(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test175:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB175_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw sub ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test176(i64* %ptr, i64 %val) {
+define i64 @test176(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test176:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val acquire
+ %ret = atomicrmw sub ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test177(i64* %ptr, i64 %val) {
+define i64 @test177(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test177:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val release
+ %ret = atomicrmw sub ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test178(i64* %ptr, i64 %val) {
+define i64 @test178(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test178:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw sub ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test179(i64* %ptr, i64 %val) {
+define i64 @test179(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test179:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw sub ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test180(i8* %ptr, i8 %val) {
+define i8 @test180(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test180:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB180_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw and ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test181(i8* %ptr, i8 %val) {
+define i8 @test181(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test181:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val acquire
+ %ret = atomicrmw and ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test182(i8* %ptr, i8 %val) {
+define i8 @test182(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test182:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val release
+ %ret = atomicrmw and ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test183(i8* %ptr, i8 %val) {
+define i8 @test183(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test183:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw and ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test184(i8* %ptr, i8 %val) {
+define i8 @test184(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test184:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw and ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test185(i16* %ptr, i16 %val) {
+define i16 @test185(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test185:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB185_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw and ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test186(i16* %ptr, i16 %val) {
+define i16 @test186(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test186:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val acquire
+ %ret = atomicrmw and ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test187(i16* %ptr, i16 %val) {
+define i16 @test187(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test187:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val release
+ %ret = atomicrmw and ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test188(i16* %ptr, i16 %val) {
+define i16 @test188(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test188:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw and ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test189(i16* %ptr, i16 %val) {
+define i16 @test189(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test189:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw and ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test190(i32* %ptr, i32 %val) {
+define i32 @test190(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test190:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB190_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw and ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test191(i32* %ptr, i32 %val) {
+define i32 @test191(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test191:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val acquire
+ %ret = atomicrmw and ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test192(i32* %ptr, i32 %val) {
+define i32 @test192(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test192:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val release
+ %ret = atomicrmw and ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test193(i32* %ptr, i32 %val) {
+define i32 @test193(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test193:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw and ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test194(i32* %ptr, i32 %val) {
+define i32 @test194(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test194:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw and ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test195(i64* %ptr, i64 %val) {
+define i64 @test195(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test195:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB195_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw and ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test196(i64* %ptr, i64 %val) {
+define i64 @test196(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test196:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val acquire
+ %ret = atomicrmw and ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test197(i64* %ptr, i64 %val) {
+define i64 @test197(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test197:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val release
+ %ret = atomicrmw and ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test198(i64* %ptr, i64 %val) {
+define i64 @test198(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test198:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw and ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test199(i64* %ptr, i64 %val) {
+define i64 @test199(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test199:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw and ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test200(i8* %ptr, i8 %val) {
+define i8 @test200(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test200:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB200_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw nand ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test201(i8* %ptr, i8 %val) {
+define i8 @test201(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test201:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val acquire
+ %ret = atomicrmw nand ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test202(i8* %ptr, i8 %val) {
+define i8 @test202(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test202:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val release
+ %ret = atomicrmw nand ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test203(i8* %ptr, i8 %val) {
+define i8 @test203(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test203:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw nand ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test204(i8* %ptr, i8 %val) {
+define i8 @test204(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test204:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw nand ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test205(i16* %ptr, i16 %val) {
+define i16 @test205(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test205:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB205_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw nand ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test206(i16* %ptr, i16 %val) {
+define i16 @test206(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test206:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val acquire
+ %ret = atomicrmw nand ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test207(i16* %ptr, i16 %val) {
+define i16 @test207(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test207:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val release
+ %ret = atomicrmw nand ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test208(i16* %ptr, i16 %val) {
+define i16 @test208(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test208:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw nand ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test209(i16* %ptr, i16 %val) {
+define i16 @test209(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test209:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw nand ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test210(i32* %ptr, i32 %val) {
+define i32 @test210(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test210:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB210_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw nand ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test211(i32* %ptr, i32 %val) {
+define i32 @test211(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test211:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val acquire
+ %ret = atomicrmw nand ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test212(i32* %ptr, i32 %val) {
+define i32 @test212(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test212:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val release
+ %ret = atomicrmw nand ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test213(i32* %ptr, i32 %val) {
+define i32 @test213(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test213:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw nand ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test214(i32* %ptr, i32 %val) {
+define i32 @test214(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test214:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw nand ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test215(i64* %ptr, i64 %val) {
+define i64 @test215(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test215:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB215_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw nand ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test216(i64* %ptr, i64 %val) {
+define i64 @test216(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test216:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val acquire
+ %ret = atomicrmw nand ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test217(i64* %ptr, i64 %val) {
+define i64 @test217(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test217:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val release
+ %ret = atomicrmw nand ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test218(i64* %ptr, i64 %val) {
+define i64 @test218(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test218:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw nand ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test219(i64* %ptr, i64 %val) {
+define i64 @test219(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test219:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw nand ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test220(i8* %ptr, i8 %val) {
+define i8 @test220(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test220:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB220_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw or ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test221(i8* %ptr, i8 %val) {
+define i8 @test221(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test221:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val acquire
+ %ret = atomicrmw or ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test222(i8* %ptr, i8 %val) {
+define i8 @test222(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test222:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val release
+ %ret = atomicrmw or ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test223(i8* %ptr, i8 %val) {
+define i8 @test223(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test223:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw or ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test224(i8* %ptr, i8 %val) {
+define i8 @test224(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test224:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw or ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test225(i16* %ptr, i16 %val) {
+define i16 @test225(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test225:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB225_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw or ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test226(i16* %ptr, i16 %val) {
+define i16 @test226(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test226:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val acquire
+ %ret = atomicrmw or ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test227(i16* %ptr, i16 %val) {
+define i16 @test227(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test227:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val release
+ %ret = atomicrmw or ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test228(i16* %ptr, i16 %val) {
+define i16 @test228(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test228:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw or ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test229(i16* %ptr, i16 %val) {
+define i16 @test229(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test229:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw or ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test230(i32* %ptr, i32 %val) {
+define i32 @test230(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test230:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB230_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw or ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test231(i32* %ptr, i32 %val) {
+define i32 @test231(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test231:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val acquire
+ %ret = atomicrmw or ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test232(i32* %ptr, i32 %val) {
+define i32 @test232(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test232:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val release
+ %ret = atomicrmw or ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test233(i32* %ptr, i32 %val) {
+define i32 @test233(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test233:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw or ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test234(i32* %ptr, i32 %val) {
+define i32 @test234(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test234:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw or ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test235(i64* %ptr, i64 %val) {
+define i64 @test235(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test235:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB235_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw or ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test236(i64* %ptr, i64 %val) {
+define i64 @test236(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test236:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val acquire
+ %ret = atomicrmw or ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test237(i64* %ptr, i64 %val) {
+define i64 @test237(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test237:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val release
+ %ret = atomicrmw or ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test238(i64* %ptr, i64 %val) {
+define i64 @test238(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test238:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw or ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test239(i64* %ptr, i64 %val) {
+define i64 @test239(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test239:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw or ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test240(i8* %ptr, i8 %val) {
+define i8 @test240(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test240:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB240_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw xor ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test241(i8* %ptr, i8 %val) {
+define i8 @test241(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test241:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val acquire
+ %ret = atomicrmw xor ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test242(i8* %ptr, i8 %val) {
+define i8 @test242(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test242:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val release
+ %ret = atomicrmw xor ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test243(i8* %ptr, i8 %val) {
+define i8 @test243(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test243:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw xor ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test244(i8* %ptr, i8 %val) {
+define i8 @test244(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test244:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw xor ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test245(i16* %ptr, i16 %val) {
+define i16 @test245(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test245:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB245_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw xor ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test246(i16* %ptr, i16 %val) {
+define i16 @test246(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test246:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val acquire
+ %ret = atomicrmw xor ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test247(i16* %ptr, i16 %val) {
+define i16 @test247(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test247:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val release
+ %ret = atomicrmw xor ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test248(i16* %ptr, i16 %val) {
+define i16 @test248(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test248:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw xor ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test249(i16* %ptr, i16 %val) {
+define i16 @test249(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test249:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw xor ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test250(i32* %ptr, i32 %val) {
+define i32 @test250(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test250:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB250_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw xor ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test251(i32* %ptr, i32 %val) {
+define i32 @test251(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test251:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val acquire
+ %ret = atomicrmw xor ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test252(i32* %ptr, i32 %val) {
+define i32 @test252(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test252:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val release
+ %ret = atomicrmw xor ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test253(i32* %ptr, i32 %val) {
+define i32 @test253(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test253:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw xor ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test254(i32* %ptr, i32 %val) {
+define i32 @test254(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test254:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw xor ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test255(i64* %ptr, i64 %val) {
+define i64 @test255(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test255:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB255_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw xor ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test256(i64* %ptr, i64 %val) {
+define i64 @test256(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test256:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val acquire
+ %ret = atomicrmw xor ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test257(i64* %ptr, i64 %val) {
+define i64 @test257(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test257:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val release
+ %ret = atomicrmw xor ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test258(i64* %ptr, i64 %val) {
+define i64 @test258(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test258:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw xor ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test259(i64* %ptr, i64 %val) {
+define i64 @test259(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test259:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw xor ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test260(i8* %ptr, i8 %val) {
+define i8 @test260(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test260:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB260_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw max ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test261(i8* %ptr, i8 %val) {
+define i8 @test261(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test261:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val acquire
+ %ret = atomicrmw max ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test262(i8* %ptr, i8 %val) {
+define i8 @test262(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test262:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB262_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val release
+ %ret = atomicrmw max ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test263(i8* %ptr, i8 %val) {
+define i8 @test263(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test263:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw max ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test264(i8* %ptr, i8 %val) {
+define i8 @test264(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test264:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw max ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test265(i16* %ptr, i16 %val) {
+define i16 @test265(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test265:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB265_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw max ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test266(i16* %ptr, i16 %val) {
+define i16 @test266(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test266:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val acquire
+ %ret = atomicrmw max ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test267(i16* %ptr, i16 %val) {
+define i16 @test267(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test267:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB267_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val release
+ %ret = atomicrmw max ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test268(i16* %ptr, i16 %val) {
+define i16 @test268(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test268:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw max ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test269(i16* %ptr, i16 %val) {
+define i16 @test269(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test269:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw max ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test270(i32* %ptr, i32 %val) {
+define i32 @test270(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test270:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB270_1:
; PPC64LE-NEXT: .LBB270_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw max ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test271(i32* %ptr, i32 %val) {
+define i32 @test271(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test271:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB271_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val acquire
+ %ret = atomicrmw max ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test272(i32* %ptr, i32 %val) {
+define i32 @test272(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test272:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB272_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val release
+ %ret = atomicrmw max ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test273(i32* %ptr, i32 %val) {
+define i32 @test273(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test273:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw max ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test274(i32* %ptr, i32 %val) {
+define i32 @test274(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test274:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw max ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test275(i64* %ptr, i64 %val) {
+define i64 @test275(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test275:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB275_1:
; PPC64LE-NEXT: .LBB275_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw max ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test276(i64* %ptr, i64 %val) {
+define i64 @test276(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test276:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB276_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val acquire
+ %ret = atomicrmw max ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test277(i64* %ptr, i64 %val) {
+define i64 @test277(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test277:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB277_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val release
+ %ret = atomicrmw max ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test278(i64* %ptr, i64 %val) {
+define i64 @test278(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test278:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw max ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test279(i64* %ptr, i64 %val) {
+define i64 @test279(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test279:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw max ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test280(i8* %ptr, i8 %val) {
+define i8 @test280(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test280:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB280_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw min ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test281(i8* %ptr, i8 %val) {
+define i8 @test281(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test281:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val acquire
+ %ret = atomicrmw min ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test282(i8* %ptr, i8 %val) {
+define i8 @test282(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test282:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB282_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val release
+ %ret = atomicrmw min ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test283(i8* %ptr, i8 %val) {
+define i8 @test283(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test283:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw min ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test284(i8* %ptr, i8 %val) {
+define i8 @test284(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test284:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw min ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test285(i16* %ptr, i16 %val) {
+define i16 @test285(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test285:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB285_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw min ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test286(i16* %ptr, i16 %val) {
+define i16 @test286(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test286:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val acquire
+ %ret = atomicrmw min ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test287(i16* %ptr, i16 %val) {
+define i16 @test287(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test287:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB287_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val release
+ %ret = atomicrmw min ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test288(i16* %ptr, i16 %val) {
+define i16 @test288(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test288:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw min ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test289(i16* %ptr, i16 %val) {
+define i16 @test289(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test289:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw min ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test290(i32* %ptr, i32 %val) {
+define i32 @test290(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test290:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB290_1:
; PPC64LE-NEXT: .LBB290_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw min ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test291(i32* %ptr, i32 %val) {
+define i32 @test291(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test291:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB291_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val acquire
+ %ret = atomicrmw min ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test292(i32* %ptr, i32 %val) {
+define i32 @test292(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test292:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB292_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val release
+ %ret = atomicrmw min ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test293(i32* %ptr, i32 %val) {
+define i32 @test293(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test293:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw min ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test294(i32* %ptr, i32 %val) {
+define i32 @test294(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test294:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw min ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test295(i64* %ptr, i64 %val) {
+define i64 @test295(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test295:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB295_1:
; PPC64LE-NEXT: .LBB295_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw min ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test296(i64* %ptr, i64 %val) {
+define i64 @test296(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test296:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB296_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val acquire
+ %ret = atomicrmw min ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test297(i64* %ptr, i64 %val) {
+define i64 @test297(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test297:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB297_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val release
+ %ret = atomicrmw min ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test298(i64* %ptr, i64 %val) {
+define i64 @test298(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test298:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw min ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test299(i64* %ptr, i64 %val) {
+define i64 @test299(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test299:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw min ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test300(i8* %ptr, i8 %val) {
+define i8 @test300(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test300:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB300_1:
; PPC64LE-NEXT: .LBB300_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw umax ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test301(i8* %ptr, i8 %val) {
+define i8 @test301(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test301:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB301_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val acquire
+ %ret = atomicrmw umax ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test302(i8* %ptr, i8 %val) {
+define i8 @test302(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test302:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB302_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val release
+ %ret = atomicrmw umax ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test303(i8* %ptr, i8 %val) {
+define i8 @test303(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test303:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw umax ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test304(i8* %ptr, i8 %val) {
+define i8 @test304(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test304:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw umax ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test305(i16* %ptr, i16 %val) {
+define i16 @test305(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test305:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB305_1:
; PPC64LE-NEXT: .LBB305_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw umax ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test306(i16* %ptr, i16 %val) {
+define i16 @test306(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test306:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB306_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val acquire
+ %ret = atomicrmw umax ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test307(i16* %ptr, i16 %val) {
+define i16 @test307(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test307:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB307_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val release
+ %ret = atomicrmw umax ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test308(i16* %ptr, i16 %val) {
+define i16 @test308(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test308:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw umax ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test309(i16* %ptr, i16 %val) {
+define i16 @test309(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test309:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw umax ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test310(i32* %ptr, i32 %val) {
+define i32 @test310(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test310:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB310_1:
; PPC64LE-NEXT: .LBB310_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw umax ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test311(i32* %ptr, i32 %val) {
+define i32 @test311(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test311:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB311_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val acquire
+ %ret = atomicrmw umax ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test312(i32* %ptr, i32 %val) {
+define i32 @test312(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test312:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB312_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val release
+ %ret = atomicrmw umax ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test313(i32* %ptr, i32 %val) {
+define i32 @test313(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test313:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw umax ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test314(i32* %ptr, i32 %val) {
+define i32 @test314(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test314:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw umax ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test315(i64* %ptr, i64 %val) {
+define i64 @test315(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test315:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB315_1:
; PPC64LE-NEXT: .LBB315_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw umax ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test316(i64* %ptr, i64 %val) {
+define i64 @test316(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test316:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB316_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val acquire
+ %ret = atomicrmw umax ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test317(i64* %ptr, i64 %val) {
+define i64 @test317(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test317:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB317_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val release
+ %ret = atomicrmw umax ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test318(i64* %ptr, i64 %val) {
+define i64 @test318(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test318:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw umax ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test319(i64* %ptr, i64 %val) {
+define i64 @test319(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test319:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw umax ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test320(i8* %ptr, i8 %val) {
+define i8 @test320(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test320:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB320_1:
; PPC64LE-NEXT: .LBB320_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val monotonic
+ %ret = atomicrmw umin ptr %ptr, i8 %val monotonic
ret i8 %ret
}
-define i8 @test321(i8* %ptr, i8 %val) {
+define i8 @test321(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test321:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB321_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val acquire
+ %ret = atomicrmw umin ptr %ptr, i8 %val acquire
ret i8 %ret
}
-define i8 @test322(i8* %ptr, i8 %val) {
+define i8 @test322(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test322:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB322_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val release
+ %ret = atomicrmw umin ptr %ptr, i8 %val release
ret i8 %ret
}
-define i8 @test323(i8* %ptr, i8 %val) {
+define i8 @test323(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test323:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val acq_rel
+ %ret = atomicrmw umin ptr %ptr, i8 %val acq_rel
ret i8 %ret
}
-define i8 @test324(i8* %ptr, i8 %val) {
+define i8 @test324(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test324:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val seq_cst
+ %ret = atomicrmw umin ptr %ptr, i8 %val seq_cst
ret i8 %ret
}
-define i16 @test325(i16* %ptr, i16 %val) {
+define i16 @test325(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test325:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB325_1:
; PPC64LE-NEXT: .LBB325_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val monotonic
+ %ret = atomicrmw umin ptr %ptr, i16 %val monotonic
ret i16 %ret
}
-define i16 @test326(i16* %ptr, i16 %val) {
+define i16 @test326(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test326:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB326_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val acquire
+ %ret = atomicrmw umin ptr %ptr, i16 %val acquire
ret i16 %ret
}
-define i16 @test327(i16* %ptr, i16 %val) {
+define i16 @test327(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test327:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB327_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val release
+ %ret = atomicrmw umin ptr %ptr, i16 %val release
ret i16 %ret
}
-define i16 @test328(i16* %ptr, i16 %val) {
+define i16 @test328(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test328:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val acq_rel
+ %ret = atomicrmw umin ptr %ptr, i16 %val acq_rel
ret i16 %ret
}
-define i16 @test329(i16* %ptr, i16 %val) {
+define i16 @test329(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test329:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val seq_cst
+ %ret = atomicrmw umin ptr %ptr, i16 %val seq_cst
ret i16 %ret
}
-define i32 @test330(i32* %ptr, i32 %val) {
+define i32 @test330(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test330:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB330_1:
; PPC64LE-NEXT: .LBB330_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val monotonic
+ %ret = atomicrmw umin ptr %ptr, i32 %val monotonic
ret i32 %ret
}
-define i32 @test331(i32* %ptr, i32 %val) {
+define i32 @test331(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test331:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB331_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val acquire
+ %ret = atomicrmw umin ptr %ptr, i32 %val acquire
ret i32 %ret
}
-define i32 @test332(i32* %ptr, i32 %val) {
+define i32 @test332(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test332:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB332_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val release
+ %ret = atomicrmw umin ptr %ptr, i32 %val release
ret i32 %ret
}
-define i32 @test333(i32* %ptr, i32 %val) {
+define i32 @test333(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test333:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val acq_rel
+ %ret = atomicrmw umin ptr %ptr, i32 %val acq_rel
ret i32 %ret
}
-define i32 @test334(i32* %ptr, i32 %val) {
+define i32 @test334(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test334:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val seq_cst
+ %ret = atomicrmw umin ptr %ptr, i32 %val seq_cst
ret i32 %ret
}
-define i64 @test335(i64* %ptr, i64 %val) {
+define i64 @test335(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test335:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB335_1:
; PPC64LE-NEXT: .LBB335_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val monotonic
+ %ret = atomicrmw umin ptr %ptr, i64 %val monotonic
ret i64 %ret
}
-define i64 @test336(i64* %ptr, i64 %val) {
+define i64 @test336(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test336:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB336_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val acquire
+ %ret = atomicrmw umin ptr %ptr, i64 %val acquire
ret i64 %ret
}
-define i64 @test337(i64* %ptr, i64 %val) {
+define i64 @test337(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test337:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB337_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val release
+ %ret = atomicrmw umin ptr %ptr, i64 %val release
ret i64 %ret
}
-define i64 @test338(i64* %ptr, i64 %val) {
+define i64 @test338(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test338:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val acq_rel
+ %ret = atomicrmw umin ptr %ptr, i64 %val acq_rel
ret i64 %ret
}
-define i64 @test339(i64* %ptr, i64 %val) {
+define i64 @test339(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test339:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val seq_cst
+ %ret = atomicrmw umin ptr %ptr, i64 %val seq_cst
ret i64 %ret
}
-define i8 @test340(i8* %ptr, i8 %val) {
+define i8 @test340(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test340:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB340_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xchg ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test341(i8* %ptr, i8 %val) {
+define i8 @test341(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test341:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xchg ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test342(i8* %ptr, i8 %val) {
+define i8 @test342(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test342:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw xchg ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test343(i8* %ptr, i8 %val) {
+define i8 @test343(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test343:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test344(i8* %ptr, i8 %val) {
+define i8 @test344(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test344:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test345(i16* %ptr, i16 %val) {
+define i16 @test345(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test345:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB345_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xchg ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test346(i16* %ptr, i16 %val) {
+define i16 @test346(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test346:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xchg ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test347(i16* %ptr, i16 %val) {
+define i16 @test347(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test347:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw xchg ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test348(i16* %ptr, i16 %val) {
+define i16 @test348(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test348:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test349(i16* %ptr, i16 %val) {
+define i16 @test349(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test349:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test350(i32* %ptr, i32 %val) {
+define i32 @test350(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test350:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB350_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xchg ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test351(i32* %ptr, i32 %val) {
+define i32 @test351(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test351:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xchg ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test352(i32* %ptr, i32 %val) {
+define i32 @test352(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test352:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw xchg ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test353(i32* %ptr, i32 %val) {
+define i32 @test353(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test353:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test354(i32* %ptr, i32 %val) {
+define i32 @test354(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test354:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test355(i64* %ptr, i64 %val) {
+define i64 @test355(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test355:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB355_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xchg ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test356(i64* %ptr, i64 %val) {
+define i64 @test356(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test356:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xchg ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test357(i64* %ptr, i64 %val) {
+define i64 @test357(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test357:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw xchg ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test358(i64* %ptr, i64 %val) {
+define i64 @test358(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test358:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xchg ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test359(i64* %ptr, i64 %val) {
+define i64 @test359(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test359:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xchg ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test360(i8* %ptr, i8 %val) {
+define i8 @test360(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test360:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB360_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw add ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test361(i8* %ptr, i8 %val) {
+define i8 @test361(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test361:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw add ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test362(i8* %ptr, i8 %val) {
+define i8 @test362(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test362:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw add ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test363(i8* %ptr, i8 %val) {
+define i8 @test363(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test363:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw add ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test364(i8* %ptr, i8 %val) {
+define i8 @test364(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test364:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw add ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test365(i16* %ptr, i16 %val) {
+define i16 @test365(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test365:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB365_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw add ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test366(i16* %ptr, i16 %val) {
+define i16 @test366(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test366:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw add ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test367(i16* %ptr, i16 %val) {
+define i16 @test367(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test367:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw add ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test368(i16* %ptr, i16 %val) {
+define i16 @test368(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test368:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw add ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test369(i16* %ptr, i16 %val) {
+define i16 @test369(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test369:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw add ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test370(i32* %ptr, i32 %val) {
+define i32 @test370(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test370:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB370_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw add ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test371(i32* %ptr, i32 %val) {
+define i32 @test371(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test371:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw add ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test372(i32* %ptr, i32 %val) {
+define i32 @test372(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test372:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw add ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test373(i32* %ptr, i32 %val) {
+define i32 @test373(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test373:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw add ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test374(i32* %ptr, i32 %val) {
+define i32 @test374(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test374:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw add ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test375(i64* %ptr, i64 %val) {
+define i64 @test375(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test375:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB375_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw add ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test376(i64* %ptr, i64 %val) {
+define i64 @test376(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test376:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw add ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test377(i64* %ptr, i64 %val) {
+define i64 @test377(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test377:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw add ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test378(i64* %ptr, i64 %val) {
+define i64 @test378(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test378:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw add ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test379(i64* %ptr, i64 %val) {
+define i64 @test379(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test379:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw add ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test380(i8* %ptr, i8 %val) {
+define i8 @test380(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test380:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB380_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw sub ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test381(i8* %ptr, i8 %val) {
+define i8 @test381(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test381:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw sub ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test382(i8* %ptr, i8 %val) {
+define i8 @test382(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test382:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw sub ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test383(i8* %ptr, i8 %val) {
+define i8 @test383(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test383:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw sub ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test384(i8* %ptr, i8 %val) {
+define i8 @test384(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test384:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw sub ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test385(i16* %ptr, i16 %val) {
+define i16 @test385(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test385:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB385_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw sub ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test386(i16* %ptr, i16 %val) {
+define i16 @test386(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test386:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw sub ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test387(i16* %ptr, i16 %val) {
+define i16 @test387(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test387:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw sub ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test388(i16* %ptr, i16 %val) {
+define i16 @test388(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test388:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw sub ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test389(i16* %ptr, i16 %val) {
+define i16 @test389(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test389:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw sub ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test390(i32* %ptr, i32 %val) {
+define i32 @test390(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test390:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB390_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw sub ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test391(i32* %ptr, i32 %val) {
+define i32 @test391(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test391:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw sub ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test392(i32* %ptr, i32 %val) {
+define i32 @test392(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test392:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw sub ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test393(i32* %ptr, i32 %val) {
+define i32 @test393(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test393:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw sub ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test394(i32* %ptr, i32 %val) {
+define i32 @test394(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test394:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw sub ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test395(i64* %ptr, i64 %val) {
+define i64 @test395(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test395:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB395_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw sub ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test396(i64* %ptr, i64 %val) {
+define i64 @test396(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test396:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw sub ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test397(i64* %ptr, i64 %val) {
+define i64 @test397(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test397:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw sub ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test398(i64* %ptr, i64 %val) {
+define i64 @test398(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test398:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw sub ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test399(i64* %ptr, i64 %val) {
+define i64 @test399(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test399:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw sub ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test400(i8* %ptr, i8 %val) {
+define i8 @test400(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test400:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB400_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw and ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test401(i8* %ptr, i8 %val) {
+define i8 @test401(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test401:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw and ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test402(i8* %ptr, i8 %val) {
+define i8 @test402(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test402:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw and ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test403(i8* %ptr, i8 %val) {
+define i8 @test403(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test403:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw and ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test404(i8* %ptr, i8 %val) {
+define i8 @test404(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test404:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw and ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test405(i16* %ptr, i16 %val) {
+define i16 @test405(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test405:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB405_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw and ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test406(i16* %ptr, i16 %val) {
+define i16 @test406(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test406:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw and ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test407(i16* %ptr, i16 %val) {
+define i16 @test407(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test407:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw and ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test408(i16* %ptr, i16 %val) {
+define i16 @test408(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test408:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw and ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test409(i16* %ptr, i16 %val) {
+define i16 @test409(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test409:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw and ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test410(i32* %ptr, i32 %val) {
+define i32 @test410(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test410:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB410_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw and ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test411(i32* %ptr, i32 %val) {
+define i32 @test411(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test411:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw and ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test412(i32* %ptr, i32 %val) {
+define i32 @test412(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test412:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw and ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test413(i32* %ptr, i32 %val) {
+define i32 @test413(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test413:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw and ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test414(i32* %ptr, i32 %val) {
+define i32 @test414(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test414:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw and ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test415(i64* %ptr, i64 %val) {
+define i64 @test415(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test415:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB415_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw and ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test416(i64* %ptr, i64 %val) {
+define i64 @test416(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test416:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw and ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test417(i64* %ptr, i64 %val) {
+define i64 @test417(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test417:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw and ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test418(i64* %ptr, i64 %val) {
+define i64 @test418(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test418:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw and ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test419(i64* %ptr, i64 %val) {
+define i64 @test419(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test419:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw and ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test420(i8* %ptr, i8 %val) {
+define i8 @test420(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test420:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB420_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw nand ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test421(i8* %ptr, i8 %val) {
+define i8 @test421(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test421:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw nand ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test422(i8* %ptr, i8 %val) {
+define i8 @test422(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test422:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw nand ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test423(i8* %ptr, i8 %val) {
+define i8 @test423(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test423:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw nand ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test424(i8* %ptr, i8 %val) {
+define i8 @test424(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test424:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw nand ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test425(i16* %ptr, i16 %val) {
+define i16 @test425(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test425:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB425_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw nand ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test426(i16* %ptr, i16 %val) {
+define i16 @test426(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test426:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw nand ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test427(i16* %ptr, i16 %val) {
+define i16 @test427(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test427:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw nand ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test428(i16* %ptr, i16 %val) {
+define i16 @test428(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test428:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw nand ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test429(i16* %ptr, i16 %val) {
+define i16 @test429(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test429:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw nand ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test430(i32* %ptr, i32 %val) {
+define i32 @test430(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test430:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB430_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw nand ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test431(i32* %ptr, i32 %val) {
+define i32 @test431(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test431:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw nand ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test432(i32* %ptr, i32 %val) {
+define i32 @test432(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test432:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw nand ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test433(i32* %ptr, i32 %val) {
+define i32 @test433(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test433:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw nand ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test434(i32* %ptr, i32 %val) {
+define i32 @test434(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test434:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw nand ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test435(i64* %ptr, i64 %val) {
+define i64 @test435(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test435:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB435_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw nand ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test436(i64* %ptr, i64 %val) {
+define i64 @test436(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test436:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw nand ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test437(i64* %ptr, i64 %val) {
+define i64 @test437(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test437:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw nand ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test438(i64* %ptr, i64 %val) {
+define i64 @test438(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test438:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw nand ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test439(i64* %ptr, i64 %val) {
+define i64 @test439(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test439:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw nand ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test440(i8* %ptr, i8 %val) {
+define i8 @test440(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test440:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB440_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw or ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test441(i8* %ptr, i8 %val) {
+define i8 @test441(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test441:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw or ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test442(i8* %ptr, i8 %val) {
+define i8 @test442(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test442:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw or ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test443(i8* %ptr, i8 %val) {
+define i8 @test443(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test443:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw or ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test444(i8* %ptr, i8 %val) {
+define i8 @test444(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test444:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw or ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test445(i16* %ptr, i16 %val) {
+define i16 @test445(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test445:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB445_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw or ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test446(i16* %ptr, i16 %val) {
+define i16 @test446(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test446:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw or ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test447(i16* %ptr, i16 %val) {
+define i16 @test447(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test447:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw or ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test448(i16* %ptr, i16 %val) {
+define i16 @test448(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test448:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw or ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test449(i16* %ptr, i16 %val) {
+define i16 @test449(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test449:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw or ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test450(i32* %ptr, i32 %val) {
+define i32 @test450(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test450:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB450_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw or ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test451(i32* %ptr, i32 %val) {
+define i32 @test451(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test451:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw or ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test452(i32* %ptr, i32 %val) {
+define i32 @test452(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test452:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw or ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test453(i32* %ptr, i32 %val) {
+define i32 @test453(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test453:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw or ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test454(i32* %ptr, i32 %val) {
+define i32 @test454(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test454:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw or ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test455(i64* %ptr, i64 %val) {
+define i64 @test455(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test455:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB455_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw or ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test456(i64* %ptr, i64 %val) {
+define i64 @test456(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test456:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw or ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test457(i64* %ptr, i64 %val) {
+define i64 @test457(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test457:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw or ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test458(i64* %ptr, i64 %val) {
+define i64 @test458(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test458:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw or ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test459(i64* %ptr, i64 %val) {
+define i64 @test459(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test459:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw or ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test460(i8* %ptr, i8 %val) {
+define i8 @test460(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test460:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB460_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xor ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test461(i8* %ptr, i8 %val) {
+define i8 @test461(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test461:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xor ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test462(i8* %ptr, i8 %val) {
+define i8 @test462(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test462:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw xor ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test463(i8* %ptr, i8 %val) {
+define i8 @test463(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test463:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xor ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test464(i8* %ptr, i8 %val) {
+define i8 @test464(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test464:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xor ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test465(i16* %ptr, i16 %val) {
+define i16 @test465(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test465:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB465_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xor ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test466(i16* %ptr, i16 %val) {
+define i16 @test466(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test466:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xor ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test467(i16* %ptr, i16 %val) {
+define i16 @test467(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test467:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw xor ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test468(i16* %ptr, i16 %val) {
+define i16 @test468(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test468:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xor ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test469(i16* %ptr, i16 %val) {
+define i16 @test469(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test469:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xor ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test470(i32* %ptr, i32 %val) {
+define i32 @test470(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test470:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB470_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xor ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test471(i32* %ptr, i32 %val) {
+define i32 @test471(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test471:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xor ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test472(i32* %ptr, i32 %val) {
+define i32 @test472(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test472:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw xor ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test473(i32* %ptr, i32 %val) {
+define i32 @test473(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test473:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xor ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test474(i32* %ptr, i32 %val) {
+define i32 @test474(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test474:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xor ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test475(i64* %ptr, i64 %val) {
+define i64 @test475(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test475:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB475_1:
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw xor ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test476(i64* %ptr, i64 %val) {
+define i64 @test476(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test476:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw xor ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test477(i64* %ptr, i64 %val) {
+define i64 @test477(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test477:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw xor ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test478(i64* %ptr, i64 %val) {
+define i64 @test478(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test478:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw xor ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test479(i64* %ptr, i64 %val) {
+define i64 @test479(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test479:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw xor ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test480(i8* %ptr, i8 %val) {
+define i8 @test480(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test480:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB480_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw max ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test481(i8* %ptr, i8 %val) {
+define i8 @test481(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test481:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw max ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test482(i8* %ptr, i8 %val) {
+define i8 @test482(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test482:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB482_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw max ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test483(i8* %ptr, i8 %val) {
+define i8 @test483(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test483:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw max ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test484(i8* %ptr, i8 %val) {
+define i8 @test484(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test484:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw max ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test485(i16* %ptr, i16 %val) {
+define i16 @test485(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test485:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB485_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw max ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test486(i16* %ptr, i16 %val) {
+define i16 @test486(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test486:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw max ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test487(i16* %ptr, i16 %val) {
+define i16 @test487(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test487:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB487_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw max ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test488(i16* %ptr, i16 %val) {
+define i16 @test488(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test488:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw max ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test489(i16* %ptr, i16 %val) {
+define i16 @test489(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test489:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw max ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test490(i32* %ptr, i32 %val) {
+define i32 @test490(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test490:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB490_1:
; PPC64LE-NEXT: .LBB490_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw max ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test491(i32* %ptr, i32 %val) {
+define i32 @test491(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test491:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB491_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw max ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test492(i32* %ptr, i32 %val) {
+define i32 @test492(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test492:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB492_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw max ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test493(i32* %ptr, i32 %val) {
+define i32 @test493(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test493:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw max ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test494(i32* %ptr, i32 %val) {
+define i32 @test494(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test494:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw max ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test495(i64* %ptr, i64 %val) {
+define i64 @test495(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test495:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB495_1:
; PPC64LE-NEXT: .LBB495_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw max ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test496(i64* %ptr, i64 %val) {
+define i64 @test496(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test496:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB496_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw max ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test497(i64* %ptr, i64 %val) {
+define i64 @test497(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test497:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB497_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw max ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test498(i64* %ptr, i64 %val) {
+define i64 @test498(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test498:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw max ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test499(i64* %ptr, i64 %val) {
+define i64 @test499(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test499:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw max i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw max ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test500(i8* %ptr, i8 %val) {
+define i8 @test500(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test500:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB500_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw min ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test501(i8* %ptr, i8 %val) {
+define i8 @test501(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test501:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw min ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test502(i8* %ptr, i8 %val) {
+define i8 @test502(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test502:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: .LBB502_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw min ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test503(i8* %ptr, i8 %val) {
+define i8 @test503(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test503:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw min ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test504(i8* %ptr, i8 %val) {
+define i8 @test504(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test504:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsb 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw min ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test505(i16* %ptr, i16 %val) {
+define i16 @test505(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test505:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB505_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw min ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test506(i16* %ptr, i16 %val) {
+define i16 @test506(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test506:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw min ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test507(i16* %ptr, i16 %val) {
+define i16 @test507(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test507:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: .LBB507_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw min ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test508(i16* %ptr, i16 %val) {
+define i16 @test508(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test508:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw min ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test509(i16* %ptr, i16 %val) {
+define i16 @test509(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test509:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: extsh 5, 4
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw min ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test510(i32* %ptr, i32 %val) {
+define i32 @test510(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test510:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB510_1:
; PPC64LE-NEXT: .LBB510_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw min ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test511(i32* %ptr, i32 %val) {
+define i32 @test511(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test511:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB511_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw min ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test512(i32* %ptr, i32 %val) {
+define i32 @test512(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test512:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB512_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw min ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test513(i32* %ptr, i32 %val) {
+define i32 @test513(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test513:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw min ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test514(i32* %ptr, i32 %val) {
+define i32 @test514(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test514:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw min ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test515(i64* %ptr, i64 %val) {
+define i64 @test515(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test515:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB515_1:
; PPC64LE-NEXT: .LBB515_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw min ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test516(i64* %ptr, i64 %val) {
+define i64 @test516(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test516:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB516_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw min ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test517(i64* %ptr, i64 %val) {
+define i64 @test517(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test517:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB517_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw min ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test518(i64* %ptr, i64 %val) {
+define i64 @test518(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test518:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw min ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test519(i64* %ptr, i64 %val) {
+define i64 @test519(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test519:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw min i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw min ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test520(i8* %ptr, i8 %val) {
+define i8 @test520(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test520:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB520_1:
; PPC64LE-NEXT: .LBB520_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umax ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test521(i8* %ptr, i8 %val) {
+define i8 @test521(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test521:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB521_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umax ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test522(i8* %ptr, i8 %val) {
+define i8 @test522(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test522:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB522_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw umax ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test523(i8* %ptr, i8 %val) {
+define i8 @test523(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test523:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umax ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test524(i8* %ptr, i8 %val) {
+define i8 @test524(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test524:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umax ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test525(i16* %ptr, i16 %val) {
+define i16 @test525(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test525:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB525_1:
; PPC64LE-NEXT: .LBB525_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umax ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test526(i16* %ptr, i16 %val) {
+define i16 @test526(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test526:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB526_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umax ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test527(i16* %ptr, i16 %val) {
+define i16 @test527(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test527:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB527_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw umax ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test528(i16* %ptr, i16 %val) {
+define i16 @test528(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test528:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umax ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test529(i16* %ptr, i16 %val) {
+define i16 @test529(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test529:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umax ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test530(i32* %ptr, i32 %val) {
+define i32 @test530(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test530:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB530_1:
; PPC64LE-NEXT: .LBB530_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umax ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test531(i32* %ptr, i32 %val) {
+define i32 @test531(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test531:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB531_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umax ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test532(i32* %ptr, i32 %val) {
+define i32 @test532(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test532:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB532_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw umax ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test533(i32* %ptr, i32 %val) {
+define i32 @test533(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test533:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umax ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test534(i32* %ptr, i32 %val) {
+define i32 @test534(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test534:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umax ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test535(i64* %ptr, i64 %val) {
+define i64 @test535(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test535:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB535_1:
; PPC64LE-NEXT: .LBB535_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umax ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test536(i64* %ptr, i64 %val) {
+define i64 @test536(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test536:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB536_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umax ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test537(i64* %ptr, i64 %val) {
+define i64 @test537(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test537:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB537_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw umax ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test538(i64* %ptr, i64 %val) {
+define i64 @test538(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test538:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umax ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
-define i64 @test539(i64* %ptr, i64 %val) {
+define i64 @test539(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test539:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umax i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umax ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
-define i8 @test540(i8* %ptr, i8 %val) {
+define i8 @test540(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test540:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB540_1:
; PPC64LE-NEXT: .LBB540_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umin ptr %ptr, i8 %val syncscope("singlethread") monotonic
ret i8 %ret
}
-define i8 @test541(i8* %ptr, i8 %val) {
+define i8 @test541(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test541:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB541_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umin ptr %ptr, i8 %val syncscope("singlethread") acquire
ret i8 %ret
}
-define i8 @test542(i8* %ptr, i8 %val) {
+define i8 @test542(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test542:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB542_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") release
+ %ret = atomicrmw umin ptr %ptr, i8 %val syncscope("singlethread") release
ret i8 %ret
}
-define i8 @test543(i8* %ptr, i8 %val) {
+define i8 @test543(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test543:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umin ptr %ptr, i8 %val syncscope("singlethread") acq_rel
ret i8 %ret
}
-define i8 @test544(i8* %ptr, i8 %val) {
+define i8 @test544(ptr %ptr, i8 %val) {
; PPC64LE-LABEL: test544:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i8* %ptr, i8 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umin ptr %ptr, i8 %val syncscope("singlethread") seq_cst
ret i8 %ret
}
-define i16 @test545(i16* %ptr, i16 %val) {
+define i16 @test545(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test545:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB545_1:
; PPC64LE-NEXT: .LBB545_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umin ptr %ptr, i16 %val syncscope("singlethread") monotonic
ret i16 %ret
}
-define i16 @test546(i16* %ptr, i16 %val) {
+define i16 @test546(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test546:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB546_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umin ptr %ptr, i16 %val syncscope("singlethread") acquire
ret i16 %ret
}
-define i16 @test547(i16* %ptr, i16 %val) {
+define i16 @test547(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test547:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB547_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") release
+ %ret = atomicrmw umin ptr %ptr, i16 %val syncscope("singlethread") release
ret i16 %ret
}
-define i16 @test548(i16* %ptr, i16 %val) {
+define i16 @test548(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test548:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umin ptr %ptr, i16 %val syncscope("singlethread") acq_rel
ret i16 %ret
}
-define i16 @test549(i16* %ptr, i16 %val) {
+define i16 @test549(ptr %ptr, i16 %val) {
; PPC64LE-LABEL: test549:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i16* %ptr, i16 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umin ptr %ptr, i16 %val syncscope("singlethread") seq_cst
ret i16 %ret
}
-define i32 @test550(i32* %ptr, i32 %val) {
+define i32 @test550(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test550:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB550_1:
; PPC64LE-NEXT: .LBB550_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umin ptr %ptr, i32 %val syncscope("singlethread") monotonic
ret i32 %ret
}
-define i32 @test551(i32* %ptr, i32 %val) {
+define i32 @test551(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test551:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB551_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umin ptr %ptr, i32 %val syncscope("singlethread") acquire
ret i32 %ret
}
-define i32 @test552(i32* %ptr, i32 %val) {
+define i32 @test552(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test552:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB552_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") release
+ %ret = atomicrmw umin ptr %ptr, i32 %val syncscope("singlethread") release
ret i32 %ret
}
-define i32 @test553(i32* %ptr, i32 %val) {
+define i32 @test553(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test553:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umin ptr %ptr, i32 %val syncscope("singlethread") acq_rel
ret i32 %ret
}
-define i32 @test554(i32* %ptr, i32 %val) {
+define i32 @test554(ptr %ptr, i32 %val) {
; PPC64LE-LABEL: test554:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i32* %ptr, i32 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umin ptr %ptr, i32 %val syncscope("singlethread") seq_cst
ret i32 %ret
}
-define i64 @test555(i64* %ptr, i64 %val) {
+define i64 @test555(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test555:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: .LBB555_1:
; PPC64LE-NEXT: .LBB555_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") monotonic
+ %ret = atomicrmw umin ptr %ptr, i64 %val syncscope("singlethread") monotonic
ret i64 %ret
}
-define i64 @test556(i64* %ptr, i64 %val) {
+define i64 @test556(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test556:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: mr 5, 3
; PPC64LE-NEXT: .LBB556_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acquire
+ %ret = atomicrmw umin ptr %ptr, i64 %val syncscope("singlethread") acquire
ret i64 %ret
}
-define i64 @test557(i64* %ptr, i64 %val) {
+define i64 @test557(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test557:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: .LBB557_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") release
+ %ret = atomicrmw umin ptr %ptr, i64 %val syncscope("singlethread") release
ret i64 %ret
}
-define i64 @test558(i64* %ptr, i64 %val) {
+define i64 @test558(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test558:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") acq_rel
+ %ret = atomicrmw umin ptr %ptr, i64 %val syncscope("singlethread") acq_rel
ret i64 %ret
}
;; NOTE(review): Fragment of an opaque-pointer migration diff for PowerPC
;; atomics CodeGen tests. '-' lines hold the old typed-pointer IR, '+' lines
;; the equivalent 'ptr' IR; the FileCheck expectations (PPC64LE/PPC32/PPC64
;; prefixes) are unchanged by the migration. Several CHECK runs are elided in
;; this excerpt, so adjacent *-NEXT lines may not be truly consecutive.
;; test559: i64 atomicrmw umin, singlethread seq_cst — sync before, lwsync after.
-define i64 @test559(i64* %ptr, i64 %val) {
+define i64 @test559(ptr %ptr, i64 %val) {
; PPC64LE-LABEL: test559:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: sync
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
- %ret = atomicrmw umin i64* %ptr, i64 %val syncscope("singlethread") seq_cst
+ %ret = atomicrmw umin ptr %ptr, i64 %val syncscope("singlethread") seq_cst
ret i64 %ret
}
; The second load should never be scheduled before isync.
;; NOTE(review): both loads below read %ptr1 and %ptr2 is unused, and the
;; visible CHECK run contains no isync — confirm against the upstream test
;; whether lines were elided here or the comment predates an edit.
-define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) {
+define i32 @test_ordering0(ptr %ptr1, ptr %ptr2) {
; PPC64LE-LABEL: test_ordering0:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 4, 0(3)
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: add 3, 4, 3
; PPC64LE-NEXT: blr
- %val1 = load atomic i32, i32* %ptr1 acquire, align 4
- %val2 = load i32, i32* %ptr1
+ %val1 = load atomic i32, ptr %ptr1 acquire, align 4
+ %val2 = load i32, ptr %ptr1
%add = add i32 %val1, %val2
ret i32 %add
}
; The second store should never be scheduled before isync.
-define i32 @test_ordering1(i32* %ptr1, i32 %val1, i32* %ptr2) {
+define i32 @test_ordering1(ptr %ptr1, i32 %val1, ptr %ptr2) {
; PPC64LE-LABEL: test_ordering1:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: lwz 3, 0(3)
; PPC64LE-NEXT: isync
; PPC64LE-NEXT: stw 4, 0(5)
; PPC64LE-NEXT: blr
- %val2 = load atomic i32, i32* %ptr1 acquire, align 4
- store i32 %val1, i32* %ptr2
+ %val2 = load atomic i32, ptr %ptr1 acquire, align 4
+ store i32 %val1, ptr %ptr2
ret i32 %val2
}
; We first check loads, for all sizes from i8 to i64.
; We also vary orderings to check for barriers.
-define i8 @load_i8_unordered(i8* %mem) {
+define i8 @load_i8_unordered(ptr %mem) {
; CHECK-LABEL: load_i8_unordered:
; CHECK: # %bb.0:
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
- %val = load atomic i8, i8* %mem unordered, align 1
+ %val = load atomic i8, ptr %mem unordered, align 1
ret i8 %val
}
-define i16 @load_i16_monotonic(i16* %mem) {
+define i16 @load_i16_monotonic(ptr %mem) {
; CHECK-LABEL: load_i16_monotonic:
; CHECK: # %bb.0:
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
- %val = load atomic i16, i16* %mem monotonic, align 2
+ %val = load atomic i16, ptr %mem monotonic, align 2
ret i16 %val
}
;; NOTE(review): the PPC32 run below jumps straight to PPC64-NEXT lines —
;; CHECK output elided in this excerpt.
-define i32 @load_i32_acquire(i32* %mem) {
+define i32 @load_i32_acquire(ptr %mem) {
; PPC32-LABEL: load_i32_acquire:
; PPC32: # %bb.0:
; PPC32-NEXT: lwz r3, 0(r3)
; PPC64-NEXT: bne- cr7, .+4
; PPC64-NEXT: isync
; PPC64-NEXT: blr
- %val = load atomic i32, i32* %mem acquire, align 4
+ %val = load atomic i32, ptr %mem acquire, align 4
; CHECK-PPC32: lwsync
; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; CHECK-PPC64: bne- [[CR]], .+4
; CHECK-PPC64: isync
ret i32 %val
}
;; NOTE(review): body appears truncated in this excerpt — there is no
;; terminator (ret) before the closing brace.
-define i64 @load_i64_seq_cst(i64* %mem) {
+define i64 @load_i64_seq_cst(ptr %mem) {
; PPC32-LABEL: load_i64_seq_cst:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: bne- cr7, .+4
; PPC64-NEXT: isync
; PPC64-NEXT: blr
- %val = load atomic i64, i64* %mem seq_cst, align 8
+ %val = load atomic i64, ptr %mem seq_cst, align 8
; CHECK-PPC32: lwsync
; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; CHECK-PPC64: bne- [[CR]], .+4
}
; Stores
-define void @store_i8_unordered(i8* %mem) {
+define void @store_i8_unordered(ptr %mem) {
; CHECK-LABEL: store_i8_unordered:
; CHECK: # %bb.0:
; CHECK-NEXT: li r4, 42
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
- store atomic i8 42, i8* %mem unordered, align 1
+ store atomic i8 42, ptr %mem unordered, align 1
ret void
}
-define void @store_i16_monotonic(i16* %mem) {
+define void @store_i16_monotonic(ptr %mem) {
; CHECK-LABEL: store_i16_monotonic:
; CHECK: # %bb.0:
; CHECK-NEXT: li r4, 42
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
- store atomic i16 42, i16* %mem monotonic, align 2
+ store atomic i16 42, ptr %mem monotonic, align 2
ret void
}
;; store release: lwsync barrier before the plain store.
-define void @store_i32_release(i32* %mem) {
+define void @store_i32_release(ptr %mem) {
; CHECK-LABEL: store_i32_release:
; CHECK: # %bb.0:
; CHECK-NEXT: li r4, 42
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
- store atomic i32 42, i32* %mem release, align 4
+ store atomic i32 42, ptr %mem release, align 4
ret void
}
;; store seq_cst: full sync barrier before the store (PPC64 run; PPC32 run elided).
-define void @store_i64_seq_cst(i64* %mem) {
+define void @store_i64_seq_cst(ptr %mem) {
; PPC32-LABEL: store_i64_seq_cst:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: sync
; PPC64-NEXT: std r4, 0(r3)
; PPC64-NEXT: blr
- store atomic i64 42, i64* %mem seq_cst, align 8
+ store atomic i64 42, ptr %mem seq_cst, align 8
ret void
}
; Atomic CmpXchg
-define i8 @cas_strong_i8_sc_sc(i8* %mem) {
+define i8 @cas_strong_i8_sc_sc(ptr %mem) {
; PPC32-LABEL: cas_strong_i8_sc_sc:
; PPC32: # %bb.0:
; PPC32-NEXT: rlwinm r8, r3, 3, 27, 28
; PPC64-NEXT: srw r3, r8, r3
; PPC64-NEXT: lwsync
; PPC64-NEXT: blr
- %val = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
+ %val = cmpxchg ptr %mem, i8 0, i8 1 seq_cst seq_cst
%loaded = extractvalue { i8, i1} %val, 0
ret i8 %loaded
}
-define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
+define i16 @cas_weak_i16_acquire_acquire(ptr %mem) {
; PPC32-LABEL: cas_weak_i16_acquire_acquire:
; PPC32: # %bb.0:
; PPC32-NEXT: li r6, 0
; PPC64-NEXT: srw r3, r8, r4
; PPC64-NEXT: lwsync
; PPC64-NEXT: blr
- %val = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
+ %val = cmpxchg weak ptr %mem, i16 0, i16 1 acquire acquire
%loaded = extractvalue { i16, i1} %val, 0
ret i16 %loaded
}
-define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
+define i32 @cas_strong_i32_acqrel_acquire(ptr %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire:
; CHECK: # %bb.0:
; CHECK-NEXT: li r5, 1
; CHECK-NEXT: mr r3, r4
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %val = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
+ %val = cmpxchg ptr %mem, i32 0, i32 1 acq_rel acquire
%loaded = extractvalue { i32, i1} %val, 0
ret i32 %loaded
}
-define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
+define i64 @cas_weak_i64_release_monotonic(ptr %mem) {
; PPC32-LABEL: cas_weak_i64_release_monotonic:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: .LBB11_3:
; PPC64-NEXT: mr r3, r4
; PPC64-NEXT: blr
- %val = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
+ %val = cmpxchg weak ptr %mem, i64 0, i64 1 release monotonic
%loaded = extractvalue { i64, i1} %val, 0
ret i64 %loaded
}
; AtomicRMW
-define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
+define i8 @add_i8_monotonic(ptr %mem, i8 %operand) {
; PPC32-LABEL: add_i8_monotonic:
; PPC32: # %bb.0:
; PPC32-NEXT: rlwinm r7, r3, 3, 27, 28
; PPC64-NEXT: srw r3, r7, r3
; PPC64-NEXT: clrlwi r3, r3, 24
; PPC64-NEXT: blr
- %val = atomicrmw add i8* %mem, i8 %operand monotonic
+ %val = atomicrmw add ptr %mem, i8 %operand monotonic
ret i8 %val
}
-define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
+define i16 @xor_i16_seq_cst(ptr %mem, i16 %operand) {
; PPC32-LABEL: xor_i16_seq_cst:
; PPC32: # %bb.0:
; PPC32-NEXT: li r6, 0
; PPC64-NEXT: clrlwi r3, r3, 16
; PPC64-NEXT: lwsync
; PPC64-NEXT: blr
- %val = atomicrmw xor i16* %mem, i16 %operand seq_cst
+ %val = atomicrmw xor ptr %mem, i16 %operand seq_cst
ret i16 %val
}
;; xchg acq_rel: lwsync both before and after the exchange loop (loop elided).
-define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
+define i32 @xchg_i32_acq_rel(ptr %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel:
; CHECK: # %bb.0:
; CHECK-NEXT: lwsync
; CHECK-NEXT: mr r3, r5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
- %val = atomicrmw xchg i32* %mem, i32 %operand acq_rel
+ %val = atomicrmw xchg ptr %mem, i32 %operand acq_rel
ret i32 %val
}
-define i64 @and_i64_release(i64* %mem, i64 %operand) {
+define i64 @and_i64_release(ptr %mem, i64 %operand) {
; PPC32-LABEL: and_i64_release:
; PPC32: # %bb.0:
; PPC32-NEXT: mflr r0
; PPC64-NEXT: # %bb.2:
; PPC64-NEXT: mr r3, r5
; PPC64-NEXT: blr
- %val = atomicrmw and i64* %mem, i64 %operand release
+ %val = atomicrmw and ptr %mem, i64 %operand release
ret i64 %val
}
;; NOTE(review): fragment of a TLS/TOC addressing test. @get returns the
;; address of external global @i; the migration changes only its return type
;; spelling (i32* -> ptr).
@i = external global i32, align 4 #0
; Function Attrs: noinline nounwind optnone
-define i32* @get() {
+define ptr @get() {
entry:
- ret i32* @i
+ ret ptr @i
}
; CHECK: la 3, i[TD](2)
;; NOTE(review): @ilocal is not declared anywhere in this excerpt — its
;; declaration presumably lives in an elided part of the original test file.
define dso_local i32 @read_i32_local_linkage() {
entry:
- %0 = load i32, i32* @ilocal, align 4
+ %0 = load i32, ptr @ilocal, align 4
ret i32 %0
}
;; NOTE(review): module header and struct layouts lifted from a Lua
;; interpreter reduction. Under opaque pointers every pointer-typed field
;; collapses to 'ptr', so the '+' lines lose the distinct pointee types that
;; the '-' lines spelled out; field counts and non-pointer members are
;; unchanged on each line.
target triple = "powerpc64-unknown-linux-gnu"
%struct.lua_TValue.17.692 = type { %union.Value.16.691, i32 }
-%union.Value.16.691 = type { %union.GCObject.15.690* }
+%union.Value.16.691 = type { ptr }
%union.GCObject.15.690 = type { %struct.lua_State.14.689 }
-%struct.lua_State.14.689 = type { %union.GCObject.15.690*, i8, i8, i8, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.global_State.10.685*, %struct.CallInfo.11.686*, i32*, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.CallInfo.11.686*, %struct.CallInfo.11.686*, i32, i32, i16, i16, i8, i8, i32, i32, void (%struct.lua_State.14.689*, %struct.lua_Debug.12.687*)*, %struct.lua_TValue.17.692, %struct.lua_TValue.17.692, %union.GCObject.15.690*, %union.GCObject.15.690*, %struct.lua_longjmp.13.688*, i64 }
-%struct.global_State.10.685 = type { %struct.stringtable.0.675, i8* (i8*, i8*, i64, i64)*, i8*, i8, i8, i32, %union.GCObject.15.690*, %union.GCObject.15.690**, %union.GCObject.15.690*, %union.GCObject.15.690*, %union.GCObject.15.690*, %union.GCObject.15.690*, %struct.Mbuffer.1.676, i64, i64, i64, i64, i32, i32, i32 (%struct.lua_State.14.689*)*, %struct.lua_TValue.17.692, %struct.lua_State.14.689*, %struct.UpVal.3.678, [9 x %struct.Table.7.682*], [17 x %union.TString.9.684*] }
-%struct.stringtable.0.675 = type { %union.GCObject.15.690**, i32, i32 }
-%struct.Mbuffer.1.676 = type { i8*, i64, i64 }
-%struct.UpVal.3.678 = type { %union.GCObject.15.690*, i8, i8, %struct.lua_TValue.17.692*, %union.anon.2.677 }
+%struct.lua_State.14.689 = type { ptr, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i16, i16, i8, i8, i32, i32, ptr, %struct.lua_TValue.17.692, %struct.lua_TValue.17.692, ptr, ptr, ptr, i64 }
+%struct.global_State.10.685 = type { %struct.stringtable.0.675, ptr, ptr, i8, i8, i32, ptr, ptr, ptr, ptr, ptr, ptr, %struct.Mbuffer.1.676, i64, i64, i64, i64, i32, i32, ptr, %struct.lua_TValue.17.692, ptr, %struct.UpVal.3.678, [9 x ptr], [17 x ptr] }
+%struct.stringtable.0.675 = type { ptr, i32, i32 }
+%struct.Mbuffer.1.676 = type { ptr, i64, i64 }
+%struct.UpVal.3.678 = type { ptr, i8, i8, ptr, %union.anon.2.677 }
%union.anon.2.677 = type { %struct.lua_TValue.17.692 }
-%struct.Table.7.682 = type { %union.GCObject.15.690*, i8, i8, i8, i8, %struct.Table.7.682*, %struct.lua_TValue.17.692*, %struct.Node.6.681*, %struct.Node.6.681*, %union.GCObject.15.690*, i32 }
+%struct.Table.7.682 = type { ptr, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, i32 }
%struct.Node.6.681 = type { %struct.lua_TValue.17.692, %union.TKey.5.680 }
%union.TKey.5.680 = type { %struct.anon.0.4.679 }
-%struct.anon.0.4.679 = type { %union.Value.16.691, i32, %struct.Node.6.681* }
+%struct.anon.0.4.679 = type { %union.Value.16.691, i32, ptr }
%union.TString.9.684 = type { %struct.anon.1.8.683 }
-%struct.anon.1.8.683 = type { %union.GCObject.15.690*, i8, i8, i8, i32, i64 }
-%struct.CallInfo.11.686 = type { %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, i32*, i32, i32 }
-%struct.lua_Debug.12.687 = type { i32, i8*, i8*, i8*, i8*, i32, i32, i32, i32, [60 x i8], i32 }
+%struct.anon.1.8.683 = type { ptr, i8, i8, i8, i32, i64 }
+%struct.CallInfo.11.686 = type { ptr, ptr, ptr, ptr, i32, i32 }
+%struct.lua_Debug.12.687 = type { i32, ptr, ptr, ptr, ptr, i32, i32, i32, i32, [60 x i8], i32 }
%struct.lua_longjmp.13.688 = type opaque
define void @lua_xmove(i32 signext %n) #0 {
br label %for.body
for.body: ; preds = %for.body.for.body_crit_edge, %for.body.lr.ph
- %0 = phi %struct.lua_TValue.17.692* [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
+ %0 = phi ptr [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body.for.body_crit_edge ]
- %tt = getelementptr inbounds %struct.lua_TValue.17.692, %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
- %1 = load i32, i32* %tt, align 4
+ %tt = getelementptr inbounds %struct.lua_TValue.17.692, ptr %0, i64 %indvars.iv, i32 1
+ %1 = load i32, ptr %tt, align 4
%2 = add i32 %1, %1
- store i32 %2, i32* %tt, align 4
+ store i32 %2, ptr %tt, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge
for.body.for.body_crit_edge: ; preds = %for.body
- %.pre = load %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692** undef, align 8
+ %.pre = load ptr, ptr undef, align 8
br label %for.body
for.end: ; preds = %for.body, %if.end, %entry
; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE
;; NOTE(review): opaque-pointer migration diff over PowerPC MMA bf16 GER
;; intrinsic tests (test50-test61). The recurring simplification: the old IR
;; bitcast i8* arguments to <512 x i1>* / <16 x i8>* before load/store; with
;; opaque pointers the bitcasts vanish and loads/stores use %vqp / %resp
;; directly, renumbering the unnamed values (%0, %1, ...). CHECK bodies are
;; heavily elided in this excerpt (LE run jumps into the BE run's tail).
; Function Attrs: nofree nounwind writeonly
-define dso_local void @test50(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test50(ptr nocapture readnone %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test50:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvbf16ger2 acc0, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8> %vc, <16 x i8> %vc)
- %1 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %0, <512 x i1>* %1, align 64
+ store <512 x i1> %0, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8>, <16 x i8>)
; Function Attrs: nofree nounwind writeonly
-define dso_local void @test51(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test51(ptr nocapture readnone %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test51:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pmxvbf16ger2 acc0, v2, v2, 0, 0, 0
; CHECK-BE-NEXT: blr
entry:
%0 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
- %1 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %0, <512 x i1>* %1, align 64
+ store <512 x i1> %0, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8>, <16 x i8>, i32, i32, i32)
;; test52-test59: accumulate variants — load the 512-bit accumulator from
;; %vqp, apply the pp/pn/np/nn (and pm-prefixed) intrinsic, store to %resp.
; Function Attrs: nofree nounwind
-define dso_local void @test52(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test52(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test52:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>)
; Function Attrs: nofree nounwind
-define dso_local void @test53(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test53(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test53:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>)
; Function Attrs: nofree nounwind
-define dso_local void @test54(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test54(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test54:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>)
; Function Attrs: nofree nounwind
-define dso_local void @test55(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test55(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test55:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>)
; Function Attrs: nofree nounwind
-define dso_local void @test56(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test56(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test56:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
; Function Attrs: nofree nounwind
-define dso_local void @test57(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test57(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test57:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
; Function Attrs: nofree nounwind
-define dso_local void @test58(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test58(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test58:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
; Function Attrs: nofree nounwind
-define dso_local void @test59(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test59(ptr nocapture readonly %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test59:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs1, 32(r3)
; CHECK-BE-NEXT: stxv vs2, 32(r7)
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast i8* %vqp to <512 x i1>*
- %1 = load <512 x i1>, <512 x i1>* %0, align 64
- %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
- %3 = bitcast i8* %resp to <512 x i1>*
- store <512 x i1> %2, <512 x i1>* %3, align 64
+ %0 = load <512 x i1>, ptr %vqp, align 64
+ %1 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ store <512 x i1> %1, ptr %resp, align 64
ret void
}
declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
;; test60/test61: VSX bf16 conversion intrinsics storing a plain <16 x i8>.
; Function Attrs: nofree nounwind writeonly
-define dso_local void @test60(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test60(ptr nocapture readnone %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test60:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvcvspbf16 vs0, v2
; CHECK-BE-NEXT: blr
entry:
%0 = tail call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> %vc)
- %1 = bitcast i8* %resp to <16 x i8>*
- store <16 x i8> %0, <16 x i8>* %1, align 16
+ store <16 x i8> %0, ptr %resp, align 16
ret void
}
declare <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8>)
; Function Attrs: nofree nounwind writeonly
-define dso_local void @test61(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+define dso_local void @test61(ptr nocapture readnone %vqp, ptr nocapture readnone %vpp, <16 x i8> %vc, ptr nocapture %resp) {
; CHECK-LABEL: test61:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xvcvbf16spn vs0, v2
; CHECK-BE-NEXT: blr
entry:
%0 = tail call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> %vc)
- %1 = bitcast i8* %resp to <16 x i8>*
- store <16 x i8> %0, <16 x i8>* %1, align 16
+ store <16 x i8> %0, ptr %resp, align 16
ret void
}
; CHECK: sth 3, -2(1)
; CHECK: lbz 3, -2(1)
%p32 = alloca i16
- store i16 %v, i16* %p32
- %p16 = bitcast i16* %p32 to i8*
- %tmp = load i8, i8* %p16
- store i8 %tmp, i8* @g
+ store i16 %v, ptr %p32
+ %tmp = load i8, ptr %p32
+ store i8 %tmp, ptr @g
ret void
}
;; NOTE(review): rlwimi (bitfield-insert) selection tests. The migration drops
;; zero-index GEPs and struct bitcasts (the pointer is used directly), which
;; renumbers %0/%1 in the '-' vs '+' bodies; the and/or masks and CHECK
;; expectations are untouched.
%struct.s64 = type { i64 }
-define void @bitfieldinsert64(%struct.s64* nocapture %p, i16 zeroext %v) {
+define void @bitfieldinsert64(ptr nocapture %p, i16 zeroext %v) {
; CHECK-LABEL: @bitfieldinsert64
; CHECK: ld [[REG1:[0-9]+]], 0(3)
; CHECK-NEXT: rlwimi [[REG1]], 4, 5, 11, 26
; CHECK-NEXT: std [[REG1]], 0(3)
; CHECK-NEXT: blr
entry:
- %0 = getelementptr inbounds %struct.s64, %struct.s64* %p, i64 0, i32 0
- %1 = zext i16 %v to i64
- %bf.load = load i64, i64* %0, align 8
- %bf.shl = shl nuw nsw i64 %1, 5
+ %0 = zext i16 %v to i64
+ %bf.load = load i64, ptr %p, align 8
+ %bf.shl = shl nuw nsw i64 %0, 5
%bf.clear = and i64 %bf.load, -2097121
%bf.set = or i64 %bf.clear, %bf.shl
- store i64 %bf.set, i64* %0, align 8
+ store i64 %bf.set, ptr %p, align 8
ret void
}
%struct.s32 = type { i32 }
-define void @bitfieldinsert32(%struct.s32* nocapture %p, i32 zeroext %v) {
+define void @bitfieldinsert32(ptr nocapture %p, i32 zeroext %v) {
; CHECK-LABEL: @bitfieldinsert32
; CHECK: lwz [[REG1:[0-9]+]], 0(3)
; CHECK-NEXT: rlwimi [[REG1]], 4, 8, 8, 23
; CHECK-NEXT: stw [[REG1]], 0(3)
; CHECK-NEXT: blr
entry:
- %0 = getelementptr inbounds %struct.s32, %struct.s32* %p, i64 0, i32 0
- %bf.load = load i32, i32* %0, align 4
+ %bf.load = load i32, ptr %p, align 4
%bf.value = shl i32 %v, 8
%bf.shl = and i32 %bf.value, 16776960
%bf.clear = and i32 %bf.load, -16776961
%bf.set = or i32 %bf.clear, %bf.shl
- store i32 %bf.set, i32* %0, align 4
+ store i32 %bf.set, ptr %p, align 4
ret void
}
%struct.s64b = type { i24, i24 }
-define void @bitfieldinsert64b(%struct.s64b* nocapture %p, i8 zeroext %v) {
+define void @bitfieldinsert64b(ptr nocapture %p, i8 zeroext %v) {
; CHECK-LABEL: @bitfieldinsert64b
; CHECK: lwz [[REG1:[0-9]+]], 0(3)
; CHECK-NEXT: rlwimi [[REG1]], 4, 4, 12, 27
; CHECK-NEXT: blr
entry:
%conv = zext i8 %v to i32
- %0 = bitcast %struct.s64b* %p to i32*
- %bf.load = load i32, i32* %0, align 4
+ %bf.load = load i32, ptr %p, align 4
%bf.shl = shl nuw nsw i32 %conv, 4
%bf.clear = and i32 %bf.load, -1048561
%bf.set = or i32 %bf.clear, %bf.shl
- store i32 %bf.set, i32* %0, align 4
+ store i32 %bf.set, ptr %p, align 4
ret void
}
%struct.s64c = type { i32, [4 x i8] }
-define void @bitfieldinsert64c(%struct.s64c* nocapture %p, i16 zeroext %v) {
+define void @bitfieldinsert64c(ptr nocapture %p, i16 zeroext %v) {
; CHECK-LABEL: @bitfieldinsert64c
; CHECK: lwz [[REG1:[0-9]+]], 0(3)
; CHECK-NEXT: rlwimi [[REG1]], 4, 5, 11, 26
; CHECK-NEXT: blr
entry:
%conv = zext i16 %v to i32
- %0 = getelementptr inbounds %struct.s64c, %struct.s64c* %p, i64 0, i32 0
- %bf.load = load i32, i32* %0, align 8
+ %bf.load = load i32, ptr %p, align 8
%bf.shl = shl nuw nsw i32 %conv, 5
%bf.clear = and i32 %bf.load, -2097121
%bf.set = or i32 %bf.clear, %bf.shl
- store i32 %bf.set, i32* %0, align 8
+ store i32 %bf.set, ptr %p, align 8
ret void
}
; RUN: llc -verify-machineinstrs < %s -code-model=medium -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=MEDIUM
; RUN: llc -verify-machineinstrs < %s -code-model=large -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=MEDIUM
;; NOTE(review): blockaddress/TOC test fragment. The function returns
;; blockaddress(@test, %here), but the 'here:' label line itself does not
;; appear in this excerpt (only 'br label %here' does) — presumably elided.
-define i8* @test() {
+define ptr @test() {
entry:
br label %here
; SMALL: blr
; SMALL: .LC[[LC0]]:
; SMALL: .tc .Ltmp[[TMP0]][TC],.Ltmp[[TMP0]]
- ret i8* blockaddress(@test, %here)
+ ret ptr blockaddress(@test, %here)
}
; RUN: llc -ppc-asm-full-reg-names -verify-machineinstrs < %s -mtriple=powerpc64-- -mcpu=a2 | FileCheck %s --check-prefixes=A2_64
;; NOTE(review): byte-swapped load/store (l/st{w,h,d}brx) selection tests.
;; The migration deletes the i8*-to-iN* bitcast after each GEP and uses the
;; GEP result directly; llvm.bswap.* calls and CHECK lines are unchanged.
-define void @STWBRX(i32 %i, i8* %ptr, i32 %off) {
+define void @STWBRX(i32 %i, ptr %ptr, i32 %off) {
; X32-LABEL: STWBRX:
; X32: # %bb.0:
; X32-NEXT: stwbrx r3, r4, r5
; A2_64-NEXT: extsw r5, r5
; A2_64-NEXT: stwbrx r3, r4, r5
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i32 %off
- %tmp1.upgrd.1 = bitcast i8* %tmp1 to i32*
+ %tmp1 = getelementptr i8, ptr %ptr, i32 %off
%tmp13 = tail call i32 @llvm.bswap.i32( i32 %i )
- store i32 %tmp13, i32* %tmp1.upgrd.1
+ store i32 %tmp13, ptr %tmp1
ret void
}
-define i32 @LWBRX(i8* %ptr, i32 %off) {
+define i32 @LWBRX(ptr %ptr, i32 %off) {
; X32-LABEL: LWBRX:
; X32: # %bb.0:
; X32-NEXT: lwbrx r3, r3, r4
; A2_64-NEXT: extsw r4, r4
; A2_64-NEXT: lwbrx r3, r3, r4
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i32 %off
- %tmp1.upgrd.2 = bitcast i8* %tmp1 to i32*
- %tmp = load i32, i32* %tmp1.upgrd.2
+ %tmp1 = getelementptr i8, ptr %ptr, i32 %off
+ %tmp = load i32, ptr %tmp1
%tmp14 = tail call i32 @llvm.bswap.i32( i32 %tmp )
ret i32 %tmp14
}
-define void @STHBRX(i16 %s, i8* %ptr, i32 %off) {
+define void @STHBRX(i16 %s, ptr %ptr, i32 %off) {
; X32-LABEL: STHBRX:
; X32: # %bb.0:
; X32-NEXT: sthbrx r3, r4, r5
; A2_64-NEXT: extsw r5, r5
; A2_64-NEXT: sthbrx r3, r4, r5
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i32 %off
- %tmp1.upgrd.3 = bitcast i8* %tmp1 to i16*
+ %tmp1 = getelementptr i8, ptr %ptr, i32 %off
%tmp5 = call i16 @llvm.bswap.i16( i16 %s )
- store i16 %tmp5, i16* %tmp1.upgrd.3
+ store i16 %tmp5, ptr %tmp1
ret void
}
-define i16 @LHBRX(i8* %ptr, i32 %off) {
+define i16 @LHBRX(ptr %ptr, i32 %off) {
; X32-LABEL: LHBRX:
; X32: # %bb.0:
; X32-NEXT: lhbrx r3, r3, r4
; A2_64-NEXT: extsw r4, r4
; A2_64-NEXT: lhbrx r3, r3, r4
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i32 %off
- %tmp1.upgrd.4 = bitcast i8* %tmp1 to i16*
- %tmp = load i16, i16* %tmp1.upgrd.4
+ %tmp1 = getelementptr i8, ptr %ptr, i32 %off
+ %tmp = load i16, ptr %tmp1
%tmp6 = call i16 @llvm.bswap.i16( i16 %tmp )
ret i16 %tmp6
}
; TODO: combine the bswap feeding a store on subtargets
; that do not have an STDBRX.
-define void @STDBRX(i64 %i, i8* %ptr, i64 %off) {
+define void @STDBRX(i64 %i, ptr %ptr, i64 %off) {
; PWR7_32-LABEL: STDBRX:
; PWR7_32: # %bb.0:
; PWR7_32-NEXT: li r6, 4
; A2_64: # %bb.0:
; A2_64-NEXT: stdbrx r3, r4, r5
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i64 %off
- %tmp1.upgrd.1 = bitcast i8* %tmp1 to i64*
+ %tmp1 = getelementptr i8, ptr %ptr, i64 %off
%tmp13 = tail call i64 @llvm.bswap.i64( i64 %i )
- store i64 %tmp13, i64* %tmp1.upgrd.1
+ store i64 %tmp13, ptr %tmp1
ret void
}
-define i64 @LDBRX(i8* %ptr, i64 %off) {
+define i64 @LDBRX(ptr %ptr, i64 %off) {
; PWR7_32-LABEL: LDBRX:
; PWR7_32: # %bb.0:
; PWR7_32-NEXT: li r5, 4
; A2_64: # %bb.0:
; A2_64-NEXT: ldbrx r3, r3, r4
; A2_64-NEXT: blr
- %tmp1 = getelementptr i8, i8* %ptr, i64 %off
- %tmp1.upgrd.2 = bitcast i8* %tmp1 to i64*
- %tmp = load i64, i64* %tmp1.upgrd.2
+ %tmp1 = getelementptr i8, ptr %ptr, i64 %off
+ %tmp = load i64, ptr %tmp1
%tmp14 = tail call i64 @llvm.bswap.i64( i64 %tmp )
ret i64 %tmp14
}
;} //
;// P8: lxvd2x, xxswapd, xvcvspsxws //
;// P9: lxvx, xvcvspsxws //
-;vector int fromDiffMemConsAConvftoi(float *ptr) { //
+;vector int fromDiffMemConsAConvftoi(ptr ptr) { //
; return (vector int) { ptr[0], ptr[1], ptr[2], ptr[3] }; //
;} //
;// P8: 2 x lxvd2x, 2 x xxswapd, vperm, xvcvspsxws //
;// P9: 2 x lxvx, vperm, xvcvspsxws //
-;vector int fromDiffMemConsDConvftoi(float *ptr) { //
+;vector int fromDiffMemConsDConvftoi(ptr ptr) { //
; return (vector int) { ptr[3], ptr[2], ptr[1], ptr[0] }; //
;} //
;// P8: 4 x lxsspx, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: 4 x lxssp, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// Note: if the consecutive loads learns to handle pre-inc, this can be: //
;// sldi 2, load, xvcvspuxws //
-;vector int fromDiffMemVarAConvftoi(float *arr, int elem) { //
+;vector int fromDiffMemVarAConvftoi(ptr arr, int elem) { //
; return (vector int) { arr[elem], arr[elem+1], arr[elem+2], arr[elem+3] }; //
;} //
;// P8: 4 x lxsspx, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: 4 x lxssp, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// Note: if the consecutive loads learns to handle pre-inc, this can be: //
;// sldi 2, 2 x load, vperm, xvcvspuxws //
-;vector int fromDiffMemVarDConvftoi(float *arr, int elem) { //
+;vector int fromDiffMemVarDConvftoi(ptr arr, int elem) { //
; return (vector int) { arr[elem], arr[elem-1], arr[elem-2], arr[elem-3] }; //
;} //
;// P8: xscvdpsxws, xxspltw //
;} //
;// P8: lxsspx, xscvdpsxws, xxspltw //
;// P9: lxvwsx, xvcvspsxws //
-;vector int spltMemValConvftoi(float *ptr) { //
+;vector int spltMemValConvftoi(ptr ptr) { //
; return (vector int)*ptr; //
;} //
;// P8: vspltisw //
;} //
;// P8: 2 x lxvd2x, 2 x xxswapd, xxmrgld, xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: 2 x lxvx, 2 x xxswapd, xxmrgld, xxmrghd, 2 x xvcvspsxws, vmrgew //
-;vector int fromDiffMemConsAConvdtoi(double *ptr) { //
+;vector int fromDiffMemConsAConvdtoi(ptr ptr) { //
; return (vector int) { ptr[0], ptr[1], ptr[2], ptr[3] }; //
;} //
;// P8: 4 x lxsdx, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: 4 x lfd, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
-;vector int fromDiffMemConsDConvdtoi(double *ptr) { //
+;vector int fromDiffMemConsDConvdtoi(ptr ptr) { //
; return (vector int) { ptr[3], ptr[2], ptr[1], ptr[0] }; //
;} //
;// P8: lfdux, 3 x lxsdx, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: lfdux, 3 x lfd, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
-;vector int fromDiffMemVarAConvdtoi(double *arr, int elem) { //
+;vector int fromDiffMemVarAConvdtoi(ptr arr, int elem) { //
; return (vector int) { arr[elem], arr[elem+1], arr[elem+2], arr[elem+3] }; //
;} //
;// P8: lfdux, 3 x lxsdx, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
;// P9: lfdux, 3 x lfd, 2 x xxmrghd, 2 x xvcvspsxws, vmrgew //
-;vector int fromDiffMemVarDConvdtoi(double *arr, int elem) { //
+;vector int fromDiffMemVarDConvdtoi(ptr arr, int elem) { //
; return (vector int) { arr[elem], arr[elem-1], arr[elem-2], arr[elem-3] }; //
;} //
;// P8: xscvdpsxws, xxspltw //
;} //
;// P8: lxsdx, xscvdpsxws, xxspltw //
;// P9: lxssp, xscvdpsxws, xxspltw //
-;vector int spltMemValConvdtoi(double *ptr) { //
+;vector int spltMemValConvdtoi(ptr ptr) { //
; return (vector int)*ptr; //
;} //
;/*=================================== int ===================================*/
;} //
;// P8: lxvd2x, xxswapd, xvcvspuxws //
;// P9: lxvx, xvcvspuxws //
-;vector unsigned int fromDiffMemConsAConvftoui(float *ptr) { //
+;vector unsigned int fromDiffMemConsAConvftoui(ptr ptr) { //
; return (vector unsigned int) { ptr[0], ptr[1], ptr[2], ptr[3] }; //
;} //
;// P8: 2 x lxvd2x, 2 x xxswapd, vperm, xvcvspuxws //
;// P9: 2 x lxvx, vperm, xvcvspuxws //
-;vector unsigned int fromDiffMemConsDConvftoui(float *ptr) { //
+;vector unsigned int fromDiffMemConsDConvftoui(ptr ptr) { //
; return (vector unsigned int) { ptr[3], ptr[2], ptr[1], ptr[0] }; //
;} //
;// P8: lfsux, 3 x lxsspx, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// P9: lfsux, 3 x lfs, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// Note: if the consecutive loads learns to handle pre-inc, this can be: //
;// sldi 2, load, xvcvspuxws //
-;vector unsigned int fromDiffMemVarAConvftoui(float *arr, int elem) { //
+;vector unsigned int fromDiffMemVarAConvftoui(ptr arr, int elem) { //
; return (vector unsigned int) { arr[elem], arr[elem+1], //
; arr[elem+2], arr[elem+3] }; //
;} //
;// P9: lfsux, 3 x lfs, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// Note: if the consecutive loads learns to handle pre-inc, this can be: //
;// sldi 2, 2 x load, vperm, xvcvspuxws //
-;vector unsigned int fromDiffMemVarDConvftoui(float *arr, int elem) { //
+;vector unsigned int fromDiffMemVarDConvftoui(ptr arr, int elem) { //
; return (vector unsigned int) { arr[elem], arr[elem-1], //
; arr[elem-2], arr[elem-3] }; //
;} //
;} //
;// P8: lxsspx, xscvdpuxws, xxspltw //
;// P9: lxvwsx, xvcvspuxws //
-;vector unsigned int spltMemValConvftoui(float *ptr) { //
+;vector unsigned int spltMemValConvftoui(ptr ptr) { //
; return (vector unsigned int)*ptr; //
;} //
;// P8: vspltisw //
;} //
;// P8: 2 x lxvd2x, 2 x xxswapd, xxmrgld, xxmrghd, 2 x xvcvspuxws, vmrgew //
;// P9: 2 x lxvx, xxmrgld, xxmrghd, 2 x xvcvspuxws, vmrgew //
-;vector unsigned int fromDiffMemConsAConvdtoui(double *ptr) { //
+;vector unsigned int fromDiffMemConsAConvdtoui(ptr ptr) { //
; return (vector unsigned int) { ptr[0], ptr[1], ptr[2], ptr[3] }; //
;} //
;// P8: 4 x lxsdx, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// P9: 4 x lfd, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
-;vector unsigned int fromDiffMemConsDConvdtoui(double *ptr) { //
+;vector unsigned int fromDiffMemConsDConvdtoui(ptr ptr) { //
; return (vector unsigned int) { ptr[3], ptr[2], ptr[1], ptr[0] }; //
;} //
;// P8: lfdux, 3 x lxsdx, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// P9: lfdux, 3 x lfd, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
-;vector unsigned int fromDiffMemVarAConvdtoui(double *arr, int elem) { //
+;vector unsigned int fromDiffMemVarAConvdtoui(ptr arr, int elem) { //
; return (vector unsigned int) { arr[elem], arr[elem+1], //
; arr[elem+2], arr[elem+3] }; //
;} //
;// P8: lfdux, 3 x lxsdx, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
;// P9: lfdux, 3 x lfd, 2 x xxmrghd, 2 x xvcvspuxws, vmrgew //
-;vector unsigned int fromDiffMemVarDConvdtoui(double *arr, int elem) { //
+;vector unsigned int fromDiffMemVarDConvdtoui(ptr arr, int elem) { //
; return (vector unsigned int) { arr[elem], arr[elem-1], //
; arr[elem-2], arr[elem-3] }; //
;} //
;} //
;// P8: lxsspx, xscvdpuxws, xxspltw //
;// P9: lfd, xscvdpuxws, xxspltw //
-;vector unsigned int spltMemValConvdtoui(double *ptr) { //
+;vector unsigned int spltMemValConvdtoui(ptr ptr) { //
; return (vector unsigned int)*ptr; //
;} //
;/*=============================== unsigned int ==============================*/
;} //
;// P8: 2 x lxsspx, xxmrghd, xvcvdpsxds //
;// P9: 2 x lxssp, xxmrghd, xvcvdpsxds //
-;vector long long fromDiffMemConsAConvftoll(float *ptr) { //
+;vector long long fromDiffMemConsAConvftoll(ptr ptr) { //
; return (vector long long) { ptr[0], ptr[1] }; //
;} //
;// P8: 2 x lxsspx, xxmrghd, xvcvdpsxds //
;// P9: 2 x lxssp, xxmrghd, xvcvdpsxds //
-;vector long long fromDiffMemConsDConvftoll(float *ptr) { //
+;vector long long fromDiffMemConsDConvftoll(ptr ptr) { //
; return (vector long long) { ptr[3], ptr[2] }; //
;} //
;// P8: sldi 2, lfsux, lxsspx, xxmrghd, xvcvdpsxds //
;// P9: sldi 2, lfsux, lfs, xxmrghd, xvcvdpsxds //
-;vector long long fromDiffMemVarAConvftoll(float *arr, int elem) { //
+;vector long long fromDiffMemVarAConvftoll(ptr arr, int elem) { //
; return (vector long long) { arr[elem], arr[elem+1] }; //
;} //
;// P8: sldi 2, lfsux, lxsspx, xxmrghd, xvcvdpsxds //
;// P9: sldi 2, lfsux, lfs, xxmrghd, xvcvdpsxds //
-;vector long long fromDiffMemVarDConvftoll(float *arr, int elem) { //
+;vector long long fromDiffMemVarDConvftoll(ptr arr, int elem) { //
; return (vector long long) { arr[elem], arr[elem-1] }; //
;} //
;// P8: xscvdpsxds, xxspltd //
;} //
;// P8: lxsspx, xscvdpsxds, xxspltd //
;// P9: lfs, xscvdpsxds, xxspltd //
-;vector long long spltMemValConvftoll(float *ptr) { //
+;vector long long spltMemValConvftoll(ptr ptr) { //
; return (vector long long)*ptr; //
;} //
;// P8: constant pool load (possible: vmrgew (xxlxor), (vspltisw)) //
;} //
;// P8: lxvd2x, xxswapd, xvcvdpsxds //
;// P9: lxvx, xvcvdpsxds //
-;vector long long fromDiffMemConsAConvdtoll(double *ptr) { //
+;vector long long fromDiffMemConsAConvdtoll(ptr ptr) { //
; return (vector long long) { ptr[0], ptr[1] }; //
;} //
;// P8: lxvd2x, xvcvdpsxds //
;// P9: lxvx, xxswapd, xvcvdpsxds //
-;vector long long fromDiffMemConsDConvdtoll(double *ptr) { //
+;vector long long fromDiffMemConsDConvdtoll(ptr ptr) { //
; return (vector long long) { ptr[3], ptr[2] }; //
;} //
;// P8: sldi 3, lxvd2x, xxswapd, xvcvdpsxds //
;// P9: sldi 3, lxvx, xvcvdpsxds //
-;vector long long fromDiffMemVarAConvdtoll(double *arr, int elem) { //
+;vector long long fromDiffMemVarAConvdtoll(ptr arr, int elem) { //
; return (vector long long) { arr[elem], arr[elem+1] }; //
;} //
;// P8: sldi 3, lxvd2x, xvcvdpsxds //
;// P9: sldi 3, lxvx, xxswapd, xvcvdpsxds //
-;vector long long fromDiffMemVarDConvdtoll(double *arr, int elem) { //
+;vector long long fromDiffMemVarDConvdtoll(ptr arr, int elem) { //
; return (vector long long) { arr[elem], arr[elem-1] }; //
;} //
;// P8: xscvdpsxds, xxspltd //
;} //
;// P8: lxvdsx, xvcvdpsxds //
;// P9: lxvdsx, xvcvdpsxds //
-;vector long long spltMemValConvdtoll(double *ptr) { //
+;vector long long spltMemValConvdtoll(ptr ptr) { //
; return (vector long long)*ptr; //
;} //
;/*=============================== long long =================================*/
;} //
;// P8: 2 x lxsspx, xxmrghd, xvcvdpuxds //
;// P9: 2 x lxssp, xxmrghd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemConsAConvftoull(float *ptr) { //
+;vector unsigned long long fromDiffMemConsAConvftoull(ptr ptr) { //
; return (vector unsigned long long) { ptr[0], ptr[1] }; //
;} //
;// P8: 2 x lxsspx, xxmrghd, xvcvdpuxds //
;// P9: 2 x lxssp, xxmrghd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemConsDConvftoull(float *ptr) { //
+;vector unsigned long long fromDiffMemConsDConvftoull(ptr ptr) { //
; return (vector unsigned long long) { ptr[3], ptr[2] }; //
;} //
;// P8: sldi 2, lfsux, lxsspx, xxmrghd, xvcvdpuxds //
;// P9: sldi 2, lfsux, lfs, xxmrghd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemVarAConvftoull(float *arr, int elem) { //
+;vector unsigned long long fromDiffMemVarAConvftoull(ptr arr, int elem) { //
; return (vector unsigned long long) { arr[elem], arr[elem+1] }; //
;} //
;// P8: sldi 2, lfsux, lxsspx, xxmrghd, xvcvdpuxds //
;// P9: sldi 2, lfsux, lfs, xxmrghd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemVarDConvftoull(float *arr, int elem) { //
+;vector unsigned long long fromDiffMemVarDConvftoull(ptr arr, int elem) { //
; return (vector unsigned long long) { arr[elem], arr[elem-1] }; //
;} //
;// P8: xscvdpuxds, xxspltd //
;} //
;// P8: lxsspx, xscvdpuxds, xxspltd //
;// P9: lfs, xscvdpuxds, xxspltd //
-;vector unsigned long long spltMemValConvftoull(float *ptr) { //
+;vector unsigned long long spltMemValConvftoull(ptr ptr) { //
; return (vector unsigned long long)*ptr; //
;} //
;// P8: constant pool load (possible: vmrgew (xxlxor), (vspltisw)) //
;} //
;// P8: lxvd2x, xxswapd, xvcvdpuxds //
;// P9: lxvx, xvcvdpuxds //
-;vector unsigned long long fromDiffMemConsAConvdtoull(double *ptr) { //
+;vector unsigned long long fromDiffMemConsAConvdtoull(ptr ptr) { //
; return (vector unsigned long long) { ptr[0], ptr[1] }; //
;} //
;// P8: lxvd2x, xvcvdpuxds //
;// P9: lxvx, xxswapd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemConsDConvdtoull(double *ptr) { //
+;vector unsigned long long fromDiffMemConsDConvdtoull(ptr ptr) { //
; return (vector unsigned long long) { ptr[3], ptr[2] }; //
;} //
;// P8: sldi 3, lxvd2x, xxswapd, xvcvdpuxds //
;// P9: sldi 3, lxvx, xvcvdpuxds //
-;vector unsigned long long fromDiffMemVarAConvdtoull(double *arr, int elem) { //
+;vector unsigned long long fromDiffMemVarAConvdtoull(ptr arr, int elem) { //
; return (vector unsigned long long) { arr[elem], arr[elem+1] }; //
;} //
;// P8: sldi 3, lxvd2x, xvcvdpuxds //
;// P9: sldi 3, lxvx, xxswapd, xvcvdpuxds //
-;vector unsigned long long fromDiffMemVarDConvdtoull(double *arr, int elem) { //
+;vector unsigned long long fromDiffMemVarDConvdtoull(ptr arr, int elem) { //
; return (vector unsigned long long) { arr[elem], arr[elem-1] }; //
;} //
;// P8: xscvdpuxds, xxspltd //
;} //
;// P8: lxvdsx, xvcvdpuxds //
;// P9: lxvdsx, xvcvdpuxds //
-;vector unsigned long long spltMemValConvdtoull(double *ptr) { //
+;vector unsigned long long spltMemValConvdtoull(ptr ptr) { //
; return (vector unsigned long long)*ptr; //
;} //
;/*========================== unsigned long long ==============================*/
ret <4 x i32> <i32 242, i32 -113, i32 889, i32 19>
}
-define <4 x i32> @fromDiffMemConsAi(i32* nocapture readonly %arr) {
+define <4 x i32> @fromDiffMemConsAi(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsAi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xxswapd v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load i32, i32* %arr, align 4
+ %0 = load i32, ptr %arr, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 1
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 1
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 2
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 2
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %arrayidx5 = getelementptr inbounds i32, i32* %arr, i64 3
- %3 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %arr, i64 3
+ %3 = load i32, ptr %arrayidx5, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
-define <4 x i32> @fromDiffMemConsDi(i32* nocapture readonly %arr) {
+define <4 x i32> @fromDiffMemConsDi(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsDi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: vperm v2, v2, v2, v3
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 3
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 2
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 1
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 1
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %3 = load i32, i32* %arr, align 4
+ %3 = load i32, ptr %arr, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
-define <4 x i32> @fromDiffMemVarAi(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %arr, i64 %idxprom1
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %arr, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%add4 = add nsw i32 %elem, 2
%idxprom5 = sext i32 %add4 to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %arr, i64 %idxprom5
- %2 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %arr, i64 %idxprom5
+ %2 = load i32, ptr %arrayidx6, align 4
%vecinit7 = insertelement <4 x i32> %vecinit3, i32 %2, i32 2
%add8 = add nsw i32 %elem, 3
%idxprom9 = sext i32 %add8 to i64
- %arrayidx10 = getelementptr inbounds i32, i32* %arr, i64 %idxprom9
- %3 = load i32, i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %arr, i64 %idxprom9
+ %3 = load i32, ptr %arrayidx10, align 4
%vecinit11 = insertelement <4 x i32> %vecinit7, i32 %3, i32 3
ret <4 x i32> %vecinit11
}
-define <4 x i32> @fromDiffMemVarDi(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %arr, i64 %idxprom1
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %arr, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%sub4 = add nsw i32 %elem, -2
%idxprom5 = sext i32 %sub4 to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %arr, i64 %idxprom5
- %2 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %arr, i64 %idxprom5
+ %2 = load i32, ptr %arrayidx6, align 4
%vecinit7 = insertelement <4 x i32> %vecinit3, i32 %2, i32 2
%sub8 = add nsw i32 %elem, -3
%idxprom9 = sext i32 %sub8 to i64
- %arrayidx10 = getelementptr inbounds i32, i32* %arr, i64 %idxprom9
- %3 = load i32, i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %arr, i64 %idxprom9
+ %3 = load i32, ptr %arrayidx10, align 4
%vecinit11 = insertelement <4 x i32> %vecinit7, i32 %3, i32 3
ret <4 x i32> %vecinit11
}
-define <4 x i32> @fromRandMemConsi(i32* nocapture readonly %arr) {
+define <4 x i32> @fromRandMemConsi(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromRandMemConsi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lwz r4, 16(r3)
; P8LE-NEXT: xxmrghd v2, vs1, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 4
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 4
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 18
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 18
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 2
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 2
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %arrayidx5 = getelementptr inbounds i32, i32* %arr, i64 88
- %3 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %arr, i64 88
+ %3 = load i32, ptr %arrayidx5, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
-define <4 x i32> @fromRandMemVari(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromRandMemVari(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromRandMemVari:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
entry:
%add = add nsw i32 %elem, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%add1 = add nsw i32 %elem, 1
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 %idxprom2
- %1 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 %idxprom2
+ %1 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds i32, i32* %arr, i64 %idxprom6
- %2 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %arr, i64 %idxprom6
+ %2 = load i32, ptr %arrayidx7, align 4
%vecinit8 = insertelement <4 x i32> %vecinit4, i32 %2, i32 2
%add9 = add nsw i32 %elem, 8
%idxprom10 = sext i32 %add9 to i64
- %arrayidx11 = getelementptr inbounds i32, i32* %arr, i64 %idxprom10
- %3 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %arr, i64 %idxprom10
+ %3 = load i32, ptr %arrayidx11, align 4
%vecinit12 = insertelement <4 x i32> %vecinit8, i32 %3, i32 3
ret <4 x i32> %vecinit12
}
ret <4 x i32> %splat.splat
}
-define <4 x i32> @spltMemVali(i32* nocapture readonly %ptr) {
+define <4 x i32> @spltMemVali(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemVali:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvwsx v2, 0, r3
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %splat.splat
ret <4 x i32> <i32 24, i32 234, i32 988, i32 422>
}
-define <4 x i32> @fromDiffMemConsAConvftoi(float* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsAConvftoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvftoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: xvcvspsxws v2, v2
; P8LE-NEXT: blr
entry:
- %0 = bitcast float* %ptr to <4 x float>*
- %1 = load <4 x float>, <4 x float>* %0, align 4
- %2 = fptosi <4 x float> %1 to <4 x i32>
- ret <4 x i32> %2
+ %0 = load <4 x float>, ptr %ptr, align 4
+ %1 = fptosi <4 x float> %0 to <4 x i32>
+ ret <4 x i32> %1
}
-define <4 x i32> @fromDiffMemConsDConvftoi(float* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsDConvftoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvftoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xvcvspsxws v2, v2
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %ptr, i64 3
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ptr, i64 3
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 2
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 2
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptosi float %1 to i32
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
- %arrayidx4 = getelementptr inbounds float, float* %ptr, i64 1
- %2 = load float, float* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %ptr, i64 1
+ %2 = load float, ptr %arrayidx4, align 4
%conv5 = fptosi float %2 to i32
%vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
- %3 = load float, float* %ptr, align 4
+ %3 = load float, ptr %ptr, align 4
%conv8 = fptosi float %3 to i32
%vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
ret <4 x i32> %vecinit9
}
-define <4 x i32> @fromDiffMemVarAConvftoi(float* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAConvftoi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvftoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptosi float %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds float, float* %arr, i64 %idxprom6
- %2 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %arr, i64 %idxprom6
+ %2 = load float, ptr %arrayidx7, align 4
%conv8 = fptosi float %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%add10 = add nsw i32 %elem, 3
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds float, float* %arr, i64 %idxprom11
- %3 = load float, float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, ptr %arr, i64 %idxprom11
+ %3 = load float, ptr %arrayidx12, align 4
%conv13 = fptosi float %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
}
-define <4 x i32> @fromDiffMemVarDConvftoi(float* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDConvftoi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvftoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptosi float %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%sub5 = add nsw i32 %elem, -2
%idxprom6 = sext i32 %sub5 to i64
- %arrayidx7 = getelementptr inbounds float, float* %arr, i64 %idxprom6
- %2 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %arr, i64 %idxprom6
+ %2 = load float, ptr %arrayidx7, align 4
%conv8 = fptosi float %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%sub10 = add nsw i32 %elem, -3
%idxprom11 = sext i32 %sub10 to i64
- %arrayidx12 = getelementptr inbounds float, float* %arr, i64 %idxprom11
- %3 = load float, float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, ptr %arr, i64 %idxprom11
+ %3 = load float, ptr %arrayidx12, align 4
%conv13 = fptosi float %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
ret <4 x i32> %splat.splat
}
-define <4 x i32> @spltMemValConvftoi(float* nocapture readonly %ptr) {
+define <4 x i32> @spltMemValConvftoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvftoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfiwzx f0, 0, r3
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptosi float %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> <i32 24, i32 234, i32 988, i32 422>
}
-define <4 x i32> @fromDiffMemConsAConvdtoi(double* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsAConvdtoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvdtoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: vmrgew v2, v3, v2
; P8LE-NEXT: blr
entry:
- %0 = bitcast double* %ptr to <2 x double>*
- %1 = load <2 x double>, <2 x double>* %0, align 8
- %2 = fptosi <2 x double> %1 to <2 x i32>
- %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 2
- %3 = bitcast double* %arrayidx4 to <2 x double>*
- %4 = load <2 x double>, <2 x double>* %3, align 8
- %5 = fptosi <2 x double> %4 to <2 x i32>
- %vecinit9 = shufflevector <2 x i32> %2, <2 x i32> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %0 = load <2 x double>, ptr %ptr, align 8
+ %1 = fptosi <2 x double> %0 to <2 x i32>
+ %arrayidx4 = getelementptr inbounds double, ptr %ptr, i64 2
+ %2 = load <2 x double>, ptr %arrayidx4, align 8
+ %3 = fptosi <2 x double> %2 to <2 x i32>
+ %vecinit9 = shufflevector <2 x i32> %1, <2 x i32> %3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i32> %vecinit9
}
-define <4 x i32> @fromDiffMemConsDConvdtoi(double* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsDConvdtoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvdtoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfd f0, 24(r3)
; P8LE-NEXT: vmrgew v2, v3, v2
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds double, double* %ptr, i64 3
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ptr, i64 3
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
- %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 2
- %1 = load double, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 2
+ %1 = load double, ptr %arrayidx1, align 8
%conv2 = fptosi double %1 to i32
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
- %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 1
- %2 = load double, double* %arrayidx4, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %ptr, i64 1
+ %2 = load double, ptr %arrayidx4, align 8
%conv5 = fptosi double %2 to i32
%vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
- %3 = load double, double* %ptr, align 8
+ %3 = load double, ptr %ptr, align 8
%conv8 = fptosi double %3 to i32
%vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
ret <4 x i32> %vecinit9
}
-define <4 x i32> @fromDiffMemVarAConvdtoi(double* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAConvdtoi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvdtoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptosi double %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds double, double* %arr, i64 %idxprom6
- %2 = load double, double* %arrayidx7, align 8
+ %arrayidx7 = getelementptr inbounds double, ptr %arr, i64 %idxprom6
+ %2 = load double, ptr %arrayidx7, align 8
%conv8 = fptosi double %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%add10 = add nsw i32 %elem, 3
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds double, double* %arr, i64 %idxprom11
- %3 = load double, double* %arrayidx12, align 8
+ %arrayidx12 = getelementptr inbounds double, ptr %arr, i64 %idxprom11
+ %3 = load double, ptr %arrayidx12, align 8
%conv13 = fptosi double %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
}
-define <4 x i32> @fromDiffMemVarDConvdtoi(double* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDConvdtoi(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvdtoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptosi double %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%sub5 = add nsw i32 %elem, -2
%idxprom6 = sext i32 %sub5 to i64
- %arrayidx7 = getelementptr inbounds double, double* %arr, i64 %idxprom6
- %2 = load double, double* %arrayidx7, align 8
+ %arrayidx7 = getelementptr inbounds double, ptr %arr, i64 %idxprom6
+ %2 = load double, ptr %arrayidx7, align 8
%conv8 = fptosi double %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%sub10 = add nsw i32 %elem, -3
%idxprom11 = sext i32 %sub10 to i64
- %arrayidx12 = getelementptr inbounds double, double* %arr, i64 %idxprom11
- %3 = load double, double* %arrayidx12, align 8
+ %arrayidx12 = getelementptr inbounds double, ptr %arr, i64 %idxprom11
+ %3 = load double, ptr %arrayidx12, align 8
%conv13 = fptosi double %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
ret <4 x i32> %splat.splat
}
; Splat: load one double from %ptr, fptosi to i32, broadcast into all 4 lanes.
-define <4 x i32> @spltMemValConvdtoi(double* nocapture readonly %ptr) {
+define <4 x i32> @spltMemValConvdtoi(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvdtoi:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfd f0, 0(r3)
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load double, double* %ptr, align 8
+ %0 = load double, ptr %ptr, align 8
%conv = fptosi double %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
; Fixed: returned an unrelated constant while %splat.splat was computed but dead;
; the CHECK lines (lfd + xxspltw) show the splat is the intended result.
ret <4 x i32> %splat.splat
}
; Build <4 x i32> from four i32 loads at consecutive ascending offsets arr[0..3].
-define <4 x i32> @fromDiffMemConsAui(i32* nocapture readonly %arr) {
+define <4 x i32> @fromDiffMemConsAui(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsAui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xxswapd v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load i32, i32* %arr, align 4
+ %0 = load i32, ptr %arr, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 1
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 1
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 2
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 2
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %arrayidx5 = getelementptr inbounds i32, i32* %arr, i64 3
- %3 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %arr, i64 3
+ %3 = load i32, ptr %arrayidx5, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
; Build <4 x i32> from four i32 loads at consecutive descending offsets arr[3..0].
-define <4 x i32> @fromDiffMemConsDui(i32* nocapture readonly %arr) {
+define <4 x i32> @fromDiffMemConsDui(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsDui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: vperm v2, v2, v2, v3
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 3
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 2
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 1
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 1
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %3 = load i32, i32* %arr, align 4
+ %3 = load i32, ptr %arr, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
; Build <4 x i32> from i32 loads at variable ascending indices arr[elem..elem+3].
-define <4 x i32> @fromDiffMemVarAui(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %arr, i64 %idxprom1
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %arr, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%add4 = add nsw i32 %elem, 2
%idxprom5 = sext i32 %add4 to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %arr, i64 %idxprom5
- %2 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %arr, i64 %idxprom5
+ %2 = load i32, ptr %arrayidx6, align 4
%vecinit7 = insertelement <4 x i32> %vecinit3, i32 %2, i32 2
%add8 = add nsw i32 %elem, 3
%idxprom9 = sext i32 %add8 to i64
- %arrayidx10 = getelementptr inbounds i32, i32* %arr, i64 %idxprom9
- %3 = load i32, i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %arr, i64 %idxprom9
+ %3 = load i32, ptr %arrayidx10, align 4
%vecinit11 = insertelement <4 x i32> %vecinit7, i32 %3, i32 3
ret <4 x i32> %vecinit11
}
; Build <4 x i32> from i32 loads at variable descending indices arr[elem..elem-3].
-define <4 x i32> @fromDiffMemVarDui(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %arr, i64 %idxprom1
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %arr, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%sub4 = add nsw i32 %elem, -2
%idxprom5 = sext i32 %sub4 to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %arr, i64 %idxprom5
- %2 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %arr, i64 %idxprom5
+ %2 = load i32, ptr %arrayidx6, align 4
%vecinit7 = insertelement <4 x i32> %vecinit3, i32 %2, i32 2
%sub8 = add nsw i32 %elem, -3
%idxprom9 = sext i32 %sub8 to i64
- %arrayidx10 = getelementptr inbounds i32, i32* %arr, i64 %idxprom9
- %3 = load i32, i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %arr, i64 %idxprom9
+ %3 = load i32, ptr %arrayidx10, align 4
%vecinit11 = insertelement <4 x i32> %vecinit7, i32 %3, i32 3
ret <4 x i32> %vecinit11
}
; Build <4 x i32> from i32 loads at non-consecutive constant indices 4, 18, 2, 88.
-define <4 x i32> @fromRandMemConsui(i32* nocapture readonly %arr) {
+define <4 x i32> @fromRandMemConsui(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromRandMemConsui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lwz r4, 16(r3)
; P8LE-NEXT: xxmrghd v2, vs1, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 4
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 4
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 18
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 18
+ %1 = load i32, ptr %arrayidx1, align 4
%vecinit2 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 2
- %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 2
+ %2 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit2, i32 %2, i32 2
- %arrayidx5 = getelementptr inbounds i32, i32* %arr, i64 88
- %3 = load i32, i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %arr, i64 88
+ %3 = load i32, ptr %arrayidx5, align 4
%vecinit6 = insertelement <4 x i32> %vecinit4, i32 %3, i32 3
ret <4 x i32> %vecinit6
}
; Build <4 x i32> from i32 loads at non-consecutive variable indices elem+{4,1,2,8}.
-define <4 x i32> @fromRandMemVarui(i32* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromRandMemVarui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromRandMemVarui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
entry:
%add = add nsw i32 %elem, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecinit = insertelement <4 x i32> undef, i32 %0, i32 0
%add1 = add nsw i32 %elem, 1
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i32, i32* %arr, i64 %idxprom2
- %1 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %arr, i64 %idxprom2
+ %1 = load i32, ptr %arrayidx3, align 4
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %1, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds i32, i32* %arr, i64 %idxprom6
- %2 = load i32, i32* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds i32, ptr %arr, i64 %idxprom6
+ %2 = load i32, ptr %arrayidx7, align 4
%vecinit8 = insertelement <4 x i32> %vecinit4, i32 %2, i32 2
%add9 = add nsw i32 %elem, 8
%idxprom10 = sext i32 %add9 to i64
- %arrayidx11 = getelementptr inbounds i32, i32* %arr, i64 %idxprom10
- %3 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %arr, i64 %idxprom10
+ %3 = load i32, ptr %arrayidx11, align 4
%vecinit12 = insertelement <4 x i32> %vecinit8, i32 %3, i32 3
ret <4 x i32> %vecinit12
}
ret <4 x i32> %splat.splat
}
; Splat: load one i32 from %ptr and broadcast it into all 4 lanes.
-define <4 x i32> @spltMemValui(i32* nocapture readonly %ptr) {
+define <4 x i32> @spltMemValui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvwsx v2, 0, r3
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
; Fixed: removed a stray second `ret` that followed this terminator
; (a basic block may have exactly one terminator).
ret <4 x i32> %splat.splat
}
; Load <4 x float> from %ptr and convert to <4 x i32> via fptoui.
-define <4 x i32> @fromDiffMemConsAConvftoui(float* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsAConvftoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvftoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: xvcvspuxws v2, v2
; P8LE-NEXT: blr
entry:
- %0 = bitcast float* %ptr to <4 x float>*
- %1 = load <4 x float>, <4 x float>* %0, align 4
- %2 = fptoui <4 x float> %1 to <4 x i32>
- ret <4 x i32> %2
+ %0 = load <4 x float>, ptr %ptr, align 4
+ %1 = fptoui <4 x float> %0 to <4 x i32>
+ ret <4 x i32> %1
}
; Build <4 x i32> from fptoui of floats loaded at descending offsets ptr[3..0].
-define <4 x i32> @fromDiffMemConsDConvftoui(float* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsDConvftoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvftoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xvcvspuxws v2, v2
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %ptr, i64 3
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ptr, i64 3
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 2
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 2
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptoui float %1 to i32
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
- %arrayidx4 = getelementptr inbounds float, float* %ptr, i64 1
- %2 = load float, float* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %ptr, i64 1
+ %2 = load float, ptr %arrayidx4, align 4
%conv5 = fptoui float %2 to i32
%vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
- %3 = load float, float* %ptr, align 4
+ %3 = load float, ptr %ptr, align 4
%conv8 = fptoui float %3 to i32
%vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
ret <4 x i32> %vecinit9
}
; Build <4 x i32> from fptoui of floats at variable ascending indices arr[elem..elem+3].
-define <4 x i32> @fromDiffMemVarAConvftoui(float* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAConvftoui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvftoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptoui float %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds float, float* %arr, i64 %idxprom6
- %2 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %arr, i64 %idxprom6
+ %2 = load float, ptr %arrayidx7, align 4
%conv8 = fptoui float %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%add10 = add nsw i32 %elem, 3
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds float, float* %arr, i64 %idxprom11
- %3 = load float, float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, ptr %arr, i64 %idxprom11
+ %3 = load float, ptr %arrayidx12, align 4
%conv13 = fptoui float %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
; FIXME: implement finding consecutive loads with pre-inc
}
; Build <4 x i32> from fptoui of floats at variable descending indices arr[elem..elem-3].
-define <4 x i32> @fromDiffMemVarDConvftoui(float* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDConvftoui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvftoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptoui float %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%sub5 = add nsw i32 %elem, -2
%idxprom6 = sext i32 %sub5 to i64
- %arrayidx7 = getelementptr inbounds float, float* %arr, i64 %idxprom6
- %2 = load float, float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, ptr %arr, i64 %idxprom6
+ %2 = load float, ptr %arrayidx7, align 4
%conv8 = fptoui float %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%sub10 = add nsw i32 %elem, -3
%idxprom11 = sext i32 %sub10 to i64
- %arrayidx12 = getelementptr inbounds float, float* %arr, i64 %idxprom11
- %3 = load float, float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, ptr %arr, i64 %idxprom11
+ %3 = load float, ptr %arrayidx12, align 4
%conv13 = fptoui float %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
; Fixed: removed a stray `ret <4 x i32> %splat.splat` that followed this
; terminator and referenced a value not defined in this function.
ret <4 x i32> %vecinit14
}
; Splat: load one float from %ptr, fptoui to i32, broadcast into all 4 lanes.
-define <4 x i32> @spltMemValConvftoui(float* nocapture readonly %ptr) {
+define <4 x i32> @spltMemValConvftoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvftoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfiwzx f0, 0, r3
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptoui float %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
; Fixed: returned an unrelated constant while %splat.splat was computed but dead;
; the CHECK lines (lfiwzx + xxspltw) show the splat is the intended result.
ret <4 x i32> %splat.splat
}
; Load two <2 x double> halves from ptr[0..1] and ptr[2..3], fptoui each to
; <2 x i32>, then concatenate into a <4 x i32>.
-define <4 x i32> @fromDiffMemConsAConvdtoui(double* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsAConvdtoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvdtoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: vmrgew v2, v3, v2
; P8LE-NEXT: blr
entry:
- %0 = bitcast double* %ptr to <2 x double>*
- %1 = load <2 x double>, <2 x double>* %0, align 8
- %2 = fptoui <2 x double> %1 to <2 x i32>
- %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 2
- %3 = bitcast double* %arrayidx4 to <2 x double>*
- %4 = load <2 x double>, <2 x double>* %3, align 8
- %5 = fptoui <2 x double> %4 to <2 x i32>
- %vecinit9 = shufflevector <2 x i32> %2, <2 x i32> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %0 = load <2 x double>, ptr %ptr, align 8
+ %1 = fptoui <2 x double> %0 to <2 x i32>
+ %arrayidx4 = getelementptr inbounds double, ptr %ptr, i64 2
+ %2 = load <2 x double>, ptr %arrayidx4, align 8
+ %3 = fptoui <2 x double> %2 to <2 x i32>
+ %vecinit9 = shufflevector <2 x i32> %1, <2 x i32> %3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i32> %vecinit9
}
; Build <4 x i32> from fptoui of doubles loaded at descending offsets ptr[3..0].
-define <4 x i32> @fromDiffMemConsDConvdtoui(double* nocapture readonly %ptr) {
+define <4 x i32> @fromDiffMemConsDConvdtoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvdtoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfd f0, 24(r3)
; P8LE-NEXT: vmrgew v2, v3, v2
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds double, double* %ptr, i64 3
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ptr, i64 3
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
- %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 2
- %1 = load double, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 2
+ %1 = load double, ptr %arrayidx1, align 8
%conv2 = fptoui double %1 to i32
%vecinit3 = insertelement <4 x i32> %vecinit, i32 %conv2, i32 1
- %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 1
- %2 = load double, double* %arrayidx4, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %ptr, i64 1
+ %2 = load double, ptr %arrayidx4, align 8
%conv5 = fptoui double %2 to i32
%vecinit6 = insertelement <4 x i32> %vecinit3, i32 %conv5, i32 2
- %3 = load double, double* %ptr, align 8
+ %3 = load double, ptr %ptr, align 8
%conv8 = fptoui double %3 to i32
%vecinit9 = insertelement <4 x i32> %vecinit6, i32 %conv8, i32 3
ret <4 x i32> %vecinit9
}
; Build <4 x i32> from fptoui of doubles at variable ascending indices arr[elem..elem+3].
-define <4 x i32> @fromDiffMemVarAConvdtoui(double* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarAConvdtoui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvdtoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptoui double %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%add5 = add nsw i32 %elem, 2
%idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds double, double* %arr, i64 %idxprom6
- %2 = load double, double* %arrayidx7, align 8
+ %arrayidx7 = getelementptr inbounds double, ptr %arr, i64 %idxprom6
+ %2 = load double, ptr %arrayidx7, align 8
%conv8 = fptoui double %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%add10 = add nsw i32 %elem, 3
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds double, double* %arr, i64 %idxprom11
- %3 = load double, double* %arrayidx12, align 8
+ %arrayidx12 = getelementptr inbounds double, ptr %arr, i64 %idxprom11
+ %3 = load double, ptr %arrayidx12, align 8
%conv13 = fptoui double %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
ret <4 x i32> %vecinit14
}
; Build <4 x i32> from fptoui of doubles at variable descending indices arr[elem..elem-3].
-define <4 x i32> @fromDiffMemVarDConvdtoui(double* nocapture readonly %arr, i32 signext %elem) {
+define <4 x i32> @fromDiffMemVarDConvdtoui(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvdtoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i32
%vecinit = insertelement <4 x i32> undef, i32 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptoui double %1 to i32
%vecinit4 = insertelement <4 x i32> %vecinit, i32 %conv3, i32 1
%sub5 = add nsw i32 %elem, -2
%idxprom6 = sext i32 %sub5 to i64
- %arrayidx7 = getelementptr inbounds double, double* %arr, i64 %idxprom6
- %2 = load double, double* %arrayidx7, align 8
+ %arrayidx7 = getelementptr inbounds double, ptr %arr, i64 %idxprom6
+ %2 = load double, ptr %arrayidx7, align 8
%conv8 = fptoui double %2 to i32
%vecinit9 = insertelement <4 x i32> %vecinit4, i32 %conv8, i32 2
%sub10 = add nsw i32 %elem, -3
%idxprom11 = sext i32 %sub10 to i64
- %arrayidx12 = getelementptr inbounds double, double* %arr, i64 %idxprom11
- %3 = load double, double* %arrayidx12, align 8
+ %arrayidx12 = getelementptr inbounds double, ptr %arr, i64 %idxprom11
+ %3 = load double, ptr %arrayidx12, align 8
%conv13 = fptoui double %3 to i32
%vecinit14 = insertelement <4 x i32> %vecinit9, i32 %conv13, i32 3
; Fixed: removed a stray `ret <4 x i32> %splat.splat` that followed this
; terminator and referenced a value not defined in this function.
ret <4 x i32> %vecinit14
}
; Splat: load one double from %ptr, fptoui to i32, broadcast into all 4 lanes.
-define <4 x i32> @spltMemValConvdtoui(double* nocapture readonly %ptr) {
+define <4 x i32> @spltMemValConvdtoui(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvdtoui:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfd f0, 0(r3)
; P8LE-NEXT: xxspltw v2, vs0, 1
; P8LE-NEXT: blr
entry:
- %0 = load double, double* %ptr, align 8
+ %0 = load double, ptr %ptr, align 8
%conv = fptoui double %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
; Fixed: returned `ret <2 x i64> <...>`, which does not match this function's
; declared <4 x i32> return type; return the computed splat instead.
ret <4 x i32> %splat.splat
}
; Build <2 x i64> from two i64 loads at consecutive ascending offsets arr[0..1].
-define <2 x i64> @fromDiffMemConsAll(i64* nocapture readonly %arr) {
+define <2 x i64> @fromDiffMemConsAll(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsAll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xxswapd v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load i64, i64* %arr, align 8
+ %0 = load i64, ptr %arr, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 1
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 1
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
; Build <2 x i64> from two i64 loads at consecutive descending offsets arr[3], arr[2].
-define <2 x i64> @fromDiffMemConsDll(i64* nocapture readonly %arr) {
+define <2 x i64> @fromDiffMemConsDll(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsDll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 16(r3)
; P8LE-NEXT: lxvd2x v2, 0, r3
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 3
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 3
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 2
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 2
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
; Build <2 x i64> from i64 loads at variable ascending indices arr[elem], arr[elem+1].
-define <2 x i64> @fromDiffMemVarAll(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i64, i64* %arr, i64 %idxprom1
- %1 = load i64, i64* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds i64, ptr %arr, i64 %idxprom1
+ %1 = load i64, ptr %arrayidx2, align 8
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit3
}
; Build <2 x i64> from i64 loads at variable descending indices arr[elem], arr[elem-1].
-define <2 x i64> @fromDiffMemVarDll(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds i64, i64* %arr, i64 %idxprom1
- %1 = load i64, i64* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds i64, ptr %arr, i64 %idxprom1
+ %1 = load i64, ptr %arrayidx2, align 8
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit3
}
; Build <2 x i64> from i64 loads at non-consecutive constant indices 4 and 18.
-define <2 x i64> @fromRandMemConsll(i64* nocapture readonly %arr) {
+define <2 x i64> @fromRandMemConsll(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromRandMemConsll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: ld r4, 32(r3)
; P8LE-NEXT: xxmrghd v2, vs1, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 4
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 4
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 18
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 18
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
; Build <2 x i64> from i64 loads at non-consecutive variable indices elem+4 and elem+1.
-define <2 x i64> @fromRandMemVarll(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromRandMemVarll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromRandMemVarll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
entry:
%add = add nsw i32 %elem, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%add1 = add nsw i32 %elem, 1
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i64, i64* %arr, i64 %idxprom2
- %1 = load i64, i64* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds i64, ptr %arr, i64 %idxprom2
+ %1 = load i64, ptr %arrayidx3, align 8
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit4
}
ret <2 x i64> %splat.splat
}
; Splat: load one i64 from %ptr and broadcast it into both lanes.
-define <2 x i64> @spltMemValll(i64* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvdsx v2, 0, r3
; P8LE-NEXT: lxvdsx v2, 0, r3
; P8LE-NEXT: blr
entry:
- %0 = load i64, i64* %ptr, align 8
+ %0 = load i64, ptr %ptr, align 8
%splat.splatinsert = insertelement <2 x i64> undef, i64 %0, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
; Fixed: removed a stray second `ret` that followed this terminator
; (a basic block may have exactly one terminator).
ret <2 x i64> %splat.splat
}
; Build <2 x i64> from fptosi of floats at consecutive ascending offsets ptr[0..1].
-define <2 x i64> @fromDiffMemConsAConvftoll(float* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsAConvftoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvftoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 0(r3)
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptosi float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 1
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 1
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptosi float %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
; Build <2 x i64> from fptosi of floats at consecutive descending offsets ptr[3], ptr[2].
-define <2 x i64> @fromDiffMemConsDConvftoll(float* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsDConvftoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvftoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 12(r3)
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %ptr, i64 3
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ptr, i64 3
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 2
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 2
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptosi float %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
; Build <2 x i64> from fptosi of floats at variable ascending indices arr[elem], arr[elem+1].
-define <2 x i64> @fromDiffMemVarAConvftoll(float* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAConvftoll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvftoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptosi float %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
}
; Build <2 x i64> from fptosi of floats at variable descending indices arr[elem], arr[elem-1].
-define <2 x i64> @fromDiffMemVarDConvftoll(float* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDConvftoll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvftoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptosi float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptosi float %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
; Fixed: removed a stray `ret <2 x i64> %splat.splat` that followed this
; terminator and referenced a value not defined in this function.
ret <2 x i64> %vecinit4
}
-define <2 x i64> @spltMemValConvftoll(float* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValConvftoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvftoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 0(r3)
; P8LE-NEXT: xxspltd v2, vs0, 0
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptosi float %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> <i64 24, i64 234>
}
-define <2 x i64> @fromDiffMemConsAConvdtoll(double* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsAConvdtoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvdtoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = bitcast double* %ptr to <2 x double>*
- %1 = load <2 x double>, <2 x double>* %0, align 8
- %2 = fptosi <2 x double> %1 to <2 x i64>
- ret <2 x i64> %2
+ %0 = load <2 x double>, ptr %ptr, align 8
+ %1 = fptosi <2 x double> %0 to <2 x i64>
+ ret <2 x i64> %1
}
-define <2 x i64> @fromDiffMemConsDConvdtoll(double* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsDConvdtoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvdtoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 16(r3)
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds double, double* %ptr, i64 3
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ptr, i64 3
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 2
- %1 = load double, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 2
+ %1 = load double, ptr %arrayidx1, align 8
%conv2 = fptosi double %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromDiffMemVarAConvdtoll(double* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAConvdtoll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvdtoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptosi double %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
}
-define <2 x i64> @fromDiffMemVarDConvdtoll(double* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDConvdtoll(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvdtoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptosi double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptosi double %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
ret <2 x i64> %splat.splat
}
-define <2 x i64> @spltMemValConvdtoll(double* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValConvdtoll(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvdtoll:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvdsx vs0, 0, r3
; P8LE-NEXT: xvcvdpsxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load double, double* %ptr, align 8
+ %0 = load double, ptr %ptr, align 8
%conv = fptosi double %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> <i64 242, i64 -113>
}
-define <2 x i64> @fromDiffMemConsAull(i64* nocapture readonly %arr) {
+define <2 x i64> @fromDiffMemConsAull(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsAull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 0(r3)
; P8LE-NEXT: xxswapd v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load i64, i64* %arr, align 8
+ %0 = load i64, ptr %arr, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 1
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 1
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
-define <2 x i64> @fromDiffMemConsDull(i64* nocapture readonly %arr) {
+define <2 x i64> @fromDiffMemConsDull(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromDiffMemConsDull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv v2, 16(r3)
; P8LE-NEXT: lxvd2x v2, 0, r3
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 3
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 3
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 2
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 2
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
-define <2 x i64> @fromDiffMemVarAull(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i64, i64* %arr, i64 %idxprom1
- %1 = load i64, i64* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds i64, ptr %arr, i64 %idxprom1
+ %1 = load i64, ptr %arrayidx2, align 8
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromDiffMemVarDull(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds i64, i64* %arr, i64 %idxprom1
- %1 = load i64, i64* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds i64, ptr %arr, i64 %idxprom1
+ %1 = load i64, ptr %arrayidx2, align 8
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromRandMemConsull(i64* nocapture readonly %arr) {
+define <2 x i64> @fromRandMemConsull(ptr nocapture readonly %arr) {
; P9BE-LABEL: fromRandMemConsull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: ld r4, 32(r3)
; P8LE-NEXT: xxmrghd v2, vs1, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 4
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 4
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
- %arrayidx1 = getelementptr inbounds i64, i64* %arr, i64 18
- %1 = load i64, i64* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds i64, ptr %arr, i64 18
+ %1 = load i64, ptr %arrayidx1, align 8
%vecinit2 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit2
}
-define <2 x i64> @fromRandMemVarull(i64* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromRandMemVarull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromRandMemVarull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
entry:
%add = add nsw i32 %elem, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i64, i64* %arr, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %arr, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecinit = insertelement <2 x i64> undef, i64 %0, i32 0
%add1 = add nsw i32 %elem, 1
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i64, i64* %arr, i64 %idxprom2
- %1 = load i64, i64* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds i64, ptr %arr, i64 %idxprom2
+ %1 = load i64, ptr %arrayidx3, align 8
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit4
}
ret <2 x i64> %splat.splat
}
-define <2 x i64> @spltMemValull(i64* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvdsx v2, 0, r3
; P8LE-NEXT: lxvdsx v2, 0, r3
; P8LE-NEXT: blr
entry:
- %0 = load i64, i64* %ptr, align 8
+ %0 = load i64, ptr %ptr, align 8
%splat.splatinsert = insertelement <2 x i64> undef, i64 %0, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %splat.splat
ret <2 x i64> <i64 24, i64 234>
}
-define <2 x i64> @fromDiffMemConsAConvftoull(float* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsAConvftoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvftoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 0(r3)
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptoui float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 1
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 1
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptoui float %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromDiffMemConsDConvftoull(float* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsDConvftoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvftoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 12(r3)
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %ptr, i64 3
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ptr, i64 3
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 2
- %1 = load float, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 2
+ %1 = load float, ptr %arrayidx1, align 4
%conv2 = fptoui float %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromDiffMemVarAConvftoull(float* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAConvftoull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvftoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptoui float %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
}
-define <2 x i64> @fromDiffMemVarDConvftoull(float* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDConvftoull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvftoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 2
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds float, float* %arr, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %arr, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fptoui float %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds float, float* %arr, i64 %idxprom1
- %1 = load float, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %arr, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
%conv3 = fptoui float %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
ret <2 x i64> %splat.splat
}
-define <2 x i64> @spltMemValConvftoull(float* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValConvftoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvftoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lfs f0, 0(r3)
; P8LE-NEXT: xxspltd v2, vs0, 0
; P8LE-NEXT: blr
entry:
- %0 = load float, float* %ptr, align 4
+ %0 = load float, ptr %ptr, align 4
%conv = fptoui float %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> <i64 24, i64 234>
}
-define <2 x i64> @fromDiffMemConsAConvdtoull(double* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsAConvdtoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsAConvdtoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 0(r3)
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = bitcast double* %ptr to <2 x double>*
- %1 = load <2 x double>, <2 x double>* %0, align 8
- %2 = fptoui <2 x double> %1 to <2 x i64>
- ret <2 x i64> %2
+ %0 = load <2 x double>, ptr %ptr, align 8
+ %1 = fptoui <2 x double> %0 to <2 x i64>
+ ret <2 x i64> %1
}
-define <2 x i64> @fromDiffMemConsDConvdtoull(double* nocapture readonly %ptr) {
+define <2 x i64> @fromDiffMemConsDConvdtoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: fromDiffMemConsDConvdtoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxv vs0, 16(r3)
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds double, double* %ptr, i64 3
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ptr, i64 3
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
- %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 2
- %1 = load double, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %ptr, i64 2
+ %1 = load double, ptr %arrayidx1, align 8
%conv2 = fptoui double %1 to i64
%vecinit3 = insertelement <2 x i64> %vecinit, i64 %conv2, i32 1
ret <2 x i64> %vecinit3
}
-define <2 x i64> @fromDiffMemVarAConvdtoull(double* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarAConvdtoull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarAConvdtoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%add = add nsw i32 %elem, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptoui double %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
}
-define <2 x i64> @fromDiffMemVarDConvdtoull(double* nocapture readonly %arr, i32 signext %elem) {
+define <2 x i64> @fromDiffMemVarDConvdtoull(ptr nocapture readonly %arr, i32 signext %elem) {
; P9BE-LABEL: fromDiffMemVarDConvdtoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: sldi r4, r4, 3
; P8LE-NEXT: blr
entry:
%idxprom = sext i32 %elem to i64
- %arrayidx = getelementptr inbounds double, double* %arr, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %arr, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fptoui double %0 to i64
%vecinit = insertelement <2 x i64> undef, i64 %conv, i32 0
%sub = add nsw i32 %elem, -1
%idxprom1 = sext i32 %sub to i64
- %arrayidx2 = getelementptr inbounds double, double* %arr, i64 %idxprom1
- %1 = load double, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %arr, i64 %idxprom1
+ %1 = load double, ptr %arrayidx2, align 8
%conv3 = fptoui double %1 to i64
%vecinit4 = insertelement <2 x i64> %vecinit, i64 %conv3, i32 1
ret <2 x i64> %vecinit4
ret <2 x i64> %splat.splat
}
-define <2 x i64> @spltMemValConvdtoull(double* nocapture readonly %ptr) {
+define <2 x i64> @spltMemValConvdtoull(ptr nocapture readonly %ptr) {
; P9BE-LABEL: spltMemValConvdtoull:
; P9BE: # %bb.0: # %entry
; P9BE-NEXT: lxvdsx vs0, 0, r3
; P8LE-NEXT: xvcvdpuxds v2, vs0
; P8LE-NEXT: blr
entry:
- %0 = load double, double* %ptr, align 8
+ %0 = load double, ptr %ptr, align 8
%conv = fptoui double %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+altivec | FileCheck %s
-define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
- %tmp = load <4 x float>, <4 x float>* %P3 ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+define void @VXOR(ptr %P1, ptr %P2, ptr %P3) {
+ %tmp = load <4 x float>, ptr %P3 ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
%tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4, <4 x float>* %P3
- store <4 x float> zeroinitializer, <4 x float>* %P1
- store <4 x i32> zeroinitializer, <4 x i32>* %P2
+ store <4 x float> %tmp4, ptr %P3
+ store <4 x float> zeroinitializer, ptr %P1
+ store <4 x i32> zeroinitializer, ptr %P2
ret void
}
; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
; CHECK: vsplti
; CHECK: vxor
-define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
- store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
- store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
+define void @VSPLTI(ptr %P2, ptr %P3) {
+ store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), ptr %P2
+ store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, ptr %P3
ret void
}
; CHECK: @VSPLTI
; Function Attrs: nounwind
define void @test1() {
entry:
- %0 = load <2 x double>, <2 x double>* @vda, align 16
- %1 = load <2 x double>, <2 x double>* @vdb, align 16
+ %0 = load <2 x double>, ptr @vda, align 16
+ %1 = load <2 x double>, ptr @vdb, align 16
%2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* @vdr, align 16
+ store <2 x double> %2, ptr @vdr, align 16
ret void
; CHECK-LABEL: @test1
; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test2() {
entry:
- %0 = load <4 x float>, <4 x float>* @vfa, align 16
- %1 = load <4 x float>, <4 x float>* @vfb, align 16
+ %0 = load <4 x float>, ptr @vfa, align 16
+ %1 = load <4 x float>, ptr @vfb, align 16
%2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* @vfr, align 16
+ store <4 x float> %2, ptr @vfr, align 16
ret void
; CHECK-LABEL: @test2
; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test3() {
entry:
- %0 = load <2 x double>, <2 x double>* @vda, align 16
- %1 = load <2 x double>, <2 x double>* @vda, align 16
+ %0 = load <2 x double>, ptr @vda, align 16
+ %1 = load <2 x double>, ptr @vda, align 16
%2 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %1)
- store <2 x double> %2, <2 x double>* @vdr, align 16
+ store <2 x double> %2, ptr @vdr, align 16
ret void
; CHECK-LABEL: @test3
; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test4() {
entry:
- %0 = load <4 x float>, <4 x float>* @vfa, align 16
- %1 = load <4 x float>, <4 x float>* @vfa, align 16
+ %0 = load <4 x float>, ptr @vfa, align 16
+ %1 = load <4 x float>, ptr @vfa, align 16
%2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %1)
- store <4 x float> %2, <4 x float>* @vfr, align 16
+ store <4 x float> %2, ptr @vfr, align 16
ret void
; CHECK-LABEL: @test4
; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test5() {
entry:
- %0 = load <2 x double>, <2 x double>* @vda, align 16
- %1 = load <2 x double>, <2 x double>* @vdb, align 16
+ %0 = load <2 x double>, ptr @vda, align 16
+ %1 = load <2 x double>, ptr @vdb, align 16
%2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @vbllr, align 16
+ store <2 x i64> %2, ptr @vbllr, align 16
ret void
; CHECK-LABEL: @test5
; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test6() {
entry:
- %0 = load <4 x float>, <4 x float>* @vfa, align 16
- %1 = load <4 x float>, <4 x float>* @vfb, align 16
+ %0 = load <4 x float>, ptr @vfa, align 16
+ %1 = load <4 x float>, ptr @vfb, align 16
%2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @vbir, align 16
+ store <4 x i32> %2, ptr @vbir, align 16
ret void
; CHECK-LABEL: @test6
; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test7() {
entry:
- %0 = load <2 x double>, <2 x double>* @vda, align 16
- %1 = load <2 x double>, <2 x double>* @vdb, align 16
+ %0 = load <2 x double>, ptr @vda, align 16
+ %1 = load <2 x double>, ptr @vdb, align 16
%2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @vbllr, align 16
+ store <2 x i64> %2, ptr @vbllr, align 16
ret void
; CHECK-LABEL: @test7
; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test8() {
entry:
- %0 = load <4 x float>, <4 x float>* @vfa, align 16
- %1 = load <4 x float>, <4 x float>* @vfb, align 16
+ %0 = load <4 x float>, ptr @vfa, align 16
+ %1 = load <4 x float>, ptr @vfb, align 16
%2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @vbir, align 16
+ store <4 x i32> %2, ptr @vbir, align 16
ret void
; CHECK-LABEL: @test8
; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test9() {
entry:
- %0 = load <2 x double>, <2 x double>* @vda, align 16
- %1 = load <2 x double>, <2 x double>* @vdb, align 16
+ %0 = load <2 x double>, ptr @vda, align 16
+ %1 = load <2 x double>, ptr @vdb, align 16
%2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @vbllr, align 16
+ store <2 x i64> %2, ptr @vbllr, align 16
ret void
; CHECK-LABEL: @test9
; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
; Function Attrs: nounwind
define void @test10() {
entry:
- %0 = load <4 x float>, <4 x float>* @vfa, align 16
- %1 = load <4 x float>, <4 x float>* @vfb, align 16
+ %0 = load <4 x float>, ptr @vfa, align 16
+ %1 = load <4 x float>, ptr @vfb, align 16
%2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @vbir, align 16
+ store <4 x i32> %2, ptr @vbir, align 16
ret void
; CHECK-LABEL: @test10
; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
define <4 x float> @emit_xvresp(<4 x float> %a) {
entry:
%a.addr = alloca <4 x float>, align 16
- store <4 x float> %a, <4 x float>* %a.addr, align 16
- %0 = load <4 x float>, <4 x float>* %a.addr, align 16
+ store <4 x float> %a, ptr %a.addr, align 16
+ %0 = load <4 x float>, ptr %a.addr, align 16
%1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
ret <4 x float> %1
; CHECK-LABEL: @emit_xvresp
define <2 x double> @emit_xvredp(<2 x double> %a) {
entry:
%a.addr = alloca <2 x double>, align 16
- store <2 x double> %a, <2 x double>* %a.addr, align 16
- %0 = load <2 x double>, <2 x double>* %a.addr, align 16
+ store <2 x double> %a, ptr %a.addr, align 16
+ %0 = load <2 x double>, ptr %a.addr, align 16
%1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
ret <2 x double> %1
; CHECK-LABEL: @emit_xvredp
ret i32 %0
}
-define void @vec_xst_trunc_sc(<1 x i128> %__vec, i64 %__offset, i8* nocapture %__ptr) {
+define void @vec_xst_trunc_sc(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_sc:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrbx v2, r6, r5
entry:
%0 = bitcast <1 x i128> %__vec to <16 x i8>
%conv = extractelement <16 x i8> %0, i32 0
- %add.ptr = getelementptr inbounds i8, i8* %__ptr, i64 %__offset
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %__ptr, i64 %__offset
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
-define void @vec_xst_trunc_uc(<1 x i128> %__vec, i64 %__offset, i8* nocapture %__ptr) {
+define void @vec_xst_trunc_uc(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_uc:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrbx v2, r6, r5
entry:
%0 = bitcast <1 x i128> %__vec to <16 x i8>
%conv = extractelement <16 x i8> %0, i32 0
- %add.ptr = getelementptr inbounds i8, i8* %__ptr, i64 %__offset
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %__ptr, i64 %__offset
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
-define void @vec_xst_trunc_ss(<1 x i128> %__vec, i64 %__offset, i16* nocapture %__ptr) {
+define void @vec_xst_trunc_ss(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_ss:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 1
entry:
%0 = bitcast <1 x i128> %__vec to <8 x i16>
%conv = extractelement <8 x i16> %0, i32 0
- %add.ptr = getelementptr inbounds i16, i16* %__ptr, i64 %__offset
- store i16 %conv, i16* %add.ptr, align 2
+ %add.ptr = getelementptr inbounds i16, ptr %__ptr, i64 %__offset
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
-define void @vec_xst_trunc_us(<1 x i128> %__vec, i64 %__offset, i16* nocapture %__ptr) {
+define void @vec_xst_trunc_us(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_us:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 1
entry:
%0 = bitcast <1 x i128> %__vec to <8 x i16>
%conv = extractelement <8 x i16> %0, i32 0
- %add.ptr = getelementptr inbounds i16, i16* %__ptr, i64 %__offset
- store i16 %conv, i16* %add.ptr, align 2
+ %add.ptr = getelementptr inbounds i16, ptr %__ptr, i64 %__offset
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
-define void @vec_xst_trunc_si(<1 x i128> %__vec, i64 %__offset, i32* nocapture %__ptr) {
+define void @vec_xst_trunc_si(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_si:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 2
entry:
%0 = bitcast <1 x i128> %__vec to <4 x i32>
%conv = extractelement <4 x i32> %0, i32 0
- %add.ptr = getelementptr inbounds i32, i32* %__ptr, i64 %__offset
- store i32 %conv, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %__ptr, i64 %__offset
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
-define void @vec_xst_trunc_ui(<1 x i128> %__vec, i64 %__offset, i32* nocapture %__ptr) {
+define void @vec_xst_trunc_ui(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_ui:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 2
entry:
%0 = bitcast <1 x i128> %__vec to <4 x i32>
%conv = extractelement <4 x i32> %0, i32 0
- %add.ptr = getelementptr inbounds i32, i32* %__ptr, i64 %__offset
- store i32 %conv, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %__ptr, i64 %__offset
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
-define void @vec_xst_trunc_sll(<1 x i128> %__vec, i64 %__offset, i64* nocapture %__ptr) {
+define void @vec_xst_trunc_sll(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_sll:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 3
entry:
%0 = bitcast <1 x i128> %__vec to <2 x i64>
%conv = extractelement <2 x i64> %0, i32 0
- %add.ptr = getelementptr inbounds i64, i64* %__ptr, i64 %__offset
- store i64 %conv, i64* %add.ptr, align 8
+ %add.ptr = getelementptr inbounds i64, ptr %__ptr, i64 %__offset
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
-define void @vec_xst_trunc_ull(<1 x i128> %__vec, i64 %__offset, i64* nocapture %__ptr) {
+define void @vec_xst_trunc_ull(<1 x i128> %__vec, i64 %__offset, ptr nocapture %__ptr) {
; CHECK-LE-LABEL: vec_xst_trunc_ull:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r5, 3
entry:
%0 = bitcast <1 x i128> %__vec to <2 x i64>
%conv = extractelement <2 x i64> %0, i32 0
- %add.ptr = getelementptr inbounds i64, i64* %__ptr, i64 %__offset
- store i64 %conv, i64* %add.ptr, align 8
+ %add.ptr = getelementptr inbounds i64, ptr %__ptr, i64 %__offset
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
-define dso_local <1 x i128> @vec_xl_zext(i64 %__offset, i8* nocapture readonly %__pointer) {
+define dso_local <1 x i128> @vec_xl_zext(i64 %__offset, ptr nocapture readonly %__pointer) {
; CHECK-LABEL: vec_xl_zext:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvrbx v2, r4, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %__pointer, i64 %__offset
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %__pointer, i64 %__offset
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_zext_short(i64 %__offset, i16* nocapture readonly %__pointer) {
+define dso_local <1 x i128> @vec_xl_zext_short(i64 %__offset, ptr nocapture readonly %__pointer) {
; CHECK-LABEL: vec_xl_zext_short:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r3, r3, 1
; CHECK-NEXT: lxvrhx v2, r4, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i16, i16* %__pointer, i64 %__offset
- %0 = load i16, i16* %add.ptr, align 2
+ %add.ptr = getelementptr inbounds i16, ptr %__pointer, i64 %__offset
+ %0 = load i16, ptr %add.ptr, align 2
%conv = zext i16 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_zext_word(i64 %__offset, i32* nocapture readonly %__pointer) {
+define dso_local <1 x i128> @vec_xl_zext_word(i64 %__offset, ptr nocapture readonly %__pointer) {
; CHECK-LABEL: vec_xl_zext_word:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r3, r3, 2
; CHECK-NEXT: lxvrwx v2, r4, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %__pointer, i64 %__offset
- %0 = load i32, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %__pointer, i64 %__offset
+ %0 = load i32, ptr %add.ptr, align 4
%conv = zext i32 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_zext_dw(i64 %__offset, i64* nocapture readonly %__pointer) {
+define dso_local <1 x i128> @vec_xl_zext_dw(i64 %__offset, ptr nocapture readonly %__pointer) {
; CHECK-LABEL: vec_xl_zext_dw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r3, r3, 3
; CHECK-NEXT: lxvrdx v2, r4, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i64, i64* %__pointer, i64 %__offset
- %0 = load i64, i64* %add.ptr, align 8
+ %add.ptr = getelementptr inbounds i64, ptr %__pointer, i64 %__offset
+ %0 = load i64, ptr %add.ptr, align 8
%conv = zext i64 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_sext_b(i64 %offset, i8* %p) {
+define dso_local <1 x i128> @vec_xl_sext_b(i64 %offset, ptr %p) {
; CHECK-LE-LABEL: vec_xl_sext_b:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r4, r3
; CHECK-AIXBE-NEXT: mtvsrdd v2, r4, r3
; CHECK-AIXBE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 %offset
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 %offset
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_sext_h(i64 %offset, i16* %p) {
+define dso_local <1 x i128> @vec_xl_sext_h(i64 %offset, ptr %p) {
; CHECK-LE-LABEL: vec_xl_sext_h:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r3, 1
; CHECK-AIXBE-NEXT: mtvsrdd v2, r4, r3
; CHECK-AIXBE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i16, i16* %p, i64 %offset
- %0 = load i16, i16* %add.ptr, align 2
+ %add.ptr = getelementptr inbounds i16, ptr %p, i64 %offset
+ %0 = load i16, ptr %add.ptr, align 2
%conv = sext i16 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_sext_w(i64 %offset, i32* %p) {
+define dso_local <1 x i128> @vec_xl_sext_w(i64 %offset, ptr %p) {
; CHECK-LE-LABEL: vec_xl_sext_w:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r3, 2
; CHECK-AIXBE-NEXT: mtvsrdd v2, r4, r3
; CHECK-AIXBE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %p, i64 %offset
- %0 = load i32, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %p, i64 %offset
+ %0 = load i32, ptr %add.ptr, align 4
%conv = sext i32 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
}
-define dso_local <1 x i128> @vec_xl_sext_d(i64 %offset, i64* %p) {
+define dso_local <1 x i128> @vec_xl_sext_d(i64 %offset, ptr %p) {
; CHECK-LE-LABEL: vec_xl_sext_d:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: sldi r3, r3, 3
; CHECK-AIXBE-NEXT: mtvsrdd v2, r4, r3
; CHECK-AIXBE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i64, i64* %p, i64 %offset
- %0 = load i64, i64* %add.ptr, align 8
+ %add.ptr = getelementptr inbounds i64, ptr %p, i64 %offset
+ %0 = load i64, ptr %add.ptr, align 8
%conv = sext i64 %0 to i128
%splat.splatinsert = insertelement <1 x i128> undef, i128 %conv, i32 0
ret <1 x i128> %splat.splatinsert
; Function Attrs: nounwind
define void @test1() {
entry:
- %0 = load <16 x i8>, <16 x i8>* @vsc, align 16
- %1 = load <16 x i8>, <16 x i8>* @vsc2, align 16
+ %0 = load <16 x i8>, ptr @vsc, align 16
+ %1 = load <16 x i8>, ptr @vsc2, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %0, <16 x i8> %1)
- store <2 x i64> %2, <2 x i64>* @res_vll, align 16
+ store <2 x i64> %2, ptr @res_vll, align 16
ret void
; CHECK-LABEL: @test1
; CHECK: lvx [[REG1:[0-9]+]], 0, 3
; Function Attrs: nounwind
define void @test2() {
entry:
- %0 = load <16 x i8>, <16 x i8>* @vuc, align 16
- %1 = load <16 x i8>, <16 x i8>* @vuc2, align 16
+ %0 = load <16 x i8>, ptr @vuc, align 16
+ %1 = load <16 x i8>, ptr @vuc2, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %0, <16 x i8> %1)
- store <2 x i64> %2, <2 x i64>* @res_vull, align 16
+ store <2 x i64> %2, ptr @res_vull, align 16
ret void
; CHECK-LABEL: @test2
; CHECK: lvx [[REG1:[0-9]+]], 0, 3
; Function Attrs: nounwind
define void @test3() {
entry:
- %0 = load <16 x i8>, <16 x i8>* @vsc, align 16
+ %0 = load <16 x i8>, ptr @vsc, align 16
%1 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @res_vsc, align 16
+ store <16 x i8> %1, ptr @res_vsc, align 16
ret void
; CHECK-LABEL: @test3
; CHECK: lvx [[REG1:[0-9]+]],
; Function Attrs: nounwind
define void @test4() {
entry:
- %0 = load <16 x i8>, <16 x i8>* @vuc, align 16
+ %0 = load <16 x i8>, ptr @vuc, align 16
%1 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @res_vuc, align 16
+ store <16 x i8> %1, ptr @res_vuc, align 16
ret void
; CHECK-LABEL: @test4
; CHECK: lvx [[REG1:[0-9]+]],
ret i64 %1
}
-define void @darn_loop(i64* noundef %darn) {
+define void @darn_loop(ptr noundef %darn) {
; OPT-LABEL: @darn_loop
; OPT-COUNT-32: tail call i64 @llvm.ppc.darn()
entry:
%inc = alloca i32, align 4
- store i32 0, i32* %inc, align 4
+ store i32 0, ptr %inc, align 4
br label %cond
cond:
- %0 = load i32, i32* %inc, align 4
+ %0 = load i32, ptr %inc, align 4
%cmp = icmp ne i32 %0, 32
br i1 %cmp, label %body, label %end_loop
body:
%1 = call i64 @llvm.ppc.darn()
- %2 = load i32, i32* %inc, align 4
- %idx = getelementptr inbounds i64, i64* %darn, i32 %2
- store i64 %1, i64* %idx, align 8
+ %2 = load i32, ptr %inc, align 4
+ %idx = getelementptr inbounds i64, ptr %darn, i32 %2
+ store i64 %1, ptr %idx, align 8
br label %incr
incr:
- %3 = load i32, i32* %inc, align 4
+ %3 = load i32, ptr %inc, align 4
%ninc = add nsw i32 %3, 1
- store i32 %ninc, i32* %inc, align 4
+ store i32 %ninc, ptr %inc, align 4
br label %cond
end_loop:
define void @testFMAOdd(fp128 %a, fp128 %b, fp128 %c) {
entry:
%0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
- store fp128 %0, fp128* @A, align 16
+ store fp128 %0, ptr @A, align 16
%sub = fsub fp128 0xL00000000000000008000000000000000, %c
%1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
- store fp128 %1, fp128* @B, align 16
+ store fp128 %1, ptr @B, align 16
%2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
%sub1 = fsub fp128 0xL00000000000000008000000000000000, %2
- store fp128 %sub1, fp128* @C, align 16
+ store fp128 %sub1, ptr @C, align 16
%sub2 = fsub fp128 0xL00000000000000008000000000000000, %c
%3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
%sub3 = fsub fp128 0xL00000000000000008000000000000000, %3
- store fp128 %sub3, fp128* @D, align 16
+ store fp128 %sub3, ptr @D, align 16
ret void
; CHECK-LABEL: testFMAOdd
; CHECK-DAG: xsmaddqpo v{{[0-9]+}}, v2, v3
define fp128 @insert_exp_qp(i64 %b) {
entry:
%b.addr = alloca i64, align 8
- store i64 %b, i64* %b.addr, align 8
- %0 = load fp128, fp128* @A, align 16
- %1 = load i64, i64* %b.addr, align 8
+ store i64 %b, ptr %b.addr, align 8
+ %0 = load fp128, ptr @A, align 16
+ %1 = load i64, ptr %b.addr, align 8
%2 = call fp128 @llvm.ppc.scalar.insert.exp.qp(fp128 %0, i64 %1)
ret fp128 %2
; CHECK-LABEL: insert_exp_qp
; Function Attrs: noinline nounwind optnone
define i64 @extract_exp() {
entry:
- %0 = load fp128, fp128* @A, align 16
+ %0 = load fp128, ptr @A, align 16
%1 = call i64 @llvm.ppc.scalar.extract.expq(fp128 %0)
ret i64 %1
; CHECK-LABEL: extract_exp
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
; RUN: -mcpu=pwr8 < %s | FileCheck %s --check-prefix=CHECK
-declare i64 @llvm.ppc.ldarx(i8*)
-define dso_local i64 @test_ldarx(i64* readnone %a) {
+declare i64 @llvm.ppc.ldarx(ptr)
+define dso_local i64 @test_ldarx(ptr readnone %a) {
; CHECK-LABEL: test_ldarx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: blr
entry:
- %0 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* elementtype(i64) %a)
+ %0 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i64) %a)
ret i64 %0
}
-declare i32 @llvm.ppc.stdcx(i8*, i64)
-define dso_local i64 @test_stdcx(i64* %a, i64 %b) {
+declare i32 @llvm.ppc.stdcx(ptr, i64)
+define dso_local i64 @test_stdcx(ptr %a, i64 %b) {
; CHECK-LABEL: test_stdcx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdcx. 4, 0, 3
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
entry:
- %0 = bitcast i64* %a to i8*
- %1 = tail call i32 @llvm.ppc.stdcx(i8* %0, i64 %b)
- %conv = sext i32 %1 to i64
+ %0 = tail call i32 @llvm.ppc.stdcx(ptr %a, i64 %b)
+ %conv = sext i32 %0 to i64
ret i64 %conv
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
; RUN: -mcpu=pwr8 < %s | FileCheck %s --check-prefix=CHECK-64
-declare i32 @llvm.ppc.lwarx(i8*)
-define dso_local signext i32 @test_lwarx(i32* readnone %a) {
+declare i32 @llvm.ppc.lwarx(ptr)
+define dso_local signext i32 @test_lwarx(ptr readnone %a) {
; CHECK-64-LABEL: test_lwarx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: #APP
; CHECK-32-NEXT: #NO_APP
; CHECK-32-NEXT: blr
entry:
- %0 = call i32 asm sideeffect "lwarx $0, ${1:y}", "=r,*Z,~{memory}"(i32* elementtype(i32) %a)
+ %0 = call i32 asm sideeffect "lwarx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i32) %a)
ret i32 %0
}
-declare i32 @llvm.ppc.stwcx(i8*, i32)
-define dso_local signext i32 @test_stwcx(i32* %a, i32 signext %b) {
+declare i32 @llvm.ppc.stwcx(ptr, i32)
+define dso_local signext i32 @test_stwcx(ptr %a, i32 signext %b) {
; CHECK-64-LABEL: test_stwcx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: stwcx. 4, 0, 3
; CHECK-32-NEXT: srwi 3, 3, 28
; CHECK-32-NEXT: blr
entry:
- %0 = bitcast i32* %a to i8*
- %1 = tail call i32 @llvm.ppc.stwcx(i8* %0, i32 %b)
- ret i32 %1
+ %0 = tail call i32 @llvm.ppc.stwcx(ptr %a, i32 %b)
+ ret i32 %0
}
-declare i32 @llvm.ppc.sthcx(i8*, i32)
-define dso_local signext i32 @test_sthcx(i16* %a, i16 signext %val) {
+declare i32 @llvm.ppc.sthcx(ptr, i32)
+define dso_local signext i32 @test_sthcx(ptr %a, i16 signext %val) {
; CHECK-64-LABEL: test_sthcx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: sthcx. 4, 0, 3
; CHECK-32-NEXT: srwi 3, 3, 28
; CHECK-32-NEXT: blr
entry:
- %0 = bitcast i16* %a to i8*
- %1 = sext i16 %val to i32
- %2 = tail call i32 @llvm.ppc.sthcx(i8* %0, i32 %1)
- ret i32 %2
+ %0 = sext i16 %val to i32
+ %1 = tail call i32 @llvm.ppc.sthcx(ptr %a, i32 %0)
+ ret i32 %1
}
-declare i32 @llvm.ppc.stbcx(i8*, i32)
-define signext i32 @test_stbcx(i8* %addr, i8 signext %val) {
+declare i32 @llvm.ppc.stbcx(ptr, i32)
+define signext i32 @test_stbcx(ptr %addr, i8 signext %val) {
; CHECK-64-LABEL: test_stbcx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: stbcx. 4, 0, 3
; CHECK-32-NEXT: blr
entry:
%conv = sext i8 %val to i32
- %0 = tail call i32 @llvm.ppc.stbcx(i8* %addr, i32 %conv)
+ %0 = tail call i32 @llvm.ppc.stbcx(ptr %addr, i32 %conv)
ret i32 %0
}
-define dso_local signext i16 @test_lharx(i16* %a) {
+define dso_local signext i16 @test_lharx(ptr %a) {
; CHECK-64-LABEL: test_lharx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: #APP
; CHECK-32-NEXT: extsh 3, 3
; CHECK-32-NEXT: blr
entry:
- %0 = tail call i16 asm sideeffect "lharx $0, ${1:y}", "=r,*Z,~{memory}"(i16* elementtype(i16) %a)
+ %0 = tail call i16 asm sideeffect "lharx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i16) %a)
ret i16 %0
}
; Function Attrs: nounwind uwtable
-define dso_local zeroext i8 @test_lbarx(i8* %a) {
+define dso_local zeroext i8 @test_lbarx(ptr %a) {
; CHECK-64-LABEL: test_lbarx:
; CHECK-64: # %bb.0: # %entry
; CHECK-64-NEXT: #APP
; CHECK-32-NEXT: clrlwi 3, 3, 24
; CHECK-32-NEXT: blr
entry:
- %0 = tail call i8 asm sideeffect "lbarx $0, ${1:y}", "=r,*Z,~{memory}"(i8* elementtype(i8) %a)
+ %0 = tail call i8 asm sideeffect "lbarx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i8) %a)
ret i8 %0
}
; CHECK-AIX-NEXT: blr
entry:
%x64 = alloca i64, align 8
- %0 = bitcast i64* %x64 to i8*
- call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
- store i64 -1, i64* %x64, align 8
+ call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %x64)
+ store i64 -1, ptr %x64, align 8
br label %do.body
do.body: ; preds = %do.body, %entry
- %1 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* elementtype(i64) nonnull %x64)
- %2 = call i32 @llvm.ppc.stdcx(i8* nonnull %0, i64 0)
- %tobool.not = icmp eq i32 %2, 0
+ %0 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i64) nonnull %x64)
+ %1 = call i32 @llvm.ppc.stdcx(ptr nonnull %x64, i64 0)
+ %tobool.not = icmp eq i32 %1, 0
br i1 %tobool.not, label %do.body, label %do.end
do.end: ; preds = %do.body
- %3 = load i64, i64* %x64, align 8
- %cmp = icmp eq i64 %3, 0
+ %2 = load i64, ptr %x64, align 8
+ %cmp = icmp eq i64 %2, 0
%. = select i1 %cmp, i32 55, i32 66
- call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+ call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %x64)
ret i32 %.
}
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
; Function Attrs: nounwind writeonly
-declare i32 @llvm.ppc.stdcx(i8*, i64)
+declare i32 @llvm.ppc.stdcx(ptr, i64)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
; RUN: -mcpu=pwr7 < %s | FileCheck %s
@ull = external global i64, align 8
-@ull_addr = external global i64*, align 8
+@ull_addr = external global ptr, align 8
define dso_local void @test_builtin_ppc_store8r() {
; CHECK-LABEL: test_builtin_ppc_store8r:
; CHECK-NEXT: blr
;
entry:
- %0 = load i64, i64* @ull, align 8
- %1 = load i64*, i64** @ull_addr, align 8
- %2 = bitcast i64* %1 to i8*
- call void @llvm.ppc.store8r(i64 %0, i8* %2)
+ %0 = load i64, ptr @ull, align 8
+ %1 = load ptr, ptr @ull_addr, align 8
+ call void @llvm.ppc.store8r(i64 %0, ptr %1)
ret void
}
-declare void @llvm.ppc.store8r(i64, i8*)
+declare void @llvm.ppc.store8r(i64, ptr)
define dso_local i64 @test_builtin_ppc_load8r() {
; CHECK-LABEL: test_builtin_ppc_load8r:
; CHECK: ldbrx 3, 0, 3
; CHECK-NEXT: blr
entry:
- %0 = load i64*, i64** @ull_addr, align 8
- %1 = bitcast i64* %0 to i8*
- %2 = call i64 @llvm.ppc.load8r(i8* %1)
- ret i64 %2
+ %0 = load ptr, ptr @ull_addr, align 8
+ %1 = call i64 @llvm.ppc.load8r(ptr %0)
+ ret i64 %1
}
-declare i64 @llvm.ppc.load8r(i8*)
+declare i64 @llvm.ppc.load8r(ptr)
; RUN: -mcpu=pwr7 < %s | FileCheck %s --check-prefix=CHECK-64B
@us = external global i16, align 2
-@us_addr = external global i16*, align 8
+@us_addr = external global ptr, align 8
@ui = external global i32, align 4
-@ui_addr = external global i32*, align 8
+@ui_addr = external global ptr, align 8
define dso_local void @test_builtin_ppc_store2r() {
; CHECK-64B-LABEL: test_builtin_ppc_store2r:
; CHECK-32B: sthbrx 3, 0, 4
; CHECK-32B-NEXT: blr
entry:
- %0 = load i16, i16* @us, align 2
+ %0 = load i16, ptr @us, align 2
%conv = zext i16 %0 to i32
- %1 = load i16*, i16** @us_addr, align 8
- %2 = bitcast i16* %1 to i8*
- call void @llvm.ppc.store2r(i32 %conv, i8* %2)
+ %1 = load ptr, ptr @us_addr, align 8
+ call void @llvm.ppc.store2r(i32 %conv, ptr %1)
ret void
}
-declare void @llvm.ppc.store2r(i32, i8*)
+declare void @llvm.ppc.store2r(i32, ptr)
define dso_local void @test_builtin_ppc_store4r() {
; CHECK-64B-LABEL: test_builtin_ppc_store4r:
; CHECK-32B: stwbrx 3, 0, 4
; CHECK-32B-NEXT: blr
entry:
- %0 = load i32, i32* @ui, align 4
- %1 = load i32*, i32** @ui_addr, align 8
- %2 = bitcast i32* %1 to i8*
- call void @llvm.ppc.store4r(i32 %0, i8* %2)
+ %0 = load i32, ptr @ui, align 4
+ %1 = load ptr, ptr @ui_addr, align 8
+ call void @llvm.ppc.store4r(i32 %0, ptr %1)
ret void
}
-declare void @llvm.ppc.store4r(i32, i8*)
+declare void @llvm.ppc.store4r(i32, ptr)
define dso_local zeroext i16 @test_builtin_ppc_load2r() {
; CHECK-64B-LABEL: test_builtin_ppc_load2r:
; CHECK-32B: lhbrx 3, 0, 3
; CHECK-32B-NEXT: blr
entry:
- %0 = load i16*, i16** @us_addr, align 8
- %1 = bitcast i16* %0 to i8*
- %2 = call i32 @llvm.ppc.load2r(i8* %1)
- %conv = trunc i32 %2 to i16
+ %0 = load ptr, ptr @us_addr, align 8
+ %1 = call i32 @llvm.ppc.load2r(ptr %0)
+ %conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-32B-NEXT: extsh 3, 3
; CHECK-32B-NEXT: blr
entry:
- %0 = load i16*, i16** @us_addr, align 8
- %1 = bitcast i16* %0 to i8*
- %2 = call i32 @llvm.ppc.load2r(i8* %1)
- %conv = trunc i32 %2 to i16
+ %0 = load ptr, ptr @us_addr, align 8
+ %1 = call i32 @llvm.ppc.load2r(ptr %0)
+ %conv = trunc i32 %1 to i16
ret i16 %conv
}
-declare i32 @llvm.ppc.load2r(i8*)
+declare i32 @llvm.ppc.load2r(ptr)
define dso_local zeroext i32 @test_builtin_ppc_load4r() {
; CHECK-64B-LABEL: test_builtin_ppc_load4r:
; CHECK-32B: lwbrx 3, 0, 3
; CHECK-32B-NEXT: blr
entry:
- %0 = load i32*, i32** @ui_addr, align 8
- %1 = bitcast i32* %0 to i8*
- %2 = call i32 @llvm.ppc.load4r(i8* %1)
- ret i32 %2
+ %0 = load ptr, ptr @ui_addr, align 8
+ %1 = call i32 @llvm.ppc.load4r(ptr %0)
+ ret i32 %1
}
-declare i32 @llvm.ppc.load4r(i8*)
+declare i32 @llvm.ppc.load4r(ptr)
; CHECK-AIX64-NEXT: mtxer 3
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i64(i32 1, i64 %0)
ret void
}
; CHECK-AIX64-NEXT: mtspr 8, 3
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i64(i32 8, i64 %0)
ret void
}
; CHECK-AIX64-NEXT: mtspr 9, 3
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i64(i32 9, i64 %0)
ret void
}
; CHECK-AIX64-NEXT: mtspr 896, 3
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i64(i32 896, i64 %0)
ret void
}
; CHECK-AIX64-NEXT: mtspr 898, 3
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i64(i32 898, i64 %0)
ret void
}
; CHECK-NEXT: mtxer 3
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @ula, align 8
+ %0 = load i32, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i32(i32 1, i32 %0)
ret void
}
; CHECK-NEXT: mtspr 8, 3
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @ula, align 8
+ %0 = load i32, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i32(i32 8, i32 %0)
ret void
}
; CHECK-NEXT: mtspr 9, 3
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @ula, align 8
+ %0 = load i32, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i32(i32 9, i32 %0)
ret void
}
; CHECK-NEXT: mtspr 896, 3
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @ula, align 8
+ %0 = load i32, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i32(i32 896, i32 %0)
ret void
}
; CHECK-NEXT: mtspr 898, 3
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @ula, align 8
+ %0 = load i32, ptr @ula, align 8
tail call void @llvm.ppc.mtspr.i32(i32 898, i32 %0)
ret void
}
; CHECK-32BIT-NEXT: mtmsr 3, 0
; CHECK-32BIT-NEXT: blr
entry:
- %0 = load i64, i64* @ula, align 8
+ %0 = load i64, ptr @ula, align 8
%conv = trunc i64 %0 to i32
call void @llvm.ppc.mtmsr(i32 %conv)
ret void
; CHECK-64B-NEXT: clrlwi 3, 3, 31
; CHECK-64B-NEXT: blr
entry:
- %0 = load i32, i32* @ui, align 4
- %1 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
+ %1 = load i32, ptr @ui, align 4
%2 = call i32 @llvm.ctpop.i32(i32 %1)
%3 = and i32 %2, 1
ret i32 %3
; CHECK-64B-NEXT: clrldi 3, 3, 63
; CHECK-64B-NEXT: blr
entry:
- %0 = load i64, i64* @ull, align 8
- %1 = load i64, i64* @ull, align 8
+ %0 = load i64, ptr @ull, align 8
+ %1 = load i64, ptr @ull, align 8
%2 = call i64 @llvm.ctpop.i64(i64 %1)
%3 = and i64 %2, 1
%cast = trunc i64 %3 to i32
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
; RUN: -mcpu=pwr7 < %s | FileCheck %s --check-prefix=CHECK-AIX64
-declare void @llvm.ppc.dcbtstt(i8*)
-declare void @llvm.ppc.dcbtt(i8*)
+declare void @llvm.ppc.dcbtstt(ptr)
+declare void @llvm.ppc.dcbtt(ptr)
-@vpa = external local_unnamed_addr global i8*, align 8
+@vpa = external local_unnamed_addr global ptr, align 8
define dso_local void @test_dcbtstt() {
; CHECK-LABEL: test_dcbtstt:
; CHECK-AIX64-NEXT: dcbtst 0, 3, 16
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i8*, i8** @vpa, align 8
- tail call void @llvm.ppc.dcbtstt(i8* %0)
+ %0 = load ptr, ptr @vpa, align 8
+ tail call void @llvm.ppc.dcbtstt(ptr %0)
ret void
}
; CHECK-AIX64-NEXT: dcbt 0, 3, 16
; CHECK-AIX64-NEXT: blr
entry:
- %0 = load i8*, i8** @vpa, align 8
- tail call void @llvm.ppc.dcbtt(i8* %0)
+ %0 = load ptr, ptr @vpa, align 8
+ tail call void @llvm.ppc.dcbtt(ptr %0)
ret void
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix \
; RUN: -mcpu=pwr8 < %s | FileCheck %s
-declare void @llvm.ppc.stfiw(i8*, double)
-define dso_local void @test_stfiw(i32* %cia, double %da) {
+declare void @llvm.ppc.stfiw(ptr, double)
+define dso_local void @test_stfiw(ptr %cia, double %da) {
; CHECK-LABEL: test_stfiw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxsiwx 1, 0, 3
; CHECK-32BIT-NEXT: stfiwx 1, 0, 3
; CHECK-32BIT-NEXT: blr
entry:
- %0 = bitcast i32* %cia to i8*
- tail call void @llvm.ppc.stfiw(i8* %0, double %da)
+ tail call void @llvm.ppc.stfiw(ptr %cia, double %da)
ret void
}
-define dso_local void @test_xl_stfiw(i32* %cia, double %da) {
+define dso_local void @test_xl_stfiw(ptr %cia, double %da) {
; CHECK-LABEL: test_xl_stfiw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxsiwx 1, 0, 3
; CHECK-32BIT-NEXT: stfiwx 1, 0, 3
; CHECK-32BIT-NEXT: blr
entry:
- %0 = bitcast i32* %cia to i8*
- tail call void @llvm.ppc.stfiw(i8* %0, double %da)
+ tail call void @llvm.ppc.stfiw(ptr %cia, double %da)
ret void
}
; CHECK-NEXT: icbt 0, 0, r3
; CHECK-NEXT: blr
entry:
- %a = alloca i8*, align 8
- %0 = load i8*, i8** %a, align 8
- call void @llvm.ppc.icbt(i8* %0)
+ %a = alloca ptr, align 8
+ %0 = load ptr, ptr %a, align 8
+ call void @llvm.ppc.icbt(ptr %0)
ret void
}
-declare void @llvm.ppc.icbt(i8*)
+declare void @llvm.ppc.icbt(ptr)
%struct.anon = type { i32, i32 }
-declare void @foo(%struct.anon* %v)
-define void @test(i32 %a, i32 %b, %struct.anon* byval(%struct.anon) nocapture %v) {
+declare void @foo(ptr %v)
+define void @test(i32 %a, i32 %b, ptr byval(%struct.anon) nocapture %v) {
entry:
- call void @foo(%struct.anon* %v)
+ call void @foo(ptr %v)
ret void
}
%struct.sm = type { i8, i8 }
; Function Attrs: nounwind ssp
-define void @foo(%struct.sm* byval(%struct.sm) %s) #0 {
+define void @foo(ptr byval(%struct.sm) %s) #0 {
entry:
- %a = getelementptr inbounds %struct.sm, %struct.sm* %s, i32 0, i32 0
- %0 = load i8, i8* %a, align 1
+ %0 = load i8, ptr %s, align 1
%conv2 = zext i8 %0 to i32
%add = add nuw nsw i32 %conv2, 3
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %a, align 1
- call void @bar(%struct.sm* byval(%struct.sm) %s, %struct.sm* byval(%struct.sm) %s) #1
+ store i8 %conv1, ptr %s, align 1
+ call void @bar(ptr byval(%struct.sm) %s, ptr byval(%struct.sm) %s) #1
ret void
}
; CHECK: bl bar
; CHECK: blr
-declare void @bar(%struct.sm* byval(%struct.sm), %struct.sm* byval(%struct.sm))
+declare void @bar(ptr byval(%struct.sm), ptr byval(%struct.sm))
attributes #0 = { nounwind ssp }
attributes #1 = { nounwind }
%struct = type { [4 x i32], [20 x i8] }
-declare dso_local i32 @foo1(%struct* byval(%struct) %var)
-declare dso_local void @foo(%struct* %var)
+declare dso_local i32 @foo1(ptr byval(%struct) %var)
+declare dso_local void @foo(ptr %var)
; check that 36bytes byval parameter is passed all in registers.
; CHECK-NEXT: blr
entry:
%x = alloca %struct, align 4
- call void @foo(%struct* %x)
- %r = call i32 @foo1(%struct* byval(%struct) %x)
+ call void @foo(ptr %x)
+ %r = call i32 @foo1(ptr byval(%struct) %x)
ret i32 %r
}
ret void
}
-define void @test_indirect(void ()* %fp) {
+define void @test_indirect(ptr %fp) {
call void %fp( )
ret void
}
define void @test_abs() {
- %fp = inttoptr i32 400 to void ()* ; <void ()*> [#uses=1]
+ %fp = inttoptr i32 400 to ptr ; <ptr> [#uses=1]
call void %fp( )
ret void
}
ret <16 x i8> %shuffle
}
-define dso_local <8 x i16> @testmrglb3(<8 x i8>* nocapture readonly %a) local_unnamed_addr #0 {
+define dso_local <8 x i16> @testmrglb3(ptr nocapture readonly %a) local_unnamed_addr #0 {
; CHECK-P8-LABEL: testmrglb3:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxsdx v2, 0, r3
; P8-AIX-32-NEXT: vmrghb v2, v3, v2
; P8-AIX-32-NEXT: blr
entry:
- %0 = load <8 x i8>, <8 x i8>* %a, align 8
+ %0 = load <8 x i8>, ptr %a, align 8
%1 = zext <8 x i8> %0 to <8 x i16>
ret <8 x i16> %1
}
-define dso_local void @no_crash_elt0_from_RHS(<2 x double>* noalias nocapture dereferenceable(16) %.vtx6) #0 {
+define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferenceable(16) %.vtx6) #0 {
; CHECK-P8-LABEL: no_crash_elt0_from_RHS:
; CHECK-P8: # %bb.0: # %test_entry
; CHECK-P8-NEXT: mflr r0
%.splatinsert = insertelement <2 x double> undef, double %0, i32 0
%.splat = shufflevector <2 x double> %.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
%1 = shufflevector <2 x double> zeroinitializer, <2 x double> %.splat, <2 x i32> <i32 0, i32 3>
- store <2 x double> %1, <2 x double>* %.vtx6, align 16
+ store <2 x double> %1, ptr %.vtx6, align 16
unreachable
}
ret <4 x i32> %vecins1
}
-define dso_local <16 x i8> @no_RAUW_in_combine_during_legalize(i32* nocapture readonly %ptr, i32 signext %offset) local_unnamed_addr #0 {
+define dso_local <16 x i8> @no_RAUW_in_combine_during_legalize(ptr nocapture readonly %ptr, i32 signext %offset) local_unnamed_addr #0 {
; CHECK-P8-LABEL: no_RAUW_in_combine_during_legalize:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: sldi r4, r4, 2
; P8-AIX-32-NEXT: blr
entry:
%idx.ext = sext i32 %offset to i64
- %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 %idx.ext
- %0 = load i32, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 %idx.ext
+ %0 = load i32, ptr %add.ptr, align 4
%conv = zext i32 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%1 = bitcast <2 x i64> %splat.splatinsert to <16 x i8>
ret <16 x i8> %shuffle
}
-define dso_local <4 x i32> @testSplat4Low(<8 x i8>* nocapture readonly %ptr) local_unnamed_addr #0 {
+define dso_local <4 x i32> @testSplat4Low(ptr nocapture readonly %ptr) local_unnamed_addr #0 {
; CHECK-P8-LABEL: testSplat4Low:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lfdx f0, 0, r3
; P8-AIX-32-NEXT: xxspltw v2, vs0, 1
; P8-AIX-32-NEXT: blr
entry:
- %0 = load <8 x i8>, <8 x i8>* %ptr, align 8
+ %0 = load <8 x i8>, ptr %ptr, align 8
%vecinit18 = shufflevector <8 x i8> %0, <8 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%1 = bitcast <16 x i8> %vecinit18 to <4 x i32>
ret <4 x i32> %1
}
; Function Attrs: norecurse nounwind readonly
-define dso_local <4 x i32> @testSplat4hi(<8 x i8>* nocapture readonly %ptr) local_unnamed_addr #0 {
+define dso_local <4 x i32> @testSplat4hi(ptr nocapture readonly %ptr) local_unnamed_addr #0 {
; CHECK-P8-LABEL: testSplat4hi:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lfdx f0, 0, r3
; P8-AIX-32-NEXT: xxspltw v2, vs0, 1
; P8-AIX-32-NEXT: blr
entry:
- %0 = load <8 x i8>, <8 x i8>* %ptr, align 8
+ %0 = load <8 x i8>, ptr %ptr, align 8
%vecinit22 = shufflevector <8 x i8> %0, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%1 = bitcast <16 x i8> %vecinit22 to <4 x i32>
ret <4 x i32> %1
}
; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x i64> @testSplat8(<8 x i8>* nocapture readonly %ptr) local_unnamed_addr #0 {
+define dso_local <2 x i64> @testSplat8(ptr nocapture readonly %ptr) local_unnamed_addr #0 {
; CHECK-P8-LABEL: testSplat8:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvdsx v2, 0, r3
; P8-AIX-32-NEXT: xxmrghd v2, vs0, vs0
; P8-AIX-32-NEXT: blr
entry:
- %0 = load <8 x i8>, <8 x i8>* %ptr, align 8
+ %0 = load <8 x i8>, ptr %ptr, align 8
%vecinit30 = shufflevector <8 x i8> %0, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%1 = bitcast <16 x i8> %vecinit30 to <2 x i64>
ret <2 x i64> %1
}
-define <2 x i64> @testSplati64_0(<1 x i64>* nocapture readonly %ptr) #0 {
+define <2 x i64> @testSplati64_0(ptr nocapture readonly %ptr) #0 {
; CHECK-P8-LABEL: testSplati64_0:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvdsx v2, 0, r3
; P8-AIX-32-NEXT: vperm v2, v4, v3, v2
; P8-AIX-32-NEXT: blr
entry:
- %0 = load <1 x i64>, <1 x i64>* %ptr, align 8
+ %0 = load <1 x i64>, ptr %ptr, align 8
%1 = shufflevector <1 x i64> %0, <1 x i64> undef, <2 x i32> <i32 0, i32 0>
ret <2 x i64> %1
}
-define <2 x i64> @testSplati64_1(<2 x i64>* nocapture readonly %ptr) #0 {
+define <2 x i64> @testSplati64_1(ptr nocapture readonly %ptr) #0 {
; CHECK-P8-LABEL: testSplati64_1:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addi r3, r3, 8
; P8-AIX-NEXT: lxvdsx v2, 0, r3
; P8-AIX-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* %ptr, align 8
+ %0 = load <2 x i64>, ptr %ptr, align 8
%1 = shufflevector <2 x i64> %0, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
ret <2 x i64> %1
}
; P8-AIX-NEXT: stxvw4x v2, 0, r3
; P8-AIX-NEXT: blr
entry:
- %0 = load i8, i8* undef, align 1
+ %0 = load i8, ptr undef, align 1
%splat.splatinsert.i = insertelement <16 x i8> poison, i8 %0, i32 0
%splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
- store <16 x i8> %splat.splat.i, <16 x i8>* undef, align 16
+ store <16 x i8> %splat.splat.i, ptr undef, align 16
ret void
}
; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
; RUN: < %s 2>&1 | FileCheck %s
-define double @foo(double* %dp) {
+define double @foo(ptr %dp) {
; CHECK-LE-LABEL: foo:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: ld 3, 0(3)
; CHECK-NEXT: lfd 1, -8(1)
; CHECK-NEXT: blr
entry:
- %0 = load atomic double, double* %dp acquire, align 8
+ %0 = load atomic double, ptr %dp acquire, align 8
ret double %0
}
; RUN: llc -opaque-pointers -mtriple=powerpc64-unknown-unknown \
; RUN: < %s 2>&1 | FileCheck %s
-define float @bar(float* %fp) {
+define float @bar(ptr %fp) {
; CHECK-LE-LABEL: bar:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lwz 3, 0(3)
; CHECK-NEXT: lfs 1, -4(1)
; CHECK-NEXT: blr
entry:
- %0 = load atomic float, float* %fp acquire, align 4
+ %0 = load atomic float, ptr %fp acquire, align 4
ret float %0
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mcpu=pwr9 -verify-machineinstrs -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
-define dso_local void @wibble(float* nocapture readonly %arg, i32 signext %arg1, i32* nocapture %arg2, float* nocapture %arg3) {
+define dso_local void @wibble(ptr nocapture readonly %arg, i32 signext %arg1, ptr nocapture %arg2, ptr nocapture %arg3) {
; CHECK-LABEL: wibble:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-NEXT: stfs 0, 0(6)
; CHECK-NEXT: blr
bb:
- %tmp = load float, float* %arg, align 4
+ %tmp = load float, ptr %arg, align 4
%tmp4 = fmul float %tmp, 2.000000e+00
%tmp5 = icmp sgt i32 %arg1, 1
br i1 %tmp5, label %bb6, label %bb8
bb8: ; preds = %bb11, %bb
%tmp9 = phi float [ %tmp4, %bb ], [ %tmp19, %bb11 ]
%tmp10 = phi i32 [ 7, %bb ], [ %tmp22, %bb11 ]
- store i32 %tmp10, i32* %arg2, align 4
- store float %tmp9, float* %arg3, align 4
+ store i32 %tmp10, ptr %arg2, align 4
+ store float %tmp9, ptr %arg3, align 4
ret void
bb11: ; preds = %bb11, %bb6
%tmp12 = phi i64 [ 1, %bb6 ], [ %tmp23, %bb11 ]
%tmp13 = phi i32 [ 7, %bb6 ], [ %tmp22, %bb11 ]
%tmp14 = phi float [ %tmp4, %bb6 ], [ %tmp19, %bb11 ]
- %tmp15 = getelementptr inbounds float, float* %arg, i64 %tmp12
- %tmp16 = load float, float* %tmp15, align 4
+ %tmp15 = getelementptr inbounds float, ptr %arg, i64 %tmp12
+ %tmp16 = load float, ptr %tmp15, align 4
%tmp17 = fcmp ogt float %tmp16, %tmp14
%tmp18 = fmul float %tmp16, 2.000000e+00
%tmp19 = select i1 %tmp17, float %tmp18, float %tmp14
br i1 %cmp1, label %if.end3.sink.split, label %if.end
if.end3.sink.split:
- %g2.sink = phi i32* [ @g2, %if.else ], [ @g1, %entry ]
- store i32 0, i32* %g2.sink, align 4
+ %g2.sink = phi ptr [ @g2, %if.else ], [ @g1, %entry ]
+ store i32 0, ptr %g2.sink, align 4
br label %if.end
if.end:
; RUN: llc -verify-machineinstrs -mcpu=g5 -mtriple=powerpc64-unknown-linux-gnu -ppc-asm-full-reg-names < %s | FileCheck %s
; Check that the peephole optimizer knows about sext and zext instructions.
; CHECK: test1sext
-define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
+define i32 @test1sext(i64 %A, i64 %B, ptr %P, ptr %P2) nounwind {
%C = add i64 %A, %B
; CHECK: add [[SUM:r[0-9]+]], r3, r4
%D = trunc i64 %C to i32
%E = shl i64 %C, 32
%F = ashr i64 %E, 32
; CHECK: extsw [[EXT:r[0-9]+]], [[SUM]]
- store volatile i64 %F, i64 *%P2
+ store volatile i64 %F, ptr %P2
; CHECK-DAG: std [[EXT]]
- store volatile i32 %D, i32* %P
+ store volatile i32 %D, ptr %P
; Reuse low bits of extended register, don't extend live range of SUM.
; CHECK-DAG: stw [[SUM]]
%R = add i32 %D, %D
}
; Function Attrs: nounwind
-define void @loop(i32 signext %x, i32* nocapture %a) #1 {
+define void @loop(i32 signext %x, ptr nocapture %a) #1 {
entry:
br label %vector.body
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%induction45 = or i64 %index, 1
- %0 = getelementptr inbounds i32, i32* %a, i64 %index
- %1 = getelementptr inbounds i32, i32* %a, i64 %induction45
- %2 = load i32, i32* %0, align 4
- %3 = load i32, i32* %1, align 4
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %1 = getelementptr inbounds i32, ptr %a, i64 %induction45
+ %2 = load i32, ptr %0, align 4
+ %3 = load i32, ptr %1, align 4
%4 = add nsw i32 %2, 4
%5 = add nsw i32 %3, 4
%6 = mul nsw i32 %4, 3
%7 = mul nsw i32 %5, 3
- store i32 %6, i32* %0, align 4
- store i32 %7, i32* %1, align 4
+ store i32 %6, ptr %0, align 4
+ store i32 %7, ptr %1, align 4
%index.next = add i64 %index, 2
%8 = icmp eq i64 %index.next, 2048
br i1 %8, label %for.end, label %vector.body
}
; Function Attrs: nounwind
-define void @sloop(i32 signext %x, i32* nocapture %a) #1 {
+define void @sloop(i32 signext %x, ptr nocapture %a) #1 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, 4
%mul = mul nsw i32 %add, 3
- store i32 %mul, i32* %arrayidx, align 4
+ store i32 %mul, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 2048
br i1 %exitcond, label %for.end, label %for.body
}
; Function Attrs: nounwind
-define void @test_minsize(i32 signext %x, i32* nocapture %a) #2 {
+define void @test_minsize(i32 signext %x, ptr nocapture %a) #2 {
entry:
br label %vector.body
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%induction45 = or i64 %index, 1
- %0 = getelementptr inbounds i32, i32* %a, i64 %index
- %1 = getelementptr inbounds i32, i32* %a, i64 %induction45
- %2 = load i32, i32* %0, align 4
- %3 = load i32, i32* %1, align 4
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %1 = getelementptr inbounds i32, ptr %a, i64 %induction45
+ %2 = load i32, ptr %0, align 4
+ %3 = load i32, ptr %1, align 4
%4 = add nsw i32 %2, 4
%5 = add nsw i32 %3, 4
%6 = mul nsw i32 %4, 3
%7 = mul nsw i32 %5, 3
- store i32 %6, i32* %0, align 4
- store i32 %7, i32* %1, align 4
+ store i32 %6, ptr %0, align 4
+ store i32 %7, ptr %1, align 4
%index.next = add i64 %index, 2
%8 = icmp eq i64 %index.next, 2048
br i1 %8, label %for.end, label %vector.body
%call = tail call coldcc { i64, i64 } @callee(i32 signext %a, i32 signext %b)
%0 = extractvalue { i64, i64 } %call, 0
%1 = extractvalue { i64, i64 } %call, 1
- store i64 %0, i64* bitcast (%struct.MyStruct* @caller.s to i64*), align 8
- store i64 %1, i64* bitcast (i32* getelementptr inbounds (%struct.MyStruct, %struct.MyStruct* @caller.s, i64 0, i32 2) to i64*), align 8
+ store i64 %0, ptr @caller.s, align 8
+ store i64 %1, ptr getelementptr inbounds (%struct.MyStruct, ptr @caller.s, i64 0, i32 2), align 8
%2 = lshr i64 %1, 32
%3 = trunc i64 %2 to i32
%sub = sub nsw i32 0, %3
; RUN: llc -mcpu=pwr9 -mtriple=powerpc64-unknown-unknown \
; RUN: -ppc-asm-full-reg-names -verify-machineinstrs -O2 < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-P9-BE
-define dso_local i32 @poc(i32* %base, i32 %index, i1 %flag, i32 %default) {
+define dso_local i32 @poc(ptr %base, i32 %index, i1 %flag, i32 %default) {
; CHECK-LABEL: poc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi. r5, r5, 1
br i1 %flag, label %true, label %false
true:
- %ptr = getelementptr inbounds i32, i32* %base, i64 %iconv
- %value = load i32, i32* %ptr, align 4
+ %ptr = getelementptr inbounds i32, ptr %base, i64 %iconv
+ %value = load i32, ptr %ptr, align 4
ret i32 %value
false:
ret i32 %default
}
-define dso_local i64 @poc_i64(i64* %base, i32 %index, i1 %flag, i64 %default) {
+define dso_local i64 @poc_i64(ptr %base, i32 %index, i1 %flag, i64 %default) {
; CHECK-LABEL: poc_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi. r5, r5, 1
br i1 %flag, label %true, label %false
true:
- %ptr = getelementptr inbounds i64, i64* %base, i64 %iconv
- %value = load i64, i64* %ptr, align 8
+ %ptr = getelementptr inbounds i64, ptr %base, i64 %iconv
+ %value = load i64, ptr %ptr, align 8
ret i64 %value
false:
ret i64 %default
}
-define dso_local i64 @no_extswsli(i64* %base, i32 %index, i1 %flag) {
+define dso_local i64 @no_extswsli(ptr %base, i32 %index, i1 %flag) {
; CHECK-LABEL: no_extswsli:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi. r5, r5, 1
br i1 %flag, label %true, label %false
true:
- %ptr = getelementptr inbounds i64, i64* %base, i64 %iconv
- %value = load i64, i64* %ptr, align 8
+ %ptr = getelementptr inbounds i64, ptr %base, i64 %iconv
+ %value = load i64, ptr %ptr, align 8
ret i64 %value
false:
%StructA = type <{ i64, { i64, i64 }, { i64, i64 } }>
-define void @TestFoo(%StructA* %this) {
- %tmp = getelementptr inbounds %StructA, %StructA* %this, i64 0, i32 1
- %tmp11 = getelementptr inbounds %StructA, %StructA* %this, i64 0, i32 1, i32 1
- %tmp12 = bitcast { i64, i64 }* %tmp to i64**
- store i64* %tmp11, i64** %tmp12
+define void @TestFoo(ptr %this) {
+ %tmp = getelementptr inbounds %StructA, ptr %this, i64 0, i32 1
+ %tmp11 = getelementptr inbounds %StructA, ptr %this, i64 0, i32 1, i32 1
+ store ptr %tmp11, ptr %tmp
call void @TestBar()
- %tmp13 = getelementptr inbounds %StructA, %StructA* %this, i64 0, i32 2, i32 1
- store i64* %tmp13, i64** undef
- %.cast.i.i.i = bitcast i64* %tmp13 to i8*
- store i8 0, i8* %.cast.i.i.i
+ %tmp13 = getelementptr inbounds %StructA, ptr %this, i64 0, i32 2, i32 1
+ store ptr %tmp13, ptr undef
+ store i8 0, ptr %tmp13
ret void
}
; return sum;
; }
;
-define i64 @two_chain_same_offset_succ_i32(i8* %p, i32 %offset, i32 %base1, i64 %n) {
+define i64 @two_chain_same_offset_succ_i32(ptr %p, i32 %offset, i32 %base1, i64 %n) {
; CHECK-LABEL: two_chain_same_offset_succ_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmplwi r6, 0
%add3 = add nsw i32 %mul2, %base1
%mul4 = shl nsw i32 %offset, 2
%add5 = add nsw i32 %mul4, %base1
- %add.ptr = getelementptr inbounds i8, i8* %p, i32 %add
- %add.ptr6 = getelementptr inbounds i8, i8* %p, i32 %add1
- %add.ptr7 = getelementptr inbounds i8, i8* %p, i32 %add3
- %add.ptr8 = getelementptr inbounds i8, i8* %p, i32 %add5
+ %add.ptr = getelementptr inbounds i8, ptr %p, i32 %add
+ %add.ptr6 = getelementptr inbounds i8, ptr %p, i32 %add1
+ %add.ptr7 = getelementptr inbounds i8, ptr %p, i32 %add3
+ %add.ptr8 = getelementptr inbounds i8, ptr %p, i32 %add5
%cmp49 = icmp sgt i64 %n, 0
br i1 %cmp49, label %for.body, label %for.cond.cleanup
%sum.051 = phi i64 [ %add19, %for.body ], [ 0, %entry ]
%i.050 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%idx.ext = trunc i64 %i.050 to i32
- %add.ptr9 = getelementptr inbounds i8, i8* %add.ptr, i32 %idx.ext
- %0 = bitcast i8* %add.ptr9 to i32*
- %1 = load i32, i32* %0, align 4
- %add.ptr11 = getelementptr inbounds i8, i8* %add.ptr6, i32 %idx.ext
- %2 = bitcast i8* %add.ptr11 to i32*
- %3 = load i32, i32* %2, align 4
- %add.ptr13 = getelementptr inbounds i8, i8* %add.ptr7, i32 %idx.ext
- %4 = bitcast i8* %add.ptr13 to i32*
- %5 = load i32, i32* %4, align 4
- %add.ptr15 = getelementptr inbounds i8, i8* %add.ptr8, i32 %idx.ext
- %6 = bitcast i8* %add.ptr15 to i32*
- %7 = load i32, i32* %6, align 4
- %mul16 = mul i32 %3, %1
- %mul17 = mul i32 %mul16, %5
- %mul18 = mul i32 %mul17, %7
+ %add.ptr9 = getelementptr inbounds i8, ptr %add.ptr, i32 %idx.ext
+ %0 = load i32, ptr %add.ptr9, align 4
+ %add.ptr11 = getelementptr inbounds i8, ptr %add.ptr6, i32 %idx.ext
+ %1 = load i32, ptr %add.ptr11, align 4
+ %add.ptr13 = getelementptr inbounds i8, ptr %add.ptr7, i32 %idx.ext
+ %2 = load i32, ptr %add.ptr13, align 4
+ %add.ptr15 = getelementptr inbounds i8, ptr %add.ptr8, i32 %idx.ext
+ %3 = load i32, ptr %add.ptr15, align 4
+ %mul16 = mul i32 %1, %0
+ %mul17 = mul i32 %mul16, %2
+ %mul18 = mul i32 %mul17, %3
%conv = zext i32 %mul18 to i64
%add19 = add nuw nsw i64 %sum.051, %conv
%inc = add nuw nsw i64 %i.050, 1
entry:
%retval = alloca { ppc_fp128, ppc_fp128 }, align 16
%x = alloca { ppc_fp128, ppc_fp128 }, align 16
- %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
- store ppc_fp128 0xM400C0000000000300000000010000000, ppc_fp128* %real
- store ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A, ppc_fp128* %imag
- %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
- %x.real = load ppc_fp128, ppc_fp128* %x.realp
- %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
- %x.imag = load ppc_fp128, ppc_fp128* %x.imagp
- %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
- store ppc_fp128 %x.real, ppc_fp128* %real1
- store ppc_fp128 %x.imag, ppc_fp128* %imag2
- %0 = load { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval
+ %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %x, i32 0, i32 0
+ %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %x, i32 0, i32 1
+ store ppc_fp128 0xM400C0000000000300000000010000000, ptr %real
+ store ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A, ptr %imag
+ %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %x, i32 0, i32 0
+ %x.real = load ppc_fp128, ptr %x.realp
+ %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %x, i32 0, i32 1
+ %x.imag = load ppc_fp128, ptr %x.imagp
+ %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, ptr %retval, i32 0, i32 1
+ store ppc_fp128 %x.real, ptr %real1
+ store ppc_fp128 %x.imag, ptr %imag2
+ %0 = load { ppc_fp128, ppc_fp128 }, ptr %retval
ret { ppc_fp128, ppc_fp128 } %0
}
entry:
%retval = alloca { float, float }, align 4
%x = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
- store float 3.500000e+00, float* %real
- store float 0xC00547AE20000000, float* %imag
- %x.realp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
- %x.real = load float, float* %x.realp
- %x.imagp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
- %x.imag = load float, float* %x.imagp
- %real1 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
- store float %x.real, float* %real1
- store float %x.imag, float* %imag2
- %0 = load { float, float }, { float, float }* %retval
+ %real = getelementptr inbounds { float, float }, ptr %x, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, ptr %x, i32 0, i32 1
+ store float 3.500000e+00, ptr %real
+ store float 0xC00547AE20000000, ptr %imag
+ %x.realp = getelementptr inbounds { float, float }, ptr %x, i32 0, i32 0
+ %x.real = load float, ptr %x.realp
+ %x.imagp = getelementptr inbounds { float, float }, ptr %x, i32 0, i32 1
+ %x.imag = load float, ptr %x.imagp
+ %real1 = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+ store float %x.real, ptr %real1
+ store float %x.imag, ptr %imag2
+ %0 = load { float, float }, ptr %retval
ret { float, float } %0
}
define hidden fastcc void @test() {
freescalar:
- %0 = load i32, i32* undef, align 4
+ %0 = load i32, ptr undef, align 4
br label %if.end420
if.end420: ; preds = %freescalar
free_body: ; preds = %if.then430, %free_rv
%or502 = or i32 undef, 255
- store i32 %or502, i32* undef, align 4
+ store i32 %or502, ptr undef, align 4
ret void
}
; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff -o - %s | FileCheck --check-prefix=BE %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -o - %s | FileCheck --check-prefix=LE %s
-define void @fold_constant_stores_loaddr(i8* %i8_ptr) {
+define void @fold_constant_stores_loaddr(ptr %i8_ptr) {
; BE-LABEL: fold_constant_stores_loaddr:
; BE: # %bb.0: # %entry
; BE-NEXT: li 4, 0
; LE-NEXT: stb 5, 0(3)
; LE-NEXT: blr
entry:
- %i64_ptr = bitcast i8* %i8_ptr to i64*
- store i64 0, i64* %i64_ptr, align 8
- store i8 170, i8* %i8_ptr, align 1
+ store i64 0, ptr %i8_ptr, align 8
+ store i8 170, ptr %i8_ptr, align 1
ret void
}
-define void @fold_constant_stores_hiaddr(i8* %i8_ptr) {
+define void @fold_constant_stores_hiaddr(ptr %i8_ptr) {
; BE-LABEL: fold_constant_stores_hiaddr:
; BE: # %bb.0: # %entry
; BE-NEXT: li 4, 0
; LE-NEXT: stb 5, 0(3)
; LE-NEXT: blr
entry:
- %i64_ptr = bitcast i8* %i8_ptr to i64*
- store i64 0, i64* %i64_ptr, align 8
- %i8_ptr2 = getelementptr inbounds i8, i8* %i8_ptr, i64 7
- store i8 170, i8* %i8_ptr, align 1
+ store i64 0, ptr %i8_ptr, align 8
+ %i8_ptr2 = getelementptr inbounds i8, ptr %i8_ptr, i64 7
+ store i8 170, ptr %i8_ptr, align 1
ret void
}
land.end: ; preds = %land.rhs, %land.lhs.true, %entry
%0 = phi i1 [ %tobool21, %land.rhs ], [ false, %land.lhs.true ], [ false, %entry ]
- %cond = load i32*, i32** undef, align 8
+ %cond = load ptr, ptr undef, align 8
br i1 undef, label %if.then95, label %for.body.lr.ph
if.then95: ; preds = %land.end
br label %for.cond290.preheader
for.cond290.preheader: ; preds = %for.end520, %for.cond286.preheader
- %srcptr.31595 = phi i16* [ getelementptr inbounds ([768 x i16], [768 x i16]* @SetupFastFullPelSearch.orig_pels, i64 0, i64 0), %for.cond286.preheader ], [ null, %for.end520 ]
- %1 = load i32, i32* undef, align 4
- %2 = load i32, i32* @weight_luma, align 4
- %3 = load i32, i32* @wp_luma_round, align 4
- %4 = load i32, i32* @luma_log_weight_denom, align 4
- %5 = load i32, i32* @offset_luma, align 4
+ %srcptr.31595 = phi ptr [ @SetupFastFullPelSearch.orig_pels, %for.cond286.preheader ], [ null, %for.end520 ]
+ %1 = load i32, ptr undef, align 4
+ %2 = load i32, ptr @weight_luma, align 4
+ %3 = load i32, ptr @wp_luma_round, align 4
+ %4 = load i32, ptr @luma_log_weight_denom, align 4
+ %5 = load i32, ptr @offset_luma, align 4
%incdec.ptr502.sum = add i64 undef, 16
br label %for.body293
for.body293: ; preds = %for.body293, %for.cond290.preheader
- %srcptr.41591 = phi i16* [ %srcptr.31595, %for.cond290.preheader ], [ undef, %for.body293 ]
- %refptr.11590 = phi i16* [ undef, %for.cond290.preheader ], [ %add.ptr517, %for.body293 ]
+ %srcptr.41591 = phi ptr [ %srcptr.31595, %for.cond290.preheader ], [ undef, %for.body293 ]
+ %refptr.11590 = phi ptr [ undef, %for.cond290.preheader ], [ %add.ptr517, %for.body293 ]
%LineSadBlk0.01588 = phi i32 [ 0, %for.cond290.preheader ], [ %add346, %for.body293 ]
%LineSadBlk1.01587 = phi i32 [ 0, %for.cond290.preheader ], [ %add402, %for.body293 ]
%LineSadBlk3.01586 = phi i32 [ 0, %for.cond290.preheader ], [ %add514, %for.body293 ]
%LineSadBlk2.01585 = phi i32 [ 0, %for.cond290.preheader ], [ %add458, %for.body293 ]
- %6 = load i16, i16* %refptr.11590, align 2
+ %6 = load i16, ptr %refptr.11590, align 2
%conv294 = zext i16 %6 to i32
%mul295 = mul nsw i32 %conv294, %2
%add296 = add nsw i32 %mul295, %3
%cond.i.i1514 = select i1 %cmp.i.i1513, i32 %add297, i32 0
%cmp.i4.i1515 = icmp slt i32 %cond.i.i1514, %1
%cond.i5.i1516 = select i1 %cmp.i4.i1515, i32 %cond.i.i1514, i32 %1
- %7 = load i16, i16* %srcptr.41591, align 2
+ %7 = load i16, ptr %srcptr.41591, align 2
%conv300 = zext i16 %7 to i32
%sub301 = sub nsw i32 %cond.i5.i1516, %conv300
%idxprom302 = sext i32 %sub301 to i64
- %arrayidx303 = getelementptr inbounds i32, i32* %cond, i64 %idxprom302
- %8 = load i32, i32* %arrayidx303, align 4
+ %arrayidx303 = getelementptr inbounds i32, ptr %cond, i64 %idxprom302
+ %8 = load i32, ptr %arrayidx303, align 4
%add304 = add nsw i32 %8, %LineSadBlk0.01588
- %9 = load i32, i32* undef, align 4
+ %9 = load i32, ptr undef, align 4
%add318 = add nsw i32 %add304, %9
- %10 = load i16, i16* undef, align 2
+ %10 = load i16, ptr undef, align 2
%conv321 = zext i16 %10 to i32
%mul322 = mul nsw i32 %conv321, %2
%add323 = add nsw i32 %mul322, %3
%cond.i5.i1508 = select i1 %cmp.i4.i1507, i32 %cond.i.i1506, i32 %1
%sub329 = sub nsw i32 %cond.i5.i1508, 0
%idxprom330 = sext i32 %sub329 to i64
- %arrayidx331 = getelementptr inbounds i32, i32* %cond, i64 %idxprom330
- %11 = load i32, i32* %arrayidx331, align 4
+ %arrayidx331 = getelementptr inbounds i32, ptr %cond, i64 %idxprom330
+ %11 = load i32, ptr %arrayidx331, align 4
%add332 = add nsw i32 %add318, %11
%cmp.i.i1501 = icmp sgt i32 undef, 0
%cond.i.i1502 = select i1 %cmp.i.i1501, i32 undef, i32 0
%cmp.i4.i1503 = icmp slt i32 %cond.i.i1502, %1
%cond.i5.i1504 = select i1 %cmp.i4.i1503, i32 %cond.i.i1502, i32 %1
- %incdec.ptr341 = getelementptr inbounds i16, i16* %srcptr.41591, i64 4
- %12 = load i16, i16* null, align 2
+ %incdec.ptr341 = getelementptr inbounds i16, ptr %srcptr.41591, i64 4
+ %12 = load i16, ptr null, align 2
%conv342 = zext i16 %12 to i32
%sub343 = sub nsw i32 %cond.i5.i1504, %conv342
%idxprom344 = sext i32 %sub343 to i64
- %arrayidx345 = getelementptr inbounds i32, i32* %cond, i64 %idxprom344
- %13 = load i32, i32* %arrayidx345, align 4
+ %arrayidx345 = getelementptr inbounds i32, ptr %cond, i64 %idxprom344
+ %13 = load i32, ptr %arrayidx345, align 4
%add346 = add nsw i32 %add332, %13
- %incdec.ptr348 = getelementptr inbounds i16, i16* %refptr.11590, i64 5
- %14 = load i16, i16* null, align 2
+ %incdec.ptr348 = getelementptr inbounds i16, ptr %refptr.11590, i64 5
+ %14 = load i16, ptr null, align 2
%conv349 = zext i16 %14 to i32
%mul350 = mul nsw i32 %conv349, %2
%add351 = add nsw i32 %mul350, %3
%cond.i.i1498 = select i1 %cmp.i.i1497, i32 %add353, i32 0
%cmp.i4.i1499 = icmp slt i32 %cond.i.i1498, %1
%cond.i5.i1500 = select i1 %cmp.i4.i1499, i32 %cond.i.i1498, i32 %1
- %incdec.ptr355 = getelementptr inbounds i16, i16* %srcptr.41591, i64 5
- %15 = load i16, i16* %incdec.ptr341, align 2
+ %incdec.ptr355 = getelementptr inbounds i16, ptr %srcptr.41591, i64 5
+ %15 = load i16, ptr %incdec.ptr341, align 2
%conv356 = zext i16 %15 to i32
%sub357 = sub nsw i32 %cond.i5.i1500, %conv356
%idxprom358 = sext i32 %sub357 to i64
- %arrayidx359 = getelementptr inbounds i32, i32* %cond, i64 %idxprom358
- %16 = load i32, i32* %arrayidx359, align 4
+ %arrayidx359 = getelementptr inbounds i32, ptr %cond, i64 %idxprom358
+ %16 = load i32, ptr %arrayidx359, align 4
%add360 = add nsw i32 %16, %LineSadBlk1.01587
- %incdec.ptr362 = getelementptr inbounds i16, i16* %refptr.11590, i64 6
- %17 = load i16, i16* %incdec.ptr348, align 2
+ %incdec.ptr362 = getelementptr inbounds i16, ptr %refptr.11590, i64 6
+ %17 = load i16, ptr %incdec.ptr348, align 2
%conv363 = zext i16 %17 to i32
%mul364 = mul nsw i32 %conv363, %2
%add365 = add nsw i32 %mul364, %3
%cond.i.i1494 = select i1 %cmp.i.i1493, i32 %add367, i32 0
%cmp.i4.i1495 = icmp slt i32 %cond.i.i1494, %1
%cond.i5.i1496 = select i1 %cmp.i4.i1495, i32 %cond.i.i1494, i32 %1
- %incdec.ptr369 = getelementptr inbounds i16, i16* %srcptr.41591, i64 6
- %18 = load i16, i16* %incdec.ptr355, align 2
+ %incdec.ptr369 = getelementptr inbounds i16, ptr %srcptr.41591, i64 6
+ %18 = load i16, ptr %incdec.ptr355, align 2
%conv370 = zext i16 %18 to i32
%sub371 = sub nsw i32 %cond.i5.i1496, %conv370
%idxprom372 = sext i32 %sub371 to i64
- %arrayidx373 = getelementptr inbounds i32, i32* %cond, i64 %idxprom372
- %19 = load i32, i32* %arrayidx373, align 4
+ %arrayidx373 = getelementptr inbounds i32, ptr %cond, i64 %idxprom372
+ %19 = load i32, ptr %arrayidx373, align 4
%add374 = add nsw i32 %add360, %19
- %incdec.ptr376 = getelementptr inbounds i16, i16* %refptr.11590, i64 7
- %20 = load i16, i16* %incdec.ptr362, align 2
+ %incdec.ptr376 = getelementptr inbounds i16, ptr %refptr.11590, i64 7
+ %20 = load i16, ptr %incdec.ptr362, align 2
%conv377 = zext i16 %20 to i32
%mul378 = mul nsw i32 %conv377, %2
%add379 = add nsw i32 %mul378, %3
%cond.i.i1490 = select i1 %cmp.i.i1489, i32 %add381, i32 0
%cmp.i4.i1491 = icmp slt i32 %cond.i.i1490, %1
%cond.i5.i1492 = select i1 %cmp.i4.i1491, i32 %cond.i.i1490, i32 %1
- %incdec.ptr383 = getelementptr inbounds i16, i16* %srcptr.41591, i64 7
- %21 = load i16, i16* %incdec.ptr369, align 2
+ %incdec.ptr383 = getelementptr inbounds i16, ptr %srcptr.41591, i64 7
+ %21 = load i16, ptr %incdec.ptr369, align 2
%conv384 = zext i16 %21 to i32
%sub385 = sub nsw i32 %cond.i5.i1492, %conv384
%idxprom386 = sext i32 %sub385 to i64
- %arrayidx387 = getelementptr inbounds i32, i32* %cond, i64 %idxprom386
- %22 = load i32, i32* %arrayidx387, align 4
+ %arrayidx387 = getelementptr inbounds i32, ptr %cond, i64 %idxprom386
+ %22 = load i32, ptr %arrayidx387, align 4
%add388 = add nsw i32 %add374, %22
- %23 = load i16, i16* %incdec.ptr376, align 2
+ %23 = load i16, ptr %incdec.ptr376, align 2
%conv391 = zext i16 %23 to i32
%mul392 = mul nsw i32 %conv391, %2
%add395 = add nsw i32 0, %5
%cond.i.i1486 = select i1 %cmp.i.i1485, i32 %add395, i32 0
%cmp.i4.i1487 = icmp slt i32 %cond.i.i1486, %1
%cond.i5.i1488 = select i1 %cmp.i4.i1487, i32 %cond.i.i1486, i32 %1
- %incdec.ptr397 = getelementptr inbounds i16, i16* %srcptr.41591, i64 8
- %24 = load i16, i16* %incdec.ptr383, align 2
+ %incdec.ptr397 = getelementptr inbounds i16, ptr %srcptr.41591, i64 8
+ %24 = load i16, ptr %incdec.ptr383, align 2
%conv398 = zext i16 %24 to i32
%sub399 = sub nsw i32 %cond.i5.i1488, %conv398
%idxprom400 = sext i32 %sub399 to i64
- %arrayidx401 = getelementptr inbounds i32, i32* %cond, i64 %idxprom400
- %25 = load i32, i32* %arrayidx401, align 4
+ %arrayidx401 = getelementptr inbounds i32, ptr %cond, i64 %idxprom400
+ %25 = load i32, ptr %arrayidx401, align 4
%add402 = add nsw i32 %add388, %25
- %incdec.ptr404 = getelementptr inbounds i16, i16* %refptr.11590, i64 9
+ %incdec.ptr404 = getelementptr inbounds i16, ptr %refptr.11590, i64 9
%cmp.i4.i1483 = icmp slt i32 undef, %1
%cond.i5.i1484 = select i1 %cmp.i4.i1483, i32 undef, i32 %1
- %26 = load i16, i16* %incdec.ptr397, align 2
+ %26 = load i16, ptr %incdec.ptr397, align 2
%conv412 = zext i16 %26 to i32
%sub413 = sub nsw i32 %cond.i5.i1484, %conv412
%idxprom414 = sext i32 %sub413 to i64
- %arrayidx415 = getelementptr inbounds i32, i32* %cond, i64 %idxprom414
- %27 = load i32, i32* %arrayidx415, align 4
+ %arrayidx415 = getelementptr inbounds i32, ptr %cond, i64 %idxprom414
+ %27 = load i32, ptr %arrayidx415, align 4
%add416 = add nsw i32 %27, %LineSadBlk2.01585
- %incdec.ptr418 = getelementptr inbounds i16, i16* %refptr.11590, i64 10
- %28 = load i16, i16* %incdec.ptr404, align 2
+ %incdec.ptr418 = getelementptr inbounds i16, ptr %refptr.11590, i64 10
+ %28 = load i16, ptr %incdec.ptr404, align 2
%conv419 = zext i16 %28 to i32
%mul420 = mul nsw i32 %conv419, %2
%add421 = add nsw i32 %mul420, %3
%cond.i.i1478 = select i1 %cmp.i.i1477, i32 %add423, i32 0
%cmp.i4.i1479 = icmp slt i32 %cond.i.i1478, %1
%cond.i5.i1480 = select i1 %cmp.i4.i1479, i32 %cond.i.i1478, i32 %1
- %incdec.ptr425 = getelementptr inbounds i16, i16* %srcptr.41591, i64 10
+ %incdec.ptr425 = getelementptr inbounds i16, ptr %srcptr.41591, i64 10
%sub427 = sub nsw i32 %cond.i5.i1480, 0
%idxprom428 = sext i32 %sub427 to i64
- %arrayidx429 = getelementptr inbounds i32, i32* %cond, i64 %idxprom428
- %29 = load i32, i32* %arrayidx429, align 4
+ %arrayidx429 = getelementptr inbounds i32, ptr %cond, i64 %idxprom428
+ %29 = load i32, ptr %arrayidx429, align 4
%add430 = add nsw i32 %add416, %29
- %incdec.ptr432 = getelementptr inbounds i16, i16* %refptr.11590, i64 11
- %30 = load i16, i16* %incdec.ptr418, align 2
+ %incdec.ptr432 = getelementptr inbounds i16, ptr %refptr.11590, i64 11
+ %30 = load i16, ptr %incdec.ptr418, align 2
%conv433 = zext i16 %30 to i32
%mul434 = mul nsw i32 %conv433, %2
%add435 = add nsw i32 %mul434, %3
%cond.i.i1474 = select i1 %cmp.i.i1473, i32 %add437, i32 0
%cmp.i4.i1475 = icmp slt i32 %cond.i.i1474, %1
%cond.i5.i1476 = select i1 %cmp.i4.i1475, i32 %cond.i.i1474, i32 %1
- %31 = load i16, i16* %incdec.ptr425, align 2
+ %31 = load i16, ptr %incdec.ptr425, align 2
%conv440 = zext i16 %31 to i32
%sub441 = sub nsw i32 %cond.i5.i1476, %conv440
%idxprom442 = sext i32 %sub441 to i64
- %arrayidx443 = getelementptr inbounds i32, i32* %cond, i64 %idxprom442
- %32 = load i32, i32* %arrayidx443, align 4
+ %arrayidx443 = getelementptr inbounds i32, ptr %cond, i64 %idxprom442
+ %32 = load i32, ptr %arrayidx443, align 4
%add444 = add nsw i32 %add430, %32
- %incdec.ptr446 = getelementptr inbounds i16, i16* %refptr.11590, i64 12
- %33 = load i16, i16* %incdec.ptr432, align 2
+ %incdec.ptr446 = getelementptr inbounds i16, ptr %refptr.11590, i64 12
+ %33 = load i16, ptr %incdec.ptr432, align 2
%conv447 = zext i16 %33 to i32
%mul448 = mul nsw i32 %conv447, %2
%add449 = add nsw i32 %mul448, %3
%cond.i.i1470 = select i1 %cmp.i.i1469, i32 %add451, i32 0
%cmp.i4.i1471 = icmp slt i32 %cond.i.i1470, %1
%cond.i5.i1472 = select i1 %cmp.i4.i1471, i32 %cond.i.i1470, i32 %1
- %incdec.ptr453 = getelementptr inbounds i16, i16* %srcptr.41591, i64 12
- %34 = load i16, i16* undef, align 2
+ %incdec.ptr453 = getelementptr inbounds i16, ptr %srcptr.41591, i64 12
+ %34 = load i16, ptr undef, align 2
%conv454 = zext i16 %34 to i32
%sub455 = sub nsw i32 %cond.i5.i1472, %conv454
%idxprom456 = sext i32 %sub455 to i64
- %arrayidx457 = getelementptr inbounds i32, i32* %cond, i64 %idxprom456
- %35 = load i32, i32* %arrayidx457, align 4
+ %arrayidx457 = getelementptr inbounds i32, ptr %cond, i64 %idxprom456
+ %35 = load i32, ptr %arrayidx457, align 4
%add458 = add nsw i32 %add444, %35
- %incdec.ptr460 = getelementptr inbounds i16, i16* %refptr.11590, i64 13
- %36 = load i16, i16* %incdec.ptr446, align 2
+ %incdec.ptr460 = getelementptr inbounds i16, ptr %refptr.11590, i64 13
+ %36 = load i16, ptr %incdec.ptr446, align 2
%conv461 = zext i16 %36 to i32
%mul462 = mul nsw i32 %conv461, %2
%add463 = add nsw i32 %mul462, %3
%cond.i.i1466 = select i1 %cmp.i.i1465, i32 %add465, i32 0
%cmp.i4.i1467 = icmp slt i32 %cond.i.i1466, %1
%cond.i5.i1468 = select i1 %cmp.i4.i1467, i32 %cond.i.i1466, i32 %1
- %incdec.ptr467 = getelementptr inbounds i16, i16* %srcptr.41591, i64 13
- %37 = load i16, i16* %incdec.ptr453, align 2
+ %incdec.ptr467 = getelementptr inbounds i16, ptr %srcptr.41591, i64 13
+ %37 = load i16, ptr %incdec.ptr453, align 2
%conv468 = zext i16 %37 to i32
%sub469 = sub nsw i32 %cond.i5.i1468, %conv468
%idxprom470 = sext i32 %sub469 to i64
- %arrayidx471 = getelementptr inbounds i32, i32* %cond, i64 %idxprom470
- %38 = load i32, i32* %arrayidx471, align 4
+ %arrayidx471 = getelementptr inbounds i32, ptr %cond, i64 %idxprom470
+ %38 = load i32, ptr %arrayidx471, align 4
%add472 = add nsw i32 %38, %LineSadBlk3.01586
- %incdec.ptr474 = getelementptr inbounds i16, i16* %refptr.11590, i64 14
+ %incdec.ptr474 = getelementptr inbounds i16, ptr %refptr.11590, i64 14
%add477 = add nsw i32 0, %3
%shr478 = ashr i32 %add477, %4
%add479 = add nsw i32 %shr478, %5
%cond.i.i1462 = select i1 %cmp.i.i1461, i32 %add479, i32 0
%cmp.i4.i1463 = icmp slt i32 %cond.i.i1462, %1
%cond.i5.i1464 = select i1 %cmp.i4.i1463, i32 %cond.i.i1462, i32 %1
- %incdec.ptr481 = getelementptr inbounds i16, i16* %srcptr.41591, i64 14
- %39 = load i16, i16* %incdec.ptr467, align 2
+ %incdec.ptr481 = getelementptr inbounds i16, ptr %srcptr.41591, i64 14
+ %39 = load i16, ptr %incdec.ptr467, align 2
%conv482 = zext i16 %39 to i32
%sub483 = sub nsw i32 %cond.i5.i1464, %conv482
%idxprom484 = sext i32 %sub483 to i64
- %arrayidx485 = getelementptr inbounds i32, i32* %cond, i64 %idxprom484
- %40 = load i32, i32* %arrayidx485, align 4
+ %arrayidx485 = getelementptr inbounds i32, ptr %cond, i64 %idxprom484
+ %40 = load i32, ptr %arrayidx485, align 4
%add486 = add nsw i32 %add472, %40
- %incdec.ptr488 = getelementptr inbounds i16, i16* %refptr.11590, i64 15
- %41 = load i16, i16* %incdec.ptr474, align 2
+ %incdec.ptr488 = getelementptr inbounds i16, ptr %refptr.11590, i64 15
+ %41 = load i16, ptr %incdec.ptr474, align 2
%conv489 = zext i16 %41 to i32
%mul490 = mul nsw i32 %conv489, %2
%add491 = add nsw i32 %mul490, %3
%cond.i.i1458 = select i1 %cmp.i.i1457, i32 %add493, i32 0
%cmp.i4.i1459 = icmp slt i32 %cond.i.i1458, %1
%cond.i5.i1460 = select i1 %cmp.i4.i1459, i32 %cond.i.i1458, i32 %1
- %incdec.ptr495 = getelementptr inbounds i16, i16* %srcptr.41591, i64 15
- %42 = load i16, i16* %incdec.ptr481, align 2
+ %incdec.ptr495 = getelementptr inbounds i16, ptr %srcptr.41591, i64 15
+ %42 = load i16, ptr %incdec.ptr481, align 2
%conv496 = zext i16 %42 to i32
%sub497 = sub nsw i32 %cond.i5.i1460, %conv496
%idxprom498 = sext i32 %sub497 to i64
- %arrayidx499 = getelementptr inbounds i32, i32* %cond, i64 %idxprom498
- %43 = load i32, i32* %arrayidx499, align 4
+ %arrayidx499 = getelementptr inbounds i32, ptr %cond, i64 %idxprom498
+ %43 = load i32, ptr %arrayidx499, align 4
%add500 = add nsw i32 %add486, %43
- %44 = load i16, i16* %incdec.ptr488, align 2
+ %44 = load i16, ptr %incdec.ptr488, align 2
%conv503 = zext i16 %44 to i32
%mul504 = mul nsw i32 %conv503, %2
%add505 = add nsw i32 %mul504, %3
%cond.i.i1454 = select i1 %cmp.i.i1453, i32 %add507, i32 0
%cmp.i4.i1455 = icmp slt i32 %cond.i.i1454, %1
%cond.i5.i1456 = select i1 %cmp.i4.i1455, i32 %cond.i.i1454, i32 %1
- %45 = load i16, i16* %incdec.ptr495, align 2
+ %45 = load i16, ptr %incdec.ptr495, align 2
%conv510 = zext i16 %45 to i32
%sub511 = sub nsw i32 %cond.i5.i1456, %conv510
%idxprom512 = sext i32 %sub511 to i64
- %arrayidx513 = getelementptr inbounds i32, i32* %cond, i64 %idxprom512
- %46 = load i32, i32* %arrayidx513, align 4
+ %arrayidx513 = getelementptr inbounds i32, ptr %cond, i64 %idxprom512
+ %46 = load i32, ptr %arrayidx513, align 4
%add514 = add nsw i32 %add500, %46
- %add.ptr517 = getelementptr inbounds i16, i16* %refptr.11590, i64 %incdec.ptr502.sum
+ %add.ptr517 = getelementptr inbounds i16, ptr %refptr.11590, i64 %incdec.ptr502.sum
%exitcond1692 = icmp eq i32 undef, 4
br i1 %exitcond1692, label %for.end520, label %for.body293
for.end520: ; preds = %for.body293
- store i32 %add346, i32* undef, align 4
- store i32 %add402, i32* undef, align 4
- store i32 %add458, i32* undef, align 4
- store i32 %add514, i32* null, align 4
+ store i32 %add346, ptr undef, align 4
+ store i32 %add402, ptr undef, align 4
+ store i32 %add458, ptr undef, align 4
+ store i32 %add514, ptr null, align 4
br i1 undef, label %for.end543, label %for.cond290.preheader
for.end543: ; preds = %for.end520
br i1 undef, label %for.inc997, label %for.body549
for.body549: ; preds = %for.inc701, %for.end543
- %call554 = call i16* null(i16**** null, i32 signext undef, i32 signext %shl263) #1
+ %call554 = call ptr null(ptr null, i32 signext undef, i32 signext %shl263) #1
br label %for.cond559.preheader
for.cond559.preheader: ; preds = %for.cond559.preheader, %for.body549
; Varargs-call test: calls printf (a variadic function) with integer-only
; arguments, once straight-line and once inside a counted loop.  The two
; "crxor 6, 6, 6" CHECK lines verify the codegen clears CR bit 6 before each
; call — on 32-bit PowerPC SVR4 this bit tells the variadic callee that no
; FP arguments were passed in registers.
; NOTE(review): the `-`/`+` lines are the typed-pointer -> opaque-pointer
; migration; the `+` form folds the zero-index GEP into a direct @.str use.
; NOTE(review): the `for.end:` label for the exit branch is not visible in
; this excerpt — presumably elided by the patch context; confirm in the
; full test file.
define void @test(i32 %count) nounwind {
entry:
; CHECK: crxor 6, 6, 6
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 1) nounwind
 %cmp2 = icmp sgt i32 %count, 0
 br i1 %cmp2, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
 %i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
; CHECK: crxor 6, 6, 6
- %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 1) nounwind
+ %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 1) nounwind
 %inc = add nsw i32 %i.03, 1
 %exitcond = icmp eq i32 %inc, %count
 br i1 %exitcond, label %for.end, label %for.body
 ret void
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
; Companion varargs test: the first printf passes only an i32, so CHECK
; expects "crxor 6, 6, 6" (clear CR6 = no FP args in registers); the second
; passes a double, so CHECK expects "creqv 6, 6, 6" (set CR6 = FP args
; present) — the PPC32 SVR4 variadic-call convention.
; NOTE(review): `-`/`+` pairs record the opaque-pointer migration of the
; printf call sites (constant GEPs over @.str/@.str1 collapse to the bare
; globals); runtime behavior is unchanged.
define void @foo() nounwind {
entry:
; CHECK: crxor 6, 6, 6
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 1)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 1)
; CHECK: creqv 6, 6, 6
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str1, i32 0, i32 0), double 1.100000e+00)
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str1, double 1.100000e+00)
ret void
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; RUN: llc < %s -mtriple=ppc32-- -regalloc=fast -O0 -relocation-model=pic -o -
; PR1638
-@.str242 = external constant [3 x i8] ; <[3 x i8]*> [#uses=1]
+@.str242 = external constant [3 x i8] ; <ptr> [#uses=1]
; Reduced regression test for PR1638 (per the RUN line above): exercises
; -regalloc=fast at -O0 with PIC on ppc32.  The IR is deliberately
; degenerate — a dead 10000-slot alloca, a constant-false branch, and an
; add of constants — only compilability matters, not the computation.
; NOTE(review): branch targets %cond_next99, %bb101 and %bb212 are not
; visible in this excerpt; presumably defined later in the full test file.
; NOTE(review): `-`/`+` lines are the opaque-pointer migration (i8* -> ptr,
; constant GEP over @.str242 folded away); no behavioral change.
-define fastcc void @ParseContent(i8* %buf, i32 %bufsize) {
+define fastcc void @ParseContent(ptr %buf, i32 %bufsize) {
entry:
- %items = alloca [10000 x i8*], align 16 ; <[10000 x i8*]*> [#uses=0]
+ %items = alloca [10000 x ptr], align 16 ; <ptr> [#uses=0]
 %tmp86 = add i32 0, -1 ; <i32> [#uses=1]
br i1 false, label %cond_true94, label %cond_next99
cond_true94: ; preds = %entry
- %tmp98 = call i32 (i8*, ...) @printf(i8* getelementptr ([3 x i8], [3 x i8]* @.str242, i32 0, i32 0), i8* null) ; <i32> [#uses=0]
+ %tmp98 = call i32 (ptr, ...) @printf(ptr @.str242, ptr null) ; <i32> [#uses=0]
 %tmp20971 = icmp sgt i32 %tmp86, 0 ; <i1> [#uses=1]
br i1 %tmp20971, label %bb101, label %bb212
ret void
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7
; Reduced crash/codegen test (RUN line above targets ppc64 pwr7): masks bit
; 4 of %x4 and truncates it to i8, then returns.  The trailing `if.end:`
; block has no predecessor in this excerpt and stores %bf.set, which is not
; defined in the visible lines — NOTE(review): this looks like bugpoint
; residue kept only so the store's type is exercised; confirm against the
; full test file before touching it.
; NOTE(review): `-`/`+` lines are the i8* -> ptr opaque-pointer migration.
-define void @test1(i1 %x, i8 %x2, i8* %x3, i64 %x4) {
+define void @test1(i1 %x, i8 %x2, ptr %x3, i64 %x4) {
entry:
 %tmp3 = and i64 %x4, 16
 %bf.shl = trunc i64 %tmp3 to i8
 ret void
if.end:
- store i8 %bf.set, i8* %x3, align 4
+ store i8 %bf.set, ptr %x3, align 4
 ret void
}
%retval = alloca i32, align 4
%__a = alloca i128, align 16
%b = alloca i64, align 8
- store i32 0, i32* %retval, align 4
- %0 = load i128, i128* %__a, align 16
+ store i32 0, ptr %retval, align 4
+ %0 = load i128, ptr %__a, align 16
%splat.splatinsert = insertelement <1 x i128> undef, i128 %0, i32 0
%splat.splat = shufflevector <1 x i128> %splat.splatinsert, <1 x i128> undef, <1 x i32> zeroinitializer
%1 = bitcast <1 x i128> %splat.splat to <2 x i64>
%2 = extractelement <2 x i64> %1, i32 0
- store i64 %2, i64* %b, align 8
+ store i64 %2, ptr %b, align 8
ret i32 0
}
; CHECK-P10-NEXT: clrldi r3, r3, 32
; CHECK-P10-NEXT: blr
entry:
- %v0 = load i64, i64* undef, align 8
+ %v0 = load i64, ptr undef, align 8
%sub = sub i64 80, %v0
%div = lshr i64 %sub, 1
%conv13 = trunc i64 %div to i32
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
- store i32 %0, i32* %ret, align 4
+ store i32 %0, ptr %ret, align 4
call void @foo()
- %1 = load i32, i32* %ret, align 4
+ %1 = load i32, ptr %ret, align 4
ret i32 %1
}
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
- store i32 %0, i32* %ret, align 4
+ store i32 %0, ptr %ret, align 4
call void @foo()
- %1 = load i32, i32* %ret, align 4
+ %1 = load i32, ptr %ret, align 4
ret i32 %1
}
entry:
%a = alloca <16 x i8>, align 16
%b = alloca <16 x i8>, align 16
- store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
- store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, ptr %a, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
ret <16 x i8> %2
; CHECK: vpmsumb 2,
entry:
%a = alloca <8 x i16>, align 16
%b = alloca <8 x i16>, align 16
- store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
- store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, ptr %a, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
ret <8 x i16> %2
; CHECK: vpmsumh 2,
entry:
%a = alloca <4 x i32>, align 16
%b = alloca <4 x i32>, align 16
- store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
- store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
ret <4 x i32> %2
; CHECK: vpmsumw 2,
entry:
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
ret <2 x i64> %2
; CHECK: vpmsumd 2,
define <2 x i64> @test_vsbox() #0 {
entry:
%a = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
%1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
ret <2 x i64> %1
; CHECK: vsbox 2,
%a = alloca <16 x i8>, align 16
%b = alloca <16 x i8>, align 16
%c = alloca <16 x i8>, align 16
- store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
- store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
- store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
- %2 = load <16 x i8>, <16 x i8>* %c, align 16
+ store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, ptr %a, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %b, align 16
+ store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, ptr %c, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
+ %2 = load <16 x i8>, ptr %c, align 16
%3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
ret <16 x i8> %3
; CHECK-LE: xxlnor
%a = alloca <8 x i16>, align 16
%b = alloca <8 x i16>, align 16
%c = alloca <8 x i16>, align 16
- store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
- store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
- store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
+ store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, ptr %a, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %b, align 16
+ store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, ptr %c, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
%1 = bitcast <8 x i16> %0 to <16 x i8>
- %2 = load <8 x i16>, <8 x i16>* %b, align 16
+ %2 = load <8 x i16>, ptr %b, align 16
%3 = bitcast <8 x i16> %2 to <16 x i8>
- %4 = load <8 x i16>, <8 x i16>* %c, align 16
+ %4 = load <8 x i16>, ptr %c, align 16
%5 = bitcast <8 x i16> %4 to <16 x i8>
%6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <8 x i16>
%a = alloca <4 x i32>, align 16
%b = alloca <4 x i32>, align 16
%c = alloca <4 x i32>, align 16
- store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
- store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
- store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %b, align 16
+ store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, ptr %c, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
%1 = bitcast <4 x i32> %0 to <16 x i8>
- %2 = load <4 x i32>, <4 x i32>* %b, align 16
+ %2 = load <4 x i32>, ptr %b, align 16
%3 = bitcast <4 x i32> %2 to <16 x i8>
- %4 = load <4 x i32>, <4 x i32>* %c, align 16
+ %4 = load <4 x i32>, ptr %c, align 16
%5 = bitcast <4 x i32> %4 to <16 x i8>
%6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <4 x i32>
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
%c = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %c, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
%1 = bitcast <2 x i64> %0 to <16 x i8>
- %2 = load <2 x i64>, <2 x i64>* %b, align 16
+ %2 = load <2 x i64>, ptr %b, align 16
%3 = bitcast <2 x i64> %2 to <16 x i8>
- %4 = load <2 x i64>, <2 x i64>* %c, align 16
+ %4 = load <2 x i64>, ptr %c, align 16
%5 = bitcast <2 x i64> %4 to <16 x i8>
%6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <2 x i64>
entry:
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
ret <2 x i64> %2
; CHECK: vcipher 2,
entry:
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
ret <2 x i64> %2
; CHECK: vcipherlast 2,
entry:
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
ret <2 x i64> %2
; CHECK: vncipher 2,
entry:
%a = alloca <2 x i64>, align 16
%b = alloca <2 x i64>, align 16
- store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, ptr %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
ret <2 x i64> %2
; CHECK: vncipherlast 2,
define <4 x i32> @test_vshasigmaw() #0 {
entry:
%a = alloca <4 x i32>, align 16
- store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, ptr %a, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
%1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
ret <4 x i32> %1
; CHECK: vshasigmaw 2,
define <2 x i64> @test_vshasigmad() #0 {
entry:
%a = alloca <2 x i64>, align 16
- store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %a, align 16
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, ptr %a, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
%1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
ret <2 x i64> %1
; CHECK: vshasigmad 2,
; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 2
; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 2
-@IndirectCallPtr = dso_local local_unnamed_addr global void (...)* null, align 8
+@IndirectCallPtr = dso_local local_unnamed_addr global ptr null, align 8
define dso_local signext i32 @func1() local_unnamed_addr #0 {
entry:
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%0 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %0, i32 0
%sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext3 = extractelement <2 x double> %1, i32 1
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext3, double 9.900000e+01, metadata !"une", metadata !"fpexcept.ignore") #0
define dso_local signext i32 @func2() local_unnamed_addr #0 {
entry:
- %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
- %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ %call = tail call <2 x double> @getvector1() #0
+ %call1 = tail call <2 x double> @getvector2() #0
+ tail call void @directCall() #0
%mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %mul, i32 0
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext, double 4.000000e+00, metadata !"oeq", metadata !"fpexcept.ignore") #0
br i1 %cmp, label %cleanup, label %if.end
if.end: ; preds = %entry
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%mul10 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%0 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul10) #0
br label %cleanup
define dso_local signext i32 @func3() local_unnamed_addr #0 {
entry:
- %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %0 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %0() #0
%1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %1, i32 0
%sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
- %2 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %2 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %2() #0
%3 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext4 = extractelement <2 x double> %3, i32 1
define dso_local signext i32 @func4() local_unnamed_addr #0 {
entry:
- %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
- %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
- %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %call = tail call <2 x double> @getvector1() #0
+ %call1 = tail call <2 x double> @getvector2() #0
+ %0 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %0() #0
%mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %mul, i32 0
br i1 %cmp, label %cleanup, label %if.end
if.end: ; preds = %entry
- %1 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %1 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %1() #0
%mul11 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%2 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul11) #0
@a = dso_local local_unnamed_addr global i32 0, align 4
-define dso_local signext i32 @test1(i32* %b) local_unnamed_addr {
+define dso_local signext i32 @test1(ptr %b) local_unnamed_addr {
; CHECK-PWR9-LABEL: test1:
; CHECK-PWR9: # %bb.0: # %entry
; CHECK-PWR9-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @a, align 4, !tbaa !2
+ %0 = load i32, ptr @a, align 4, !tbaa !2
%conv = sext i32 %0 to i64
- %1 = inttoptr i64 %conv to i32*
- %cmp = icmp eq i32* %1, %b
+ %1 = inttoptr i64 %conv to ptr
+ %cmp = icmp eq ptr %1, %b
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %call = tail call signext i32 bitcast (i32 (...)* @callVoid to i32 ()*)()
- %call2 = tail call signext i32 @callNonVoid(i32* %b)
+ %call = tail call signext i32 @callVoid()
+ %call2 = tail call signext i32 @callNonVoid(ptr %b)
br label %if.end
if.end: ; preds = %if.then, %entry
declare signext i32 @callVoid(...) local_unnamed_addr
-declare signext i32 @callNonVoid(i32*) local_unnamed_addr
+declare signext i32 @callNonVoid(ptr) local_unnamed_addr
-define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr {
+define dso_local signext i32 @test2(ptr %p1) local_unnamed_addr {
; CHECK-PWR9-LABEL: test2:
; CHECK-PWR9: # %bb.0: # %entry
; CHECK-PWR9-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %tobool = icmp eq i32* %p1, null
+ %tobool = icmp eq ptr %p1, null
br i1 %tobool, label %return, label %if.end
if.end: ; preds = %entry
- %0 = load i32, i32* @a, align 4, !tbaa !2
+ %0 = load i32, ptr @a, align 4, !tbaa !2
%conv = sext i32 %0 to i64
- %1 = inttoptr i64 %conv to i32*
- %cmp = icmp eq i32* %1, %p1
+ %1 = inttoptr i64 %conv to ptr
+ %cmp = icmp eq ptr %1, %p1
br i1 %cmp, label %if.then2, label %return
if.then2: ; preds = %if.end
- %call = tail call signext i32 bitcast (i32 (...)* @callVoid to i32 ()*)()
- %call3 = tail call signext i32 @callNonVoid(i32* nonnull %p1)
+ %call = tail call signext i32 @callVoid()
+ %call3 = tail call signext i32 @callNonVoid(ptr nonnull %p1)
br label %return
return: ; preds = %if.end, %entry, %if.then2
}
; Equivalent C shape (grounded in the visible IR):
;   char *test3(char **p1, unsigned char p2) {
;     char *p = *p1;              // entry: load + null test
;     if (p) *p1 = bar(p, p2);    // land.rhs: call @bar, store back
;     return p;                   // land.end: returns the ORIGINAL value
;   }
; i.e. short-circuit evaluation: @bar runs only for a non-null pointer, and
; the pre-call value %0 is what is returned, not @bar's result.
; NOTE(review): the elided CHECK-PWR9/CHECK lines pin the pwr9 codegen for
; this pattern; keep the IR byte-stable or the FileCheck patterns may drift.
; NOTE(review): `-`/`+` lines are the i8*/i8** -> ptr opaque-pointer
; migration; !tbaa metadata and semantics are unchanged.
-define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_addr {
+define dso_local ptr @test3(ptr nocapture %p1, i8 zeroext %p2) local_unnamed_addr {
; CHECK-PWR9-LABEL: test3:
; CHECK-PWR9: # %bb.0: # %entry
; CHECK-PWR9-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load i8*, i8** %p1, align 8, !tbaa !6
- %tobool = icmp eq i8* %0, null
+ %0 = load ptr, ptr %p1, align 8, !tbaa !6
+ %tobool = icmp eq ptr %0, null
br i1 %tobool, label %land.end, label %land.rhs
land.rhs: ; preds = %entry
- %call = tail call i8* @bar(i8* nonnull %0, i8 zeroext %p2)
- store i8* %call, i8** %p1, align 8, !tbaa !6
+ %call = tail call ptr @bar(ptr nonnull %0, i8 zeroext %p2)
+ store ptr %call, ptr %p1, align 8, !tbaa !6
br label %land.end
land.end: ; preds = %entry, %land.rhs
- ret i8* %0
+ ret ptr %0
}
-declare i8* @bar(i8*, i8 zeroext) local_unnamed_addr
+declare ptr @bar(ptr, i8 zeroext) local_unnamed_addr
!llvm.module.flags = !{!0}
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%induction5 = or i64 %index, 1
- %0 = getelementptr inbounds [1600 x i32], [1600 x i32]* @x, i64 0, i64 %index
- %1 = getelementptr inbounds [1600 x i32], [1600 x i32]* @x, i64 0, i64 %induction5
- %2 = load i32, i32* %0, align 4
- %3 = load i32, i32* %1, align 4
+ %0 = getelementptr inbounds [1600 x i32], ptr @x, i64 0, i64 %index
+ %1 = getelementptr inbounds [1600 x i32], ptr @x, i64 0, i64 %induction5
+ %2 = load i32, ptr %0, align 4
+ %3 = load i32, ptr %1, align 4
%4 = add nsw i32 %2, %v
%5 = add nsw i32 %3, %v
- store i32 %4, i32* %0, align 4
- store i32 %5, i32* %1, align 4
+ store i32 %4, ptr %0, align 4
+ store i32 %5, ptr %1, align 4
%index.next = add i64 %index, 2
%6 = icmp eq i64 %index.next, 1600
br i1 %6, label %for.cond.cleanup, label %vector.body
declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
-define void @test1(float %f, float* %fp) {
+define void @test1(float %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call float @llvm.minnum.f32(float %f, float 1.0)
- store float %0, float* %fp, align 4
+ store float %0, ptr %fp, align 4
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test1v(<4 x float> %f, <4 x float>* %fp) {
+define void @test1v(<4 x float> %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call <4 x float> @llvm.minnum.v4f32(<4 x float> %f, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>)
- store <4 x float> %0, <4 x float>* %fp, align 16
+ store <4 x float> %0, ptr %fp, align 16
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 4
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: xsmindp
; CHECK: blr
-define void @test1a(float %f, float* %fp) {
+define void @test1a(float %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call float @fminf(float %f, float 1.0) readnone
- store float %0, float* %fp, align 4
+ store float %0, ptr %fp, align 4
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test2(float %f, float* %fp) {
+define void @test2(float %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call float @llvm.maxnum.f32(float %f, float 1.0)
- store float %0, float* %fp, align 4
+ store float %0, ptr %fp, align 4
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test2v(<4 x double> %f, <4 x double>* %fp) {
+define void @test2v(<4 x double> %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %f, <4 x double> <double 1.0, double 1.0, double 1.0, double 1.0>)
- store <4 x double> %0, <4 x double>* %fp, align 16
+ store <4 x double> %0, ptr %fp, align 16
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 4
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: xsmaxdp
; CHECK: blr
-define void @test2a(float %f, float* %fp) {
+define void @test2a(float %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call float @fmaxf(float %f, float 1.0) readnone
- store float %0, float* %fp, align 4
+ store float %0, ptr %fp, align 4
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test3(double %f, double* %fp) {
+define void @test3(double %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call double @llvm.minnum.f64(double %f, double 1.0)
- store double %0, double* %fp, align 8
+ store double %0, ptr %fp, align 8
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test3a(double %f, double* %fp) {
+define void @test3a(double %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call double @fmin(double %f, double 1.0) readnone
- store double %0, double* %fp, align 8
+ store double %0, ptr %fp, align 8
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test4(double %f, double* %fp) {
+define void @test4(double %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call double @llvm.maxnum.f64(double %f, double 1.0)
- store double %0, double* %fp, align 8
+ store double %0, ptr %fp, align 8
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; CHECK-NOT: mtctr
; CHECK: blr
-define void @test4a(double %f, double* %fp) {
+define void @test4a(double %f, ptr %fp) {
entry:
br label %loop_body
loop_body:
%invar_address.dim.0.01 = phi i64 [ 0, %entry ], [ %1, %loop_body ]
%0 = call double @fmax(double %f, double 1.0) readnone
- store double %0, double* %fp, align 8
+ store double %0, ptr %fp, align 8
%1 = add i64 %invar_address.dim.0.01, 1
%2 = icmp eq i64 %1, 2
br i1 %2, label %loop_exit, label %loop_body
; RUN: llc -mtriple powerpc64le < %s | FileCheck %s
; Check constrained ops converted to call
-define void @test(double* %cast) {
+define void @test(ptr %cast) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %root
; CHECK-NEXT: mflr 0
for.body:
%i = phi i64 [ 0, %root ], [ %next, %for.body ]
- %idx = getelementptr inbounds double, double* %cast, i64 %i
- %val = load double, double* %idx
+ %idx = getelementptr inbounds double, ptr %cast, i64 %i
+ %val = load double, ptr %idx
%cos = tail call nnan ninf nsz arcp double @llvm.experimental.constrained.cos.f64(double %val, metadata !"round.dynamic", metadata !"fpexcept.strict")
- store double %cos, double* %idx, align 8
+ store double %cos, ptr %idx, align 8
%next = add nuw nsw i64 %i, 1
%cond = icmp eq i64 %next, 255
br i1 %cond, label %exit, label %for.body
}
; Check constrained ops converted to native instruction
-define void @test2(double* %cast) {
+define void @test2(ptr %cast) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 255
for.body:
%i = phi i64 [ 0, %entry ], [ %next, %for.body ]
- %idx = getelementptr inbounds double, double* %cast, i64 %i
- %val = load double, double* %idx
+ %idx = getelementptr inbounds double, ptr %cast, i64 %i
+ %val = load double, ptr %idx
%cos = tail call nnan ninf nsz arcp double @llvm.experimental.constrained.sqrt.f64(double %val, metadata !"round.dynamic", metadata !"fpexcept.strict")
- store double %cos, double* %idx, align 8
+ store double %cos, ptr %idx, align 8
%next = add nuw nsw i64 %i, 1
%cond = icmp eq i64 %next, 255
br i1 %cond, label %exit, label %for.body
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc-unknown-linux-gnu"
-define ppc_fp128 @foo(ppc_fp128* nocapture %n, ppc_fp128 %d) nounwind readonly {
+define ppc_fp128 @foo(ptr nocapture %n, ppc_fp128 %d) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi ppc_fp128 [ %d, %entry ], [ %conv, %for.body ]
- %arrayidx = getelementptr inbounds ppc_fp128, ppc_fp128* %n, i32 %i.06
- %0 = load ppc_fp128, ppc_fp128* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds ppc_fp128, ptr %n, i32 %i.06
+ %0 = load ppc_fp128, ptr %arrayidx, align 8
%conv = tail call ppc_fp128 @copysignl(ppc_fp128 %x.05, ppc_fp128 %d) nounwind readonly
%inc = add nsw i32 %i.06, 1
%exitcond = icmp eq i32 %inc, 2048
define void @fmul_ctrloop_fp128() {
entry:
- %0 = load fp128, fp128* @a, align 16
+ %0 = load fp128, ptr @a, align 16
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [4 x fp128], [4 x fp128]* @x, i64 0, i64 %i.06
- %1 = load fp128, fp128* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds [4 x fp128], ptr @x, i64 0, i64 %i.06
+ %1 = load fp128, ptr %arrayidx, align 16
%mul = fmul fp128 %0, %1
- %arrayidx1 = getelementptr inbounds [4 x fp128], [4 x fp128]* @y, i64 0, i64 %i.06
- store fp128 %mul, fp128* %arrayidx1, align 16
+ %arrayidx1 = getelementptr inbounds [4 x fp128], ptr @y, i64 0, i64 %i.06
+ store fp128 %mul, ptr %arrayidx1, align 16
%inc = add nuw nsw i64 %i.06, 1
%exitcond = icmp eq i64 %inc, 4
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NOT: call i1 @llvm.loop.decrement.i64(i64 1)
}
-define void @fpext_ctrloop_fp128(double* %a) {
+define void @fpext_ctrloop_fp128(ptr %a) {
entry:
br label %for.body
for.body:
%i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %a, i64 %i.06
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %a, i64 %i.06
+ %0 = load double, ptr %arrayidx, align 8
%ext = fpext double %0 to fp128
- %arrayidx1 = getelementptr inbounds [4 x fp128], [4 x fp128]* @y, i64 0, i64 %i.06
- store fp128 %ext, fp128* %arrayidx1, align 16
+ %arrayidx1 = getelementptr inbounds [4 x fp128], ptr @y, i64 0, i64 %i.06
+ store fp128 %ext, ptr %arrayidx1, align 16
%inc = add nuw nsw i64 %i.06, 1
%exitcond = icmp eq i64 %inc, 4
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NOT: call i1 @llvm.loop.decrement.i64(i64 1)
}
-define void @fptrunc_ctrloop_fp128(double* %a) {
+define void @fptrunc_ctrloop_fp128(ptr %a) {
entry:
br label %for.body
for.body:
%i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [4 x fp128], [4 x fp128]* @x, i64 0, i64 %i.06
- %0 = load fp128, fp128* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds [4 x fp128], ptr @x, i64 0, i64 %i.06
+ %0 = load fp128, ptr %arrayidx, align 16
%trunc = fptrunc fp128 %0 to double
- %arrayidx1 = getelementptr inbounds double, double* %a, i64 %i.06
- store double %trunc, double* %arrayidx1, align 16
+ %arrayidx1 = getelementptr inbounds double, ptr %a, i64 %i.06
+ store double %trunc, ptr %arrayidx1, align 16
%inc = add nuw nsw i64 %i.06, 1
%exitcond = icmp eq i64 %inc, 4
br i1 %exitcond, label %for.end, label %for.body
; CHECK-NOT: call i1 @llvm.loop.decrement.i64(i64 1)
}
-declare void @obfuscate(i8*, ...) local_unnamed_addr #2
+declare void @obfuscate(ptr, ...) local_unnamed_addr #2
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc-unknown-linux-gnu"
-define i64 @foo(double* nocapture %n) nounwind readonly {
+define i64 @foo(ptr nocapture %n) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %n, i32 %i.06
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %n, i32 %i.06
+ %0 = load double, ptr %arrayidx, align 8
%conv = sitofp i64 %x.05 to double
%add = fadd double %conv, %0
%conv1 = fptosi double %add to i64
@init_value = global double 1.000000e+00, align 8
@data64 = global [8000 x i64] zeroinitializer, align 8
-define i32 @main(i32 %argc, i8** nocapture %argv) {
+define i32 @main(i32 %argc, ptr nocapture %argv) {
entry:
- %0 = load double, double* @init_value, align 8
+ %0 = load double, ptr @init_value, align 8
%conv = fptosi double %0 to i64
%broadcast.splatinsert.i = insertelement <2 x i64> undef, i64 %conv, i32 0
%broadcast.splat.i = shufflevector <2 x i64> %broadcast.splatinsert.i, <2 x i64> undef, <2 x i32> zeroinitializer
vector.body.i: ; preds = %vector.body.i, %entry
%index.i = phi i32 [ 0, %entry ], [ %index.next.i, %vector.body.i ]
- %next.gep.i = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %index.i
- %1 = bitcast i64* %next.gep.i to <2 x i64>*
- store <2 x i64> %broadcast.splat.i, <2 x i64>* %1, align 8
+ %next.gep.i = getelementptr [8000 x i64], ptr @data64, i32 0, i32 %index.i
+ store <2 x i64> %broadcast.splat.i, ptr %next.gep.i, align 8
%next.gep.sum24.i = or i32 %index.i, 2
- %2 = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
- %3 = bitcast i64* %2 to <2 x i64>*
- store <2 x i64> %broadcast.splat.i, <2 x i64>* %3, align 8
+ %1 = getelementptr [8000 x i64], ptr @data64, i32 0, i32 %next.gep.sum24.i
+ store <2 x i64> %broadcast.splat.i, ptr %1, align 8
%index.next.i = add i32 %index.i, 4
- %4 = icmp eq i32 %index.next.i, 8000
- br i1 %4, label %_Z4fillIPxxEvT_S1_T0_.exit, label %vector.body.i
+ %2 = icmp eq i32 %index.next.i, 8000
+ br i1 %2, label %_Z4fillIPxxEvT_S1_T0_.exit, label %vector.body.i
_Z4fillIPxxEvT_S1_T0_.exit: ; preds = %vector.body.i
ret i32 0
_Label_0: ; preds = %_Label_0, %entry-block
%result.0138 = phi i128 [ %5, %_Label_0 ], [ 0, %entry-block ]
- %iter.sroa.0.0137 = phi i8* [ %0, %_Label_0 ], [ undef, %entry-block ]
- %0 = getelementptr inbounds i8, i8* %iter.sroa.0.0137, i64 1
+ %iter.sroa.0.0137 = phi ptr [ %0, %_Label_0 ], [ undef, %entry-block ]
+ %0 = getelementptr inbounds i8, ptr %iter.sroa.0.0137, i64 1
%1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %result.0138, i128 undef) #2
%2 = extractvalue { i128, i1 } %1, 0
%3 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %2, i128 0) #2
%4 = extractvalue { i128, i1 } %3, 1
%5 = extractvalue { i128, i1 } %3, 0
- %6 = icmp eq i8* %0, null
+ %6 = icmp eq ptr %0, null
br i1 %6, label %bb66.loopexit, label %_Label_0
bb66.loopexit: ; preds = %_Label_0
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc-unknown-linux-gnu"
-define i64 @foo(i64* nocapture %n, i64 %d) nounwind readonly {
+define i64 @foo(ptr nocapture %n, i64 %d) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %n, i32 %i.06
+ %0 = load i64, ptr %arrayidx, align 8
%conv = udiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
; CHECK: @foo
; CHECK-NOT: mtctr
-define i64 @foo2(i64* nocapture %n, i64 %d) nounwind readonly {
+define i64 @foo2(ptr nocapture %n, i64 %d) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %n, i32 %i.06
+ %0 = load i64, ptr %arrayidx, align 8
%conv = sdiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
; CHECK: @foo2
; CHECK-NOT: mtctr
-define i64 @foo3(i64* nocapture %n, i64 %d) nounwind readonly {
+define i64 @foo3(ptr nocapture %n, i64 %d) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %n, i32 %i.06
+ %0 = load i64, ptr %arrayidx, align 8
%conv = urem i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
; CHECK: @foo3
; CHECK-NOT: mtctr
-define i64 @foo4(i64* nocapture %n, i64 %d) nounwind readonly {
+define i64 @foo4(ptr nocapture %n, i64 %d) nounwind readonly {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %n, i32 %i.06
+ %0 = load i64, ptr %arrayidx, align 8
%conv = srem i64 %x.05, %d
%conv1 = add i64 %conv, %0
%inc = add nsw i32 %i.06, 1
@.str.11.98 = external hidden unnamed_addr constant [3 x i8], align 1
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
; Function Attrs: nounwind
-declare i8* @halide_string_to_string(i8*, i8*, i8*) #1
+declare ptr @halide_string_to_string(ptr, ptr, ptr) #1
; Function Attrs: nounwind
-declare i8* @halide_int64_to_string(i8*, i8*, i64, i32) #1
+declare ptr @halide_int64_to_string(ptr, ptr, i64, i32) #1
; Function Attrs: nounwind
-define weak i8* @halide_double_to_string(i8* %dst, i8* %end, double %arg, i32 %scientific) #1 {
+define weak ptr @halide_double_to_string(ptr %dst, ptr %end, double %arg, i32 %scientific) #1 {
entry:
%arg.addr = alloca double, align 8
%bits = alloca i64, align 8
%buf = alloca [512 x i8], align 1
- store double %arg, double* %arg.addr, align 8, !tbaa !4
- %0 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) #0
- store i64 0, i64* %bits, align 8, !tbaa !8
- %1 = bitcast double* %arg.addr to i8*
- %call = call i8* @memcpy(i8* %0, i8* %1, i64 8) #2
- %2 = load i64, i64* %bits, align 8, !tbaa !8
- %and = and i64 %2, 4503599627370495
- %shr = lshr i64 %2, 52
+ store double %arg, ptr %arg.addr, align 8, !tbaa !4
+ call void @llvm.lifetime.start.p0(i64 8, ptr %bits) #0
+ store i64 0, ptr %bits, align 8, !tbaa !8
+ %call = call ptr @memcpy(ptr %bits, ptr %arg.addr, i64 8) #2
+ %0 = load i64, ptr %bits, align 8, !tbaa !8
+ %and = and i64 %0, 4503599627370495
+ %shr = lshr i64 %0, 52
%shr.tr = trunc i64 %shr to i32
%conv = and i32 %shr.tr, 2047
- %shr2 = lshr i64 %2, 63
+ %shr2 = lshr i64 %0, 63
%conv3 = trunc i64 %shr2 to i32
%cmp = icmp eq i32 %conv, 2047
br i1 %cmp, label %if.then, label %if.else.15
br i1 %tobool5, label %if.then.6, label %if.else
if.then.6: ; preds = %if.then.4
- %call7 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.87, i64 0, i64 0)) #3
+ %call7 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.87) #3
br label %cleanup.148
if.else: ; preds = %if.then.4
- %call8 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.1.88, i64 0, i64 0)) #3
+ %call8 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.1.88) #3
br label %cleanup.148
if.else.9: ; preds = %if.then
br i1 %tobool5, label %if.then.11, label %if.else.13
if.then.11: ; preds = %if.else.9
- %call12 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.2.89, i64 0, i64 0)) #3
+ %call12 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.2.89) #3
br label %cleanup.148
if.else.13: ; preds = %if.else.9
- %call14 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.3.90, i64 0, i64 0)) #3
+ %call14 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.3.90) #3
br label %cleanup.148
if.else.15: ; preds = %entry
br i1 %tobool21, label %if.then.22, label %if.else.24
if.then.22: ; preds = %if.then.20
- %call23 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.4.91, i64 0, i64 0)) #3
+ %call23 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.4.91) #3
br label %cleanup.148
if.else.24: ; preds = %if.then.20
- %call25 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.5.92, i64 0, i64 0)) #3
+ %call25 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.5.92) #3
br label %cleanup.148
if.else.26: ; preds = %if.then.18
br i1 %tobool21, label %if.then.28, label %if.else.30
if.then.28: ; preds = %if.else.26
- %call29 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.6.93, i64 0, i64 0)) #3
+ %call29 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.6.93) #3
br label %cleanup.148
if.else.30: ; preds = %if.else.26
- %call31 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.7.94, i64 0, i64 0)) #3
+ %call31 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.7.94) #3
br label %cleanup.148
if.end.32: ; preds = %if.else.15
br i1 %tobool33, label %if.end.37, label %if.then.34
if.then.34: ; preds = %if.end.32
- %call35 = call i8* @halide_string_to_string(i8* %dst, i8* %end, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.8.95, i64 0, i64 0)) #3
+ %call35 = call ptr @halide_string_to_string(ptr %dst, ptr %end, ptr @.str.8.95) #3
%sub36 = fsub double -0.000000e+00, %arg
- store double %sub36, double* %arg.addr, align 8, !tbaa !4
+ store double %sub36, ptr %arg.addr, align 8, !tbaa !4
br label %if.end.37
if.end.37: ; preds = %if.then.34, %if.end.32
%.pr = phi double [ %sub36, %if.then.34 ], [ %arg, %if.end.32 ]
- %dst.addr.0 = phi i8* [ %call35, %if.then.34 ], [ %dst, %if.end.32 ]
+ %dst.addr.0 = phi ptr [ %call35, %if.then.34 ], [ %dst, %if.end.32 ]
%tobool38 = icmp eq i32 %scientific, 0
br i1 %tobool38, label %if.else.62, label %while.condthread-pre-split
while.body: ; preds = %while.body, %while.condthread-pre-split
%exponent_base_10.0262 = phi i32 [ %dec, %while.body ], [ 0, %while.condthread-pre-split ]
- %3 = phi double [ %mul, %while.body ], [ %.pr, %while.condthread-pre-split ]
- %mul = fmul double %3, 1.000000e+01
+ %1 = phi double [ %mul, %while.body ], [ %.pr, %while.condthread-pre-split ]
+ %mul = fmul double %1, 1.000000e+01
%dec = add nsw i32 %exponent_base_10.0262, -1
%cmp40 = fcmp olt double %mul, 1.000000e+00
br i1 %cmp40, label %while.body, label %while.cond.while.cond.41thread-pre-split_crit_edge
while.cond.while.cond.41thread-pre-split_crit_edge: ; preds = %while.body
- store double %mul, double* %arg.addr, align 8, !tbaa !4
+ store double %mul, ptr %arg.addr, align 8, !tbaa !4
br label %while.cond.41thread-pre-split
while.cond.41thread-pre-split: ; preds = %while.cond.while.cond.41thread-pre-split_crit_edge, %while.condthread-pre-split
while.body.43: ; preds = %while.body.43, %while.cond.41thread-pre-split
%exponent_base_10.1258 = phi i32 [ %inc, %while.body.43 ], [ %exponent_base_10.0.lcssa, %while.cond.41thread-pre-split ]
- %4 = phi double [ %div, %while.body.43 ], [ %.pr246, %while.cond.41thread-pre-split ]
- %div = fdiv double %4, 1.000000e+01
+ %2 = phi double [ %div, %while.body.43 ], [ %.pr246, %while.cond.41thread-pre-split ]
+ %div = fdiv double %2, 1.000000e+01
%inc = add nsw i32 %exponent_base_10.1258, 1
%cmp42 = fcmp ult double %div, 1.000000e+01
br i1 %cmp42, label %while.cond.41.while.end.44_crit_edge, label %while.body.43
while.cond.41.while.end.44_crit_edge: ; preds = %while.body.43
- store double %div, double* %arg.addr, align 8, !tbaa !4
+ store double %div, ptr %arg.addr, align 8, !tbaa !4
br label %while.end.44
while.end.44: ; preds = %while.cond.41.while.end.44_crit_edge, %while.cond.41thread-pre-split
%add = fadd double %mul45, 5.000000e-01
%conv46 = fptoui double %add to i64
%div47 = udiv i64 %conv46, 1000000
- %5 = mul i64 %div47, -1000000
- %sub49 = add i64 %conv46, %5
- %call50 = call i8* @halide_int64_to_string(i8* %dst.addr.0, i8* %end, i64 %div47, i32 1) #3
- %call51 = call i8* @halide_string_to_string(i8* %call50, i8* %end, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.9.96, i64 0, i64 0)) #3
- %call52 = call i8* @halide_int64_to_string(i8* %call51, i8* %end, i64 %sub49, i32 6) #3
+ %3 = mul i64 %div47, -1000000
+ %sub49 = add i64 %conv46, %3
+ %call50 = call ptr @halide_int64_to_string(ptr %dst.addr.0, ptr %end, i64 %div47, i32 1) #3
+ %call51 = call ptr @halide_string_to_string(ptr %call50, ptr %end, ptr @.str.9.96) #3
+ %call52 = call ptr @halide_int64_to_string(ptr %call51, ptr %end, i64 %sub49, i32 6) #3
%cmp53 = icmp sgt i32 %exponent_base_10.1.lcssa, -1
br i1 %cmp53, label %if.then.54, label %if.else.56
if.then.54: ; preds = %while.end.44
- %call55 = call i8* @halide_string_to_string(i8* %call52, i8* %end, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str.10.97, i64 0, i64 0)) #3
+ %call55 = call ptr @halide_string_to_string(ptr %call52, ptr %end, ptr @.str.10.97) #3
br label %if.end.59
if.else.56: ; preds = %while.end.44
- %call57 = call i8* @halide_string_to_string(i8* %call52, i8* %end, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str.11.98, i64 0, i64 0)) #3
+ %call57 = call ptr @halide_string_to_string(ptr %call52, ptr %end, ptr @.str.11.98) #3
%sub58 = sub nsw i32 0, %exponent_base_10.1.lcssa
br label %if.end.59
if.end.59: ; preds = %if.else.56, %if.then.54
%exponent_base_10.2 = phi i32 [ %exponent_base_10.1.lcssa, %if.then.54 ], [ %sub58, %if.else.56 ]
- %dst.addr.1 = phi i8* [ %call55, %if.then.54 ], [ %call57, %if.else.56 ]
+ %dst.addr.1 = phi ptr [ %call55, %if.then.54 ], [ %call57, %if.else.56 ]
%conv60 = sext i32 %exponent_base_10.2 to i64
- %call61 = call i8* @halide_int64_to_string(i8* %dst.addr.1, i8* %end, i64 %conv60, i32 2) #3
+ %call61 = call ptr @halide_int64_to_string(ptr %dst.addr.1, ptr %end, i64 %conv60, i32 2) #3
br label %cleanup.148
if.else.62: ; preds = %if.end.37
br i1 %cmp16, label %if.then.64, label %if.end.66
if.then.64: ; preds = %if.else.62
- %call65 = call i8* @halide_double_to_string(i8* %dst.addr.0, i8* %end, double 0.000000e+00, i32 0) #3
+ %call65 = call ptr @halide_double_to_string(ptr %dst.addr.0, ptr %end, double 0.000000e+00, i32 0) #3
br label %cleanup.148
if.end.66: ; preds = %if.else.62
%conv85.244 = zext i32 %sub70 to i64
%shl86 = shl i64 %conv85.244, 52
%add88 = add i64 %shl86, 4696837146684686336
- %6 = bitcast i64 %add88 to double
- %mul90 = fmul double %6, %f.0
+ %4 = bitcast i64 %add88 to double
+ %mul90 = fmul double %4, %f.0
%add91 = fadd double %mul90, 5.000000e-01
%conv92 = fptoui double %add91 to i64
%conv93 = uitofp i64 %conv92 to double
%integer_part.2 = phi i64 [ %inc103.integer_part.0, %if.end.84 ], [ %add68, %if.end.66 ]
%integer_exponent.0 = phi i32 [ 0, %if.end.84 ], [ %sub70, %if.end.66 ]
%fractional_part.2 = phi i64 [ %.fractional_part.0, %if.end.84 ], [ 0, %if.end.66 ]
- %7 = bitcast [512 x i8]* %buf to i8*
- call void @llvm.lifetime.start.p0i8(i64 512, i8* %7) #0
- %add.ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 512
- %add.ptr106 = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 480
- %call109 = call i8* @halide_int64_to_string(i8* %add.ptr106, i8* %add.ptr, i64 %integer_part.2, i32 1) #3
+ call void @llvm.lifetime.start.p0(i64 512, ptr %buf) #0
+ %add.ptr = getelementptr inbounds [512 x i8], ptr %buf, i64 0, i64 512
+ %add.ptr106 = getelementptr inbounds [512 x i8], ptr %buf, i64 0, i64 480
+ %call109 = call ptr @halide_int64_to_string(ptr %add.ptr106, ptr %add.ptr, i64 %integer_part.2, i32 1) #3
%cmp110.252 = icmp sgt i32 %integer_exponent.0, 0
br i1 %cmp110.252, label %for.cond.112.preheader, label %for.cond.cleanup
for.cond.112.preheader: ; preds = %if.end.138, %if.end.105
%i.0255 = phi i32 [ %inc140, %if.end.138 ], [ 0, %if.end.105 ]
- %int_part_ptr.0253 = phi i8* [ %int_part_ptr.1, %if.end.138 ], [ %add.ptr106, %if.end.105 ]
- %int_part_ptr.02534 = ptrtoint i8* %int_part_ptr.0253 to i64
- %cmp114.249 = icmp eq i8* %call109, %int_part_ptr.0253
+ %int_part_ptr.0253 = phi ptr [ %int_part_ptr.1, %if.end.138 ], [ %add.ptr106, %if.end.105 ]
+ %int_part_ptr.02534 = ptrtoint ptr %int_part_ptr.0253 to i64
+ %cmp114.249 = icmp eq ptr %call109, %int_part_ptr.0253
br i1 %cmp114.249, label %if.end.138, label %for.body.116.preheader
for.body.116.preheader: ; preds = %for.cond.112.preheader
- %8 = sub i64 0, %int_part_ptr.02534
- %scevgep5 = getelementptr i8, i8* %call109, i64 %8
- %scevgep56 = ptrtoint i8* %scevgep5 to i64
+ %5 = sub i64 0, %int_part_ptr.02534
+ %scevgep5 = getelementptr i8, ptr %call109, i64 %5
+ %scevgep56 = ptrtoint ptr %scevgep5 to i64
call void @llvm.set.loop.iterations.i64(i64 %scevgep56)
br label %for.body.116
for.cond.cleanup: ; preds = %if.end.138, %if.end.105
- %int_part_ptr.0.lcssa = phi i8* [ %add.ptr106, %if.end.105 ], [ %int_part_ptr.1, %if.end.138 ]
- %9 = bitcast [512 x i8]* %buf to i8*
- %call142 = call i8* @halide_string_to_string(i8* %dst.addr.0, i8* %end, i8* %int_part_ptr.0.lcssa) #3
- %call143 = call i8* @halide_string_to_string(i8* %call142, i8* %end, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.9.96, i64 0, i64 0)) #3
- %call144 = call i8* @halide_int64_to_string(i8* %call143, i8* %end, i64 %fractional_part.2, i32 6) #3
- call void @llvm.lifetime.end.p0i8(i64 512, i8* %9) #0
+ %int_part_ptr.0.lcssa = phi ptr [ %add.ptr106, %if.end.105 ], [ %int_part_ptr.1, %if.end.138 ]
+ %call142 = call ptr @halide_string_to_string(ptr %dst.addr.0, ptr %end, ptr %int_part_ptr.0.lcssa) #3
+ %call143 = call ptr @halide_string_to_string(ptr %call142, ptr %end, ptr @.str.9.96) #3
+ %call144 = call ptr @halide_int64_to_string(ptr %call143, ptr %end, i64 %fractional_part.2, i32 6) #3
+ call void @llvm.lifetime.end.p0(i64 512, ptr %buf) #0
br label %cleanup.148
for.cond.cleanup.115: ; preds = %for.body.116
br i1 %cmp125, label %if.then.136, label %if.end.138
for.body.116: ; preds = %for.body.116, %for.body.116.preheader
- %call109.pn = phi i8* [ %p.0251, %for.body.116 ], [ %call109, %for.body.116.preheader ]
+ %call109.pn = phi ptr [ %p.0251, %for.body.116 ], [ %call109, %for.body.116.preheader ]
%carry.0250 = phi i32 [ %carry.1, %for.body.116 ], [ 0, %for.body.116.preheader ]
- %call109.pn2 = ptrtoint i8* %call109.pn to i64
- %p.0251 = getelementptr inbounds i8, i8* %call109.pn, i64 -1
- %scevgep3 = getelementptr i8, i8* inttoptr (i64 -1 to i8*), i64 %call109.pn2
- %10 = load i8, i8* %scevgep3, align 1, !tbaa !10
- %sub118 = add i8 %10, -48
+ %call109.pn2 = ptrtoint ptr %call109.pn to i64
+ %p.0251 = getelementptr inbounds i8, ptr %call109.pn, i64 -1
+ %scevgep3 = getelementptr i8, ptr inttoptr (i64 -1 to ptr), i64 %call109.pn2
+ %6 = load i8, ptr %scevgep3, align 1, !tbaa !10
+ %sub118 = add i8 %6, -48
%conv120 = sext i8 %sub118 to i32
%mul121 = shl nsw i32 %conv120, 1
%add122 = or i32 %mul121, %carry.0250
- %11 = trunc i32 %add122 to i8
- %cmp125 = icmp sgt i8 %11, 9
+ %7 = trunc i32 %add122 to i8
+ %cmp125 = icmp sgt i8 %7, 9
%sub128 = add nsw i32 %add122, 246
%carry.1 = zext i1 %cmp125 to i32
%new_digit.0.in = select i1 %cmp125, i32 %sub128, i32 %add122
%add133 = add nsw i32 %new_digit.0.in, 48
%conv134 = trunc i32 %add133 to i8
- %scevgep = getelementptr i8, i8* inttoptr (i64 -1 to i8*), i64 %call109.pn2
- store i8 %conv134, i8* %scevgep, align 1, !tbaa !10
- %12 = call i1 @llvm.loop.decrement(i64 1)
- br i1 %12, label %for.body.116, label %for.cond.cleanup.115
+ %scevgep = getelementptr i8, ptr inttoptr (i64 -1 to ptr), i64 %call109.pn2
+ store i8 %conv134, ptr %scevgep, align 1, !tbaa !10
+ %8 = call i1 @llvm.loop.decrement(i64 1)
+ br i1 %8, label %for.body.116, label %for.cond.cleanup.115
if.then.136: ; preds = %for.cond.cleanup.115
- %incdec.ptr137 = getelementptr inbounds i8, i8* %int_part_ptr.0253, i64 -1
- store i8 49, i8* %incdec.ptr137, align 1, !tbaa !10
+ %incdec.ptr137 = getelementptr inbounds i8, ptr %int_part_ptr.0253, i64 -1
+ store i8 49, ptr %incdec.ptr137, align 1, !tbaa !10
br label %if.end.138
if.end.138: ; preds = %if.then.136, %for.cond.cleanup.115, %for.cond.112.preheader
- %int_part_ptr.1 = phi i8* [ %incdec.ptr137, %if.then.136 ], [ %call109, %for.cond.112.preheader ], [ %int_part_ptr.0253, %for.cond.cleanup.115 ]
+ %int_part_ptr.1 = phi ptr [ %incdec.ptr137, %if.then.136 ], [ %call109, %for.cond.112.preheader ], [ %int_part_ptr.0253, %for.cond.cleanup.115 ]
%inc140 = add nuw nsw i32 %i.0255, 1
%exitcond = icmp eq i32 %inc140, %integer_exponent.0
br i1 %exitcond, label %for.cond.cleanup, label %for.cond.112.preheader
cleanup.148: ; preds = %for.cond.cleanup, %if.then.64, %if.end.59, %if.else.30, %if.then.28, %if.else.24, %if.then.22, %if.else.13, %if.then.11, %if.else, %if.then.6
- %retval.1 = phi i8* [ %call7, %if.then.6 ], [ %call8, %if.else ], [ %call12, %if.then.11 ], [ %call14, %if.else.13 ], [ %call23, %if.then.22 ], [ %call25, %if.else.24 ], [ %call29, %if.then.28 ], [ %call31, %if.else.30 ], [ %call65, %if.then.64 ], [ %call61, %if.end.59 ], [ %call144, %for.cond.cleanup ]
- %13 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.end.p0i8(i64 8, i8* %13) #0
- ret i8* %retval.1
+ %retval.1 = phi ptr [ %call7, %if.then.6 ], [ %call8, %if.else ], [ %call12, %if.then.11 ], [ %call14, %if.else.13 ], [ %call23, %if.then.22 ], [ %call25, %if.else.24 ], [ %call29, %if.then.28 ], [ %call31, %if.else.30 ], [ %call65, %if.then.64 ], [ %call61, %if.end.59 ], [ %call144, %for.cond.cleanup ]
+ call void @llvm.lifetime.end.p0(i64 8, ptr %bits) #0
+ ret ptr %retval.1
}
; Function Attrs: nounwind
-declare i8* @memcpy(i8*, i8* nocapture readonly, i64) #1
+declare ptr @memcpy(ptr, ptr nocapture readonly, i64) #1
; Function Attrs: nounwind
declare void @llvm.set.loop.iterations.i64(i64) #0
; CHECK: test_pos1_ir_sle
; CHECK: bdnz
; a < b
-define void @test_pos1_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ir_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 28395, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ir_sle
; CHECK: bdnz
; a < b
-define void @test_pos2_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ir_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 9073, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ir_sle
; CHECK: bdnz
; a < b
-define void @test_pos4_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ir_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 21956, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ir_sle
; CHECK: bdnz
; a < b
-define void @test_pos8_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ir_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 16782, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ir_sle
; CHECK: bdnz
; a < b
-define void @test_pos16_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ir_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 19097, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_ri_sle
; CHECK: bdnz
; a < b
-define void @test_pos1_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ri_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, 14040
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp sle i32 %inc, 14040
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ri_sle
; CHECK: bdnz
; a < b
-define void @test_pos2_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ri_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, 13710
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp sle i32 %inc, 13710
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ri_sle
; CHECK: bdnz
; a < b
-define void @test_pos4_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ri_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, 9920
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp sle i32 %inc, 9920
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ri_sle
; CHECK: bdnz
; a < b
-define void @test_pos8_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ri_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, 18924
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp sle i32 %inc, 18924
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ri_sle
; CHECK: bdnz
; a < b
-define void @test_pos16_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ri_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, 11812
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp sle i32 %inc, 11812
br i1 %cmp, label %for.body, label %for.end
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
-define void @test_pos1_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
-define void @test_pos2_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
-define void @test_pos4_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
-define void @test_pos8_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
-define void @test_pos16_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp sle i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp sle i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_ir_slt
; CHECK: bdnz
; a < b
-define void @test_pos1_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 8531, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ir_slt
; CHECK: bdnz
; a < b
-define void @test_pos2_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 9152, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ir_slt
; CHECK: bdnz
; a < b
-define void @test_pos4_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 18851, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ir_slt
; CHECK: bdnz
; a < b
-define void @test_pos8_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 25466, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ir_slt
; CHECK: bdnz
; a < b
-define void @test_pos16_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ir_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 9295, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_ri_slt
; CHECK: bdnz
; a < b
-define void @test_pos1_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 31236
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp slt i32 %inc, 31236
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ri_slt
; CHECK: bdnz
; a < b
-define void @test_pos2_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 22653
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp slt i32 %inc, 22653
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ri_slt
; CHECK: bdnz
; a < b
-define void @test_pos4_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 1431
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp slt i32 %inc, 1431
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ri_slt
; CHECK: bdnz
; a < b
-define void @test_pos8_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 22403
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp slt i32 %inc, 22403
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ri_slt
; CHECK: bdnz
; a < b
-define void @test_pos16_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ri_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 21715
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp slt i32 %inc, 21715
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_rr_slt
; CHECK: bdnz
; a < b
-define void @test_pos1_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_rr_slt
; CHECK: bdnz
; a < b
-define void @test_pos2_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_rr_slt
; CHECK: bdnz
; a < b
-define void @test_pos4_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_rr_slt
; CHECK: bdnz
; a < b
-define void @test_pos8_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_rr_slt
; CHECK: bdnz
; a < b
-define void @test_pos16_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_rr_slt(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp slt i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_ir_ne
; CHECK: bdnz
; a < b
-define void @test_pos1_ir_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ir_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 32623, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ir_ne
; CHECK: bdnz
; a < b
-define void @test_pos2_ir_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ir_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 29554, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ir_ne
; CHECK: bdnz
; a < b
-define void @test_pos4_ir_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ir_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 15692, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ir_ne
; CHECK: bdnz
; a < b
-define void @test_pos8_ir_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ir_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 10449, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ir_ne
; CHECK: bdnz
; a < b
-define void @test_pos16_ir_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ir_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 32087, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_ri_ne
; CHECK: bdnz
; a < b
-define void @test_pos1_ri_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_ri_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 3472
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp ne i32 %inc, 3472
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_ri_ne
; CHECK: bdnz
; a < b
-define void @test_pos2_ri_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_ri_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 8730
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp ne i32 %inc, 8730
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_ri_ne
; CHECK: bdnz
; a < b
-define void @test_pos4_ri_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_ri_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 1493
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp ne i32 %inc, 1493
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_ri_ne
; CHECK: bdnz
; a < b
-define void @test_pos8_ri_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_ri_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 1706
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp ne i32 %inc, 1706
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_ri_ne
; CHECK: bdnz
; a < b
-define void @test_pos16_ri_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_ri_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, 1886
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp ne i32 %inc, 1886
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos1_rr_ne
; CHECK: bdnz
; a < b
-define void @test_pos1_rr_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos1_rr_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 1
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos2_rr_ne
; CHECK: bdnz
; a < b
-define void @test_pos2_rr_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos2_rr_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 2
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos4_rr_ne
; CHECK: bdnz
; a < b
-define void @test_pos4_rr_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos4_rr_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 4
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos8_rr_ne
; CHECK: bdnz
; a < b
-define void @test_pos8_rr_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos8_rr_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 8
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
; CHECK: test_pos16_rr_ne
; CHECK: bdnz
; a < b
-define void @test_pos16_rr_ne(i8* nocapture %p, i32 %a, i32 %b) nounwind {
+define void @test_pos16_rr_ne(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
%cmp3 = icmp slt i32 %a, %b
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
- %0 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i32 %i.04
+ %0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%conv1 = trunc i32 %add to i8
- store i8 %conv1, i8* %arrayidx, align 1
+ store i8 %conv1, ptr %arrayidx, align 1
%inc = add nsw i32 %i.04, 16
%cmp = icmp ne i32 %inc, %b
br i1 %cmp, label %for.body, label %for.end
%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211 = type { %union.v.0.48.90.114.120.138.144.150.156.162.168.174.180.210, i16, i16 }
%union.v.0.48.90.114.120.138.144.150.156.162.168.174.180.210 = type { i64 }
-%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215 = type { i8*, i8*, i8*, i32, i8, i8, i64, %struct.stream_procs.2.50.92.116.122.140.146.152.158.164.170.176.182.212, i32, %struct._IO_FILE.4.52.94.118.124.142.148.154.160.166.172.178.184.214*, %struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*, i16, i32 }
-%struct.stream_procs.2.50.92.116.122.140.146.152.158.164.170.176.182.212 = type { i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*)*, i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*, i8)*, i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*, i64*)*, i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*, i64)*, i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*)*, i32 (%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*)* }
-%struct._IO_FILE.4.52.94.118.124.142.148.154.160.166.172.178.184.214 = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker.3.51.93.117.123.141.147.153.159.165.171.177.183.213*, %struct._IO_FILE.4.52.94.118.124.142.148.154.160.166.172.178.184.214*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker.3.51.93.117.123.141.147.153.159.165.171.177.183.213 = type { %struct._IO_marker.3.51.93.117.123.141.147.153.159.165.171.177.183.213*, %struct._IO_FILE.4.52.94.118.124.142.148.154.160.166.172.178.184.214*, i32 }
+%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215 = type { ptr, ptr, ptr, i32, i8, i8, i64, %struct.stream_procs.2.50.92.116.122.140.146.152.158.164.170.176.182.212, i32, ptr, ptr, i16, i32 }
+%struct.stream_procs.2.50.92.116.122.140.146.152.158.164.170.176.182.212 = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%struct._IO_FILE.4.52.94.118.124.142.148.154.160.166.172.178.184.214 = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%struct._IO_marker.3.51.93.117.123.141.147.153.159.165.171.177.183.213 = type { ptr, ptr, i32 }
-@special_ops = external global [7 x i32 (%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)*], align 8
+@special_ops = external global [7 x ptr], align 8
@ostack = external global [520 x %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211], align 8
-@osbot = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
-@osp = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
-@ostop = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
-@osp_nargs = external global [6 x %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*], align 8
+@osbot = external global ptr, align 8
+@osp = external global ptr, align 8
+@ostop = external global ptr, align 8
+@osp_nargs = external global [6 x ptr], align 8
@estack = external global [150 x %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211], align 8
-@esp = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
-@estop = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
+@esp = external global ptr, align 8
+@estop = external global ptr, align 8
@dstack = external global [20 x %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211], align 8
-@dsp = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
-@dstop = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, align 8
+@dsp = external global ptr, align 8
+@dstop = external global ptr, align 8
@name_errordict = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211
@name_ErrorNames = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211
@error_object = external global %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211, align 8
-declare i32 @zadd(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zadd(ptr)
-declare i32 @zdup(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zdup(ptr)
-declare i32 @zexch(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zexch(ptr)
-declare i32 @zifelse(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zifelse(ptr)
-declare i32 @zle(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zle(ptr)
-declare i32 @zpop(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zpop(ptr)
-declare i32 @zsub(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*)
+declare i32 @zsub(ptr)
declare void @interp_init(i32) nounwind
-declare void @interp_fix_op(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211* nocapture) nounwind
+declare void @interp_fix_op(ptr nocapture) nounwind
-define i32 @interpret(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211* %pref, i32 %user_errors) nounwind {
+define i32 @interpret(ptr %pref, i32 %user_errors) nounwind {
entry:
%erref = alloca %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211, align 8
br i1 undef, label %retry.us, label %retry
ret i32 undef
retry: ; preds = %if.end18, %entry
- %0 = phi %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211* [ null, %entry ], [ %erref, %if.end18 ]
- %call = call i32 @interp(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211* %0)
+ %0 = phi ptr [ null, %entry ], [ %erref, %if.end18 ]
+ %call = call i32 @interp(ptr %0)
switch i32 %call, label %if.end18 [
i32 -3, label %retry.us
i32 -5, label %retry.us
; CHECK: @interpret
-declare i32 @interp_exit(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211* nocapture) nounwind readnone
+declare i32 @interp_exit(ptr nocapture) nounwind readnone
-declare i32 @interp(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*) nounwind
+declare i32 @interp(ptr) nounwind
-declare i32 @dict_lookup(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211**)
+declare i32 @dict_lookup(ptr, ptr, ptr, ptr)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare i32 @obj_compare(...)
declare i32 @file_close(...)
-declare void @sread_string(%struct.stream_s.5.53.95.119.125.143.149.155.161.167.173.179.185.215*, i8*, i32)
+declare void @sread_string(ptr, ptr, i32)
@e = common global [16000 x double] zeroinitializer, align 32
@tt = common global [256 x [256 x double]] zeroinitializer, align 32
@indx = common global [16000 x i32] zeroinitializer, align 32
-@xx = common global double* null, align 8
-@yy = common global double* null, align 8
+@xx = common global ptr null, align 8
+@yy = common global ptr null, align 8
define i32 @s000() nounwind {
entry:
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
- %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 32
+ %arrayidx = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 32
%add = fadd double %0, 1.000000e+00
- %arrayidx5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
- store double %add, double* %arrayidx5, align 32
+ %arrayidx5 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv
+ store double %add, ptr %arrayidx5, align 32
%indvars.iv.next11 = or i64 %indvars.iv, 1
- %arrayidx.1 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
- %1 = load double, double* %arrayidx.1, align 8
+ %arrayidx.1 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next11
+ %1 = load double, ptr %arrayidx.1, align 8
%add.1 = fadd double %1, 1.000000e+00
- %arrayidx5.1 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
- store double %add.1, double* %arrayidx5.1, align 8
+ %arrayidx5.1 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next11
+ store double %add.1, ptr %arrayidx5.1, align 8
%indvars.iv.next.112 = or i64 %indvars.iv, 2
- %arrayidx.2 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
- %2 = load double, double* %arrayidx.2, align 16
+ %arrayidx.2 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.112
+ %2 = load double, ptr %arrayidx.2, align 16
%add.2 = fadd double %2, 1.000000e+00
- %arrayidx5.2 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
- store double %add.2, double* %arrayidx5.2, align 16
+ %arrayidx5.2 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.112
+ store double %add.2, ptr %arrayidx5.2, align 16
%indvars.iv.next.213 = or i64 %indvars.iv, 3
- %arrayidx.3 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
- %3 = load double, double* %arrayidx.3, align 8
+ %arrayidx.3 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.213
+ %3 = load double, ptr %arrayidx.3, align 8
%add.3 = fadd double %3, 1.000000e+00
- %arrayidx5.3 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
- store double %add.3, double* %arrayidx5.3, align 8
+ %arrayidx5.3 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.213
+ store double %add.3, ptr %arrayidx5.3, align 8
%indvars.iv.next.314 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
- %4 = load double, double* %arrayidx.4, align 32
+ %arrayidx.4 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.314
+ %4 = load double, ptr %arrayidx.4, align 32
%add.4 = fadd double %4, 1.000000e+00
- %arrayidx5.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
- store double %add.4, double* %arrayidx5.4, align 32
+ %arrayidx5.4 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.314
+ store double %add.4, ptr %arrayidx5.4, align 32
%indvars.iv.next.415 = or i64 %indvars.iv, 5
- %arrayidx.5 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
- %5 = load double, double* %arrayidx.5, align 8
+ %arrayidx.5 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.415
+ %5 = load double, ptr %arrayidx.5, align 8
%add.5 = fadd double %5, 1.000000e+00
- %arrayidx5.5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
- store double %add.5, double* %arrayidx5.5, align 8
+ %arrayidx5.5 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.415
+ store double %add.5, ptr %arrayidx5.5, align 8
%indvars.iv.next.516 = or i64 %indvars.iv, 6
- %arrayidx.6 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
- %6 = load double, double* %arrayidx.6, align 16
+ %arrayidx.6 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.516
+ %6 = load double, ptr %arrayidx.6, align 16
%add.6 = fadd double %6, 1.000000e+00
- %arrayidx5.6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
- store double %add.6, double* %arrayidx5.6, align 16
+ %arrayidx5.6 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.516
+ store double %add.6, ptr %arrayidx5.6, align 16
%indvars.iv.next.617 = or i64 %indvars.iv, 7
- %arrayidx.7 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
- %7 = load double, double* %arrayidx.7, align 8
+ %arrayidx.7 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.617
+ %7 = load double, ptr %arrayidx.7, align 8
%add.7 = fadd double %7, 1.000000e+00
- %arrayidx5.7 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
- store double %add.7, double* %arrayidx5.7, align 8
+ %arrayidx5.7 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.617
+ store double %add.7, ptr %arrayidx5.7, align 8
%indvars.iv.next.718 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
- %8 = load double, double* %arrayidx.8, align 32
+ %arrayidx.8 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.718
+ %8 = load double, ptr %arrayidx.8, align 32
%add.8 = fadd double %8, 1.000000e+00
- %arrayidx5.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
- store double %add.8, double* %arrayidx5.8, align 32
+ %arrayidx5.8 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.718
+ store double %add.8, ptr %arrayidx5.8, align 32
%indvars.iv.next.819 = or i64 %indvars.iv, 9
- %arrayidx.9 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
- %9 = load double, double* %arrayidx.9, align 8
+ %arrayidx.9 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.819
+ %9 = load double, ptr %arrayidx.9, align 8
%add.9 = fadd double %9, 1.000000e+00
- %arrayidx5.9 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
- store double %add.9, double* %arrayidx5.9, align 8
+ %arrayidx5.9 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.819
+ store double %add.9, ptr %arrayidx5.9, align 8
%indvars.iv.next.920 = or i64 %indvars.iv, 10
- %arrayidx.10 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
- %10 = load double, double* %arrayidx.10, align 16
+ %arrayidx.10 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.920
+ %10 = load double, ptr %arrayidx.10, align 16
%add.10 = fadd double %10, 1.000000e+00
- %arrayidx5.10 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
- store double %add.10, double* %arrayidx5.10, align 16
+ %arrayidx5.10 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.920
+ store double %add.10, ptr %arrayidx5.10, align 16
%indvars.iv.next.1021 = or i64 %indvars.iv, 11
- %arrayidx.11 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
- %11 = load double, double* %arrayidx.11, align 8
+ %arrayidx.11 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1021
+ %11 = load double, ptr %arrayidx.11, align 8
%add.11 = fadd double %11, 1.000000e+00
- %arrayidx5.11 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
- store double %add.11, double* %arrayidx5.11, align 8
+ %arrayidx5.11 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1021
+ store double %add.11, ptr %arrayidx5.11, align 8
%indvars.iv.next.1122 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
- %12 = load double, double* %arrayidx.12, align 32
+ %arrayidx.12 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1122
+ %12 = load double, ptr %arrayidx.12, align 32
%add.12 = fadd double %12, 1.000000e+00
- %arrayidx5.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
- store double %add.12, double* %arrayidx5.12, align 32
+ %arrayidx5.12 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1122
+ store double %add.12, ptr %arrayidx5.12, align 32
%indvars.iv.next.1223 = or i64 %indvars.iv, 13
- %arrayidx.13 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
- %13 = load double, double* %arrayidx.13, align 8
+ %arrayidx.13 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1223
+ %13 = load double, ptr %arrayidx.13, align 8
%add.13 = fadd double %13, 1.000000e+00
- %arrayidx5.13 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
- store double %add.13, double* %arrayidx5.13, align 8
+ %arrayidx5.13 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1223
+ store double %add.13, ptr %arrayidx5.13, align 8
%indvars.iv.next.1324 = or i64 %indvars.iv, 14
- %arrayidx.14 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
- %14 = load double, double* %arrayidx.14, align 16
+ %arrayidx.14 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1324
+ %14 = load double, ptr %arrayidx.14, align 16
%add.14 = fadd double %14, 1.000000e+00
- %arrayidx5.14 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
- store double %add.14, double* %arrayidx5.14, align 16
+ %arrayidx5.14 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1324
+ store double %add.14, ptr %arrayidx5.14, align 16
%indvars.iv.next.1425 = or i64 %indvars.iv, 15
- %arrayidx.15 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
- %15 = load double, double* %arrayidx.15, align 8
+ %arrayidx.15 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1425
+ %15 = load double, ptr %arrayidx.15, align 8
%add.15 = fadd double %15, 1.000000e+00
- %arrayidx5.15 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
- store double %add.15, double* %arrayidx5.15, align 8
+ %arrayidx5.15 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1425
+ store double %add.15, ptr %arrayidx5.15, align 8
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
%exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
br i1 %exitcond.15, label %for.end, label %for.body3
for.end: ; preds = %for.body3
- %call = tail call i32 @dummy(double* getelementptr inbounds ([16000 x double], [16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
+ %call = tail call i32 @dummy(ptr @X, ptr @Y, ptr @Z, ptr @U, ptr @V, ptr @aa, ptr @bb, ptr @cc, double 0.000000e+00) nounwind
%inc7 = add nsw i32 %nl.010, 1
%exitcond = icmp eq i32 %inc7, 400000
br i1 %exitcond, label %for.end8, label %for.cond1.preheader
; CHECK: bdnz
}
-declare i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
+declare i32 @dummy(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, double)
target triple = "powerpc-ellcc-linux"
; Function Attrs: nounwind
-define void @foo1(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+define void @foo1(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128, i128* %b, align 16
- %1 = load i128, i128* %c, align 16
+ %0 = load i128, ptr %b, align 16
+ %1 = load i128, ptr %c, align 16
%shl = shl i128 %0, %1
- store i128 %shl, i128* %a, align 16
+ store i128 %shl, ptr %a, align 16
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
}
; Function Attrs: nounwind
-define void @foo2(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+define void @foo2(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128, i128* %b, align 16
- %1 = load i128, i128* %c, align 16
+ %0 = load i128, ptr %b, align 16
+ %1 = load i128, ptr %c, align 16
%shl = ashr i128 %0, %1
- store i128 %shl, i128* %a, align 16
+ store i128 %shl, ptr %a, align 16
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
}
; Function Attrs: nounwind
-define void @foo3(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+define void @foo3(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i128, i128* %b, align 16
- %1 = load i128, i128* %c, align 16
+ %0 = load i128, ptr %b, align 16
+ %1 = load i128, ptr %c, align 16
%shl = lshr i128 %0, %1
- store i128 %shl, i128* %a, align 16
+ store i128 %shl, ptr %a, align 16
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%Sum.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @arr, i64 0, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @arr, i64 0, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %Sum.05
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool = icmp eq i64 %indvars.iv, 0
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 2, %entry ], [ %indvars.iv.next, %for.body ]
%Sum.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @arr, i64 0, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @arr, i64 0, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %Sum.05
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool = icmp eq i64 %indvars.iv, 0
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 3, %entry ], [ %indvars.iv.next, %for.body ]
%Sum.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @arr, i64 0, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @arr, i64 0, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %Sum.05
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%tobool = icmp eq i64 %indvars.iv, 0
; CHECK: blr
entry:
- %.pre = load i32, i32* @a, align 4
+ %.pre = load i32, ptr @a, align 4
br label %for.body
for.body: ; preds = %entry, %if.end
br i1 %tobool1, label %if.end, label %if.then
if.then: ; preds = %for.body
- store i32 2, i32* @a, align 4
+ store i32 2, ptr @a, align 4
br label %if.end
if.end: ; preds = %for.body, %if.then
; CHECK-PWR8-NOT: mtctr
entry:
- %.prea = load i32, i32* @a, align 4
- %.preb = load i32, i32* @b, align 4
- %.prec = load i32, i32* @c, align 4
- %.pred = load i32, i32* @d, align 4
- %.pree = load i32, i32* @e, align 4
- %.pref = load i32, i32* @f, align 4
+ %.prea = load i32, ptr @a, align 4
+ %.preb = load i32, ptr @b, align 4
+ %.prec = load i32, ptr @c, align 4
+ %.pred = load i32, ptr @d, align 4
+ %.pree = load i32, ptr @e, align 4
+ %.pref = load i32, ptr @f, align 4
br label %for.body
for.body: ; preds = %entry, %for.body
br i1 %tobool, label %for.end, label %for.body
for.end: ; preds = %for.body
- store i32 %6, i32* @a, align 4
- store i32 %7, i32* @b, align 4
- store i32 %8, i32* @c, align 4
- store i32 %9, i32* @d, align 4
- store i32 %10, i32* @e, align 4
- store i32 %11, i32* @f, align 4
+ store i32 %6, ptr @a, align 4
+ store i32 %7, ptr @b, align 4
+ store i32 %8, ptr @c, align 4
+ store i32 %9, ptr @d, align 4
+ store i32 %10, ptr @e, align 4
+ store i32 %11, ptr @f, align 4
ret i32 0
}
@.str = private unnamed_addr constant [23 x i8] c"Sum(Array[%d,%d] = %d\0A\00", align 1
-define i32 @SumArray([100 x i32]* nocapture %Array, i32 %NumI, i32 %NumJ) nounwind readonly {
+define i32 @SumArray(ptr nocapture %Array, i32 %NumI, i32 %NumJ) nounwind readonly {
entry:
%cmp12 = icmp eq i32 %NumI, 0
br i1 %cmp12, label %for.end8, label %for.cond1.preheader.lr.ph
for.body3.us: ; preds = %for.body3.us, %for.body3.lr.ph.us
%indvars.iv = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next, %for.body3.us ]
%Result.111.us = phi i32 [ %Result.014.us, %for.body3.lr.ph.us ], [ %add.us, %for.body3.us ]
- %arrayidx5.us = getelementptr inbounds [100 x i32], [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx5.us, align 4
+ %arrayidx5.us = getelementptr inbounds [100 x i32], ptr %Array, i64 %indvars.iv16, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx5.us, align 4
%add.us = add nsw i32 %0, %Result.111.us
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%indvars.iv33 = phi i64 [ 0, %entry ], [ %indvars.iv.next34, %for.body ]
%0 = trunc i64 %indvars.iv33 to i32
%sub = sub i32 0, %0
- %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
- store i32 %sub, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], ptr %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
+ store i32 %sub, ptr %arrayidx2, align 4
%indvars.iv.next34 = add i64 %indvars.iv33, 1
%lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
%exitcond36 = icmp eq i32 %lftr.wideiv35, 100
if.then: ; preds = %for.body8
%3 = add i64 %indvars.iv, %indvars.iv29
- %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
+ %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], ptr %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
%4 = trunc i64 %3 to i32
- store i32 %4, i32* %arrayidx13, align 4
+ store i32 %4, ptr %arrayidx13, align 4
br label %for.inc14
for.inc14: ; preds = %for.body8, %if.then
for.body3.us.i: ; preds = %for.body3.lr.ph.us.i, %for.body3.us.i
%indvars.iv.i = phi i64 [ 0, %for.body3.lr.ph.us.i ], [ %indvars.iv.next.i, %for.body3.us.i ]
%Result.111.us.i = phi i32 [ %Result.014.us.i, %for.body3.lr.ph.us.i ], [ %add.us.i, %for.body3.us.i ]
- %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
- %5 = load i32, i32* %arrayidx5.us.i, align 4
+ %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]], ptr %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
+ %5 = load i32, ptr %arrayidx5.us.i, align 4
%add.us.i = add nsw i32 %5, %Result.111.us.i
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%lftr.wideiv = trunc i64 %indvars.iv.next.i to i32
br label %for.body3.us.i
SumArray.exit: ; preds = %for.inc6.us.i
- %call20 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i64 0, i64 0), i32 100, i32 100, i32 %add.us.i) nounwind
+ %call20 = call i32 (ptr, ...) @printf(ptr @.str, i32 100, i32 100, i32 %add.us.i) nounwind
ret i32 0
; CHECK: @main
; CHECK: bdnz
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
%add.i = or i128 %jj, undef
%div.i = udiv i128 %add.i, %jj
%conv3.i11 = trunc i128 %div.i to i64
- store i64 %conv3.i11, i64* undef, align 8
+ store i64 %conv3.i11, ptr undef, align 8
%cmp = icmp eq i64 %i.018, 0
br i1 %cmp, label %for.end, label %for.body
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp eq i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %cleanup, !prof !1
if.end:
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp eq i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %cleanup, !prof !2
if.end:
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp eq i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %cleanup
if.end:
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp ne i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp ne i32 %0, 0
br i1 %tobool, label %cleanup, label %if.end, !prof !2
if.end:
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp ne i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp ne i32 %0, 0
br i1 %tobool, label %cleanup, label %if.end, !prof !1
if.end:
entry:
%a = alloca [1000 x i32], align 4
- %0 = bitcast [1000 x i32]* %a to i8*
br label %for.body
for.body:
%i.013 = phi i64 [ 0, %entry ], [ %inc, %if.end ]
%b.012 = phi i64 [ 0, %entry ], [ %xor, %if.end ]
- %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %a, i64 0, i64 %i.013
- %1 = load i32, i32* %arrayidx, align 4
- %tobool = icmp ne i32 %1, 0
+ %arrayidx = getelementptr inbounds [1000 x i32], ptr %a, i64 0, i64 %i.013
+ %0 = load i32, ptr %arrayidx, align 4
+ %tobool = icmp ne i32 %0, 0
br i1 %tobool, label %cleanup, label %if.end
if.end:
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%mul = mul nsw i32 %i.02, %c
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %mul
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%phi = phi i32 [ %dec, %for.body ], [ %inp, %entry ]
- %load = ptrtoint i8* @tls_var to i32
+ %load = ptrtoint ptr @tls_var to i32
%val = add i32 %load, %phi
%dec = add i32 %phi, -1
%cmp = icmp sgt i32 %phi, 1
@x = common global double 0.000000e+00, align 8
define void @foo1() #0 {
- store double 1.100000e+00, double* @y, align 8
- store double 1.100000e+00, double* @x, align 8
+ store double 1.100000e+00, ptr @y, align 8
+ store double 1.100000e+00, ptr @x, align 8
br label %2
; <label>:1 ; preds = %2
%.lcssa = phi double [ %4, %2 ]
- store double %.lcssa, double* @y, align 8
+ store double %.lcssa, ptr @y, align 8
ret void
; <label>:2 ; preds = %2, %0
}
define void @foo2() #0 {
- store double 1.100000e+00, double* @y, align 8
- store double 1.100000e+00, double* @x, align 8
+ store double 1.100000e+00, ptr @y, align 8
+ store double 1.100000e+00, ptr @x, align 8
br label %2
; <label>:1 ; preds = %2
%.lcssa = phi double [ %4, %2 ]
- store double %.lcssa, double* @y, align 8
+ store double %.lcssa, ptr @y, align 8
ret void
; <label>:2 ; preds = %2, %0
}
define void @foo3() #0 {
- store double 1.100000e+00, double* @y, align 8
- store double 1.100000e+00, double* @x, align 8
+ store double 1.100000e+00, ptr @y, align 8
+ store double 1.100000e+00, ptr @x, align 8
br label %2
; <label>:1 ; preds = %2
%.lcssa = phi double [ %4, %2 ]
- store double %.lcssa, double* @y, align 8
+ store double %.lcssa, ptr @y, align 8
ret void
; <label>:2 ; preds = %2, %0
}
define void @foo4() #0 {
- store double 1.100000e+00, double* @y, align 8
- store double 1.100000e+00, double* @x, align 8
+ store double 1.100000e+00, ptr @y, align 8
+ store double 1.100000e+00, ptr @x, align 8
br label %2
; <label>:1 ; preds = %2
%.lcssa = phi double [ %4, %2 ]
- store double %.lcssa, double* @y, align 8
+ store double %.lcssa, ptr @y, align 8
ret void
; <label>:2 ; preds = %2, %0
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%mul = mul nsw i32 %i.02, %c
- %0 = load volatile i32, i32* @a, align 4
+ %0 = load volatile i32, ptr @a, align 4
%add = add nsw i32 %0, %mul
- store volatile i32 %add, i32* @a, align 4
+ store volatile i32 %add, ptr @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%phi = phi i32 [ %dec, %for.body ], [ %inp, %entry ]
- %load = ptrtoint i8* @tls_var to i32
+ %load = ptrtoint ptr @tls_var to i32
%val = add i32 %load, %phi
%dec = add i32 %phi, -1
%cmp = icmp sgt i32 %phi, 1
; RUN: llc --verify-machineinstrs -mtriple powerpc-unknown-freebsd \
; RUN: -mcpu=pwr4 < %s | FileCheck %s
; NOTE(review): unified-diff hunk of a FileCheck test (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Loads *llp, stores *llp + 1 back (post-increment), and returns the ORIGINAL value converted to double.
; CHECK lines pin a frame setup/teardown (stwu/addi on r1) around the conversion.
-define double @postinctodbl(i64* nocapture %llp) #0 {
+define double @postinctodbl(ptr nocapture %llp) #0 {
; CHECK-LABEL: postinctodbl:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -16(1)
; CHECK-NEXT: addi 1, 1, 16
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* %llp, align 8
+ %0 = load i64, ptr %llp, align 8
%inc = add nsw i64 %0, 1
- store i64 %inc, i64* %llp, align 8
+ store i64 %inc, ptr %llp, align 8
%conv = sitofp i64 %0 to double
ret double %conv
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; cxx_fast_tlscc TLS wrapper: first call runs the guarded initializer (construct @sg,
; register @_ZN1SD1Ev via @_tlv_atexit), subsequent calls just return @sg.
; The '+' side drops the bitcast/GEP constant expressions that opaque pointers make redundant.
@__tls_guard = internal thread_local unnamed_addr global i1 false
@sum1 = internal thread_local global i32 0, align 4
-declare void @_ZN1SC1Ev(%struct.S*)
-declare void @_ZN1SD1Ev(%struct.S*)
-declare i32 @_tlv_atexit(void (i8*)*, i8*, i8*)
+declare void @_ZN1SC1Ev(ptr)
+declare void @_ZN1SD1Ev(ptr)
+declare i32 @_tlv_atexit(ptr, ptr, ptr)
-define cxx_fast_tlscc nonnull %struct.S* @_ZTW2sg() nounwind {
+define cxx_fast_tlscc nonnull ptr @_ZTW2sg() nounwind {
; CHECK-LABEL: _ZTW2sg:
; CHECK: # %bb.0:
; CHECK-NEXT: mflr 0
; CHECK-NEXT: ld 30, -16(1) # 8-byte Folded Reload
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
- %.b.i = load i1, i1* @__tls_guard, align 1
+ %.b.i = load i1, ptr @__tls_guard, align 1
br i1 %.b.i, label %__tls_init.exit, label %init.i
init.i:
- store i1 true, i1* @__tls_guard, align 1
- tail call void @_ZN1SC1Ev(%struct.S* nonnull @sg) #2
- %1 = tail call i32 @_tlv_atexit(void (i8*)* nonnull bitcast (void (%struct.S*)* @_ZN1SD1Ev to void (i8*)*), i8* nonnull getelementptr inbounds (%struct.S, %struct.S* @sg, i64 0, i32 0), i8* nonnull @__dso_handle) #2
+ store i1 true, ptr @__tls_guard, align 1
+ tail call void @_ZN1SC1Ev(ptr nonnull @sg) #2
+ %1 = tail call i32 @_tlv_atexit(ptr nonnull @_ZN1SD1Ev, ptr nonnull @sg, ptr nonnull @__dso_handle) #2
br label %__tls_init.exit
__tls_init.exit:
- ret %struct.S* @sg
+ ret ptr @sg
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Returns the address of TLS variable @sum1; CHECK lines pin local-exec TLS addressing (sum1@tprel@ha/@l off r13).
-define cxx_fast_tlscc nonnull i32* @_ZTW4sum1() nounwind {
+define cxx_fast_tlscc nonnull ptr @_ZTW4sum1() nounwind {
; CHECK-LABEL: _ZTW4sum1:
; CHECK: # %bb.0:
; CHECK-NEXT: addis 3, 13, sum1@tprel@ha
; CHECK-NEXT: addi 3, 3, sum1@tprel@l
; CHECK-NEXT: blr
- ret i32* @sum1
+ ret ptr @sum1
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Same as _ZTW4sum1 but with attribute set #0 ("frame-pointer"="all"); expects identical tprel addressing.
-define cxx_fast_tlscc i32* @_ZTW4sum2() #0 {
+define cxx_fast_tlscc ptr @_ZTW4sum2() #0 {
; CHECK-LABEL: _ZTW4sum2:
; CHECK: # %bb.0:
; CHECK-NEXT: addis 3, 13, sum1@tprel@ha
; CHECK-NEXT: addi 3, 3, sum1@tprel@l
; CHECK-NEXT: blr
- ret i32* @sum1
+ ret ptr @sum1
}
attributes #0 = { nounwind "frame-pointer"="all" }
target datalayout = "E-p:32:32"
target triple = "powerpc-unknown-linux-gnu.2.0"
-@"foo bar" = global i32 4 ; <i32*> [#uses=0]
+@"foo bar" = global i32 4 ; <ptr> [#uses=0]
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Debug-info test body: emits dbg.value for both params, returns argc + 1.
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind readnone !dbg !5 {
+define i32 @main(i32 %argc, ptr nocapture %argv) nounwind readnone !dbg !5 {
entry:
tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !15, metadata !DIExpression()), !dbg !17
- tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !16, metadata !DIExpression()), !dbg !18
+ tail call void @llvm.dbg.value(metadata ptr %argv, i64 0, metadata !16, metadata !DIExpression()), !dbg !18
%add = add nsw i32 %argc, 1, !dbg !19
ret i32 %add, !dbg !19
}
; RUN: -ppc-vsr-nums-as-vr -mcpu=pwr10 | FileCheck %s
; Function Attrs: nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; llvm.ppc.dcbfps on a+3 must lower to addi r3,r3,3 followed by dcbfps 0,r3.
-define void @dcbfps_test(i8* %a) {
+define void @dcbfps_test(ptr %a) {
; CHECK-LABEL: dcbfps_test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 3
; CHECK-NEXT: dcbfps 0, r3
; CHECK-NEXT: blr
entry:
- %add.a = getelementptr inbounds i8, i8* %a, i64 3
- tail call void @llvm.ppc.dcbfps(i8* %add.a)
+ %add.a = getelementptr inbounds i8, ptr %a, i64 3
+ tail call void @llvm.ppc.dcbfps(ptr %add.a)
ret void
}
-declare void @llvm.ppc.dcbfps(i8*)
+declare void @llvm.ppc.dcbfps(ptr)
; Function Attrs: nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; llvm.ppc.dcbstps on a+3 must lower to addi r3,r3,3 followed by dcbstps 0,r3.
-define void @dcbstps_test(i8* %a) {
+define void @dcbstps_test(ptr %a) {
; CHECK-LABEL: dcbstps_test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 3
; CHECK-NEXT: dcbstps 0, r3
; CHECK-NEXT: blr
entry:
- %add.a = getelementptr inbounds i8, i8* %a, i64 3
- tail call void @llvm.ppc.dcbstps(i8* %add.a)
+ %add.a = getelementptr inbounds i8, ptr %a, i64 3
+ tail call void @llvm.ppc.dcbstps(ptr %add.a)
ret void
}
-declare void @llvm.ppc.dcbstps(i8*)
+declare void @llvm.ppc.dcbstps(ptr)
; RUN: llvm-objdump --mcpu=future -dr - | FileCheck %s --check-prefix=CHECK-O
; Function Attrs: nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; llvm.ppc.dcbf must lower to dcbf 0,r3 (CHECK-S prefix).
-define void @dcbf_test(i8* %a) {
+define void @dcbf_test(ptr %a) {
entry:
- tail call void @llvm.ppc.dcbf(i8* %a)
+ tail call void @llvm.ppc.dcbf(ptr %a)
; CHECK-S-LABEL: @dcbf_test
; CHECK-S: dcbf 0, r3
; CHECK-S-NEXT: blr
ret void
}
-declare void @llvm.ppc.dcbf(i8*)
+declare void @llvm.ppc.dcbf(ptr)
; Function Attrs: nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; llvm.ppc.dcbfl must lower to dcbfl 0,r3 (CHECK-S prefix).
-define void @dcbfl_test(i8* %a) {
+define void @dcbfl_test(ptr %a) {
entry:
- tail call void @llvm.ppc.dcbfl(i8* %a)
+ tail call void @llvm.ppc.dcbfl(ptr %a)
; CHECK-S-LABEL: @dcbfl_test
; CHECK-S: dcbfl 0, r3
; CHECK-S-NEXT: blr
ret void
}
-declare void @llvm.ppc.dcbfl(i8*)
+declare void @llvm.ppc.dcbfl(ptr)
; Function Attrs: nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; llvm.ppc.dcbflp on a+3 must lower to addi r3,r3,3 then dcbflp 0,r3 (CHECK-S prefix).
-define void @dcbflp_test(i8* %a) {
+define void @dcbflp_test(ptr %a) {
entry:
- %add.a = getelementptr inbounds i8, i8* %a, i64 3
- tail call void @llvm.ppc.dcbflp(i8* %add.a)
+ %add.a = getelementptr inbounds i8, ptr %a, i64 3
+ tail call void @llvm.ppc.dcbflp(ptr %add.a)
; CHECK-S-LABEL: @dcbflp_test
; CHECK-S: addi r3, r3, 3
; CHECK-S-NEXT: dcbflp 0, r3
ret void
}
-declare void @llvm.ppc.dcbflp(i8*)
+declare void @llvm.ppc.dcbflp(ptr)
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc -verify-machineinstrs -mcpu=a2 -enable-misched -enable-aa-sched-mi < %s | FileCheck %s
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; AA-aware scheduling test: two noalias loads around a prefetch; the per-file comment below
; says the second load should be hoisted above the dcbt to hide latency.
-define i8 @test1(i8* noalias %a, i8* noalias %b, i8* noalias %c) nounwind {
+define i8 @test1(ptr noalias %a, ptr noalias %b, ptr noalias %c) nounwind {
entry:
- %q = load i8, i8* %b
- call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
- %r = load i8, i8* %c
+ %q = load i8, ptr %b
+ call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 1)
+ %r = load i8, ptr %c
%s = add i8 %q, %r
ret i8 %s
}
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
; Test that we've moved the second load to before the dcbt to better
; hide its latency.
; RUN: -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr | FileCheck %s
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbt.with.hint, hint 0: expects plain "dcbt 0, r3" (no TH operand printed).
-define void @dcbt_with_hint_test1(i8* %a) {
+define void @dcbt_with_hint_test1(ptr %a) {
; CHECK-LABEL: dcbt_with_hint_test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbt 0, r3
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbt.with.hint(i8* %a, i32 0)
+ tail call void @llvm.ppc.dcbt.with.hint(ptr %a, i32 0)
ret void
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbt.with.hint, hint 8: expects "dcbt 0, r3, 8".
-define void @dcbt_with_hint_test2(i8* %a) {
+define void @dcbt_with_hint_test2(ptr %a) {
; CHECK-LABEL: dcbt_with_hint_test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbt 0, r3, 8
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbt.with.hint(i8* %a, i32 8)
+ tail call void @llvm.ppc.dcbt.with.hint(ptr %a, i32 8)
ret void
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbt.with.hint, hint 15 (max TH value tested): expects "dcbt 0, r3, 15".
-define void @dcbt_with_hint_test3(i8* %a) {
+define void @dcbt_with_hint_test3(ptr %a) {
; CHECK-LABEL: dcbt_with_hint_test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbt 0, r3, 15
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbt.with.hint(i8* %a, i32 15)
+ tail call void @llvm.ppc.dcbt.with.hint(ptr %a, i32 15)
ret void
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbtst.with.hint, hint 0: expects plain "dcbtst 0, r3".
-define void @dcbtst_with_hint_test1(i8* %a) {
+define void @dcbtst_with_hint_test1(ptr %a) {
; CHECK-LABEL: dcbtst_with_hint_test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbtst 0, r3
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbtst.with.hint(i8* %a, i32 0)
+ tail call void @llvm.ppc.dcbtst.with.hint(ptr %a, i32 0)
ret void
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbtst.with.hint, hint 8: expects "dcbtst 0, r3, 8".
-define void @dcbtst_with_hint_test2(i8* %a) {
+define void @dcbtst_with_hint_test2(ptr %a) {
; CHECK-LABEL: dcbtst_with_hint_test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbtst 0, r3, 8
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbtst.with.hint(i8* %a, i32 8)
+ tail call void @llvm.ppc.dcbtst.with.hint(ptr %a, i32 8)
ret void
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; dcbtst.with.hint, hint 15: expects "dcbtst 0, r3, 15"; shared declares for both hint intrinsics follow.
-define void @dcbtst_with_hint_test3(i8* %a) {
+define void @dcbtst_with_hint_test3(ptr %a) {
; CHECK-LABEL: dcbtst_with_hint_test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: dcbtst 0, r3, 15
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.dcbtst.with.hint(i8* %a, i32 15)
+ tail call void @llvm.ppc.dcbtst.with.hint(ptr %a, i32 15)
ret void
}
-declare void @llvm.ppc.dcbt.with.hint(i8*, i32)
-declare void @llvm.ppc.dcbtst.with.hint(i8*, i32)
+declare void @llvm.ppc.dcbt.with.hint(ptr, i32)
+declare void @llvm.ppc.dcbtst.with.hint(ptr, i32)
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
%sum = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store i32 %b, i32* %b.addr, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
br label %top
top: ; preds = %entry
call void @llvm.dbg.label(metadata !8), !dbg !9
- %0 = load i32, i32* %a.addr, align 4
- %1 = load i32, i32* %b.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
%add = add nsw i32 %0, %1
- store i32 %add, i32* %sum, align 4
+ store i32 %add, ptr %sum, align 4
br label %done
done: ; preds = %top
call void @llvm.dbg.label(metadata !10), !dbg !11
- %2 = load i32, i32* %sum, align 4
+ %2 = load i32, ptr %sum, align 4
ret i32 %2
}
%1 = alloca i64, align 8
%2 = tail call i64 @foo()
tail call void @llvm.dbg.value(metadata i64 %2, metadata !10, metadata !DIExpression()), !dbg !13
- store volatile i64 %2, i64* %1, align 8
+ store volatile i64 %2, ptr %1, align 8
ret void
}
; ISel is ignoring dead nodes, though it would be preferable for
; DAGCombiner to be able to eliminate the dead node.
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Infinite loop with loads/stores through null; per the per-file comment above, this exercises
; ISel's handling of dead nodes. Parameters are unused in the visible body.
-define void @GrayATo32ARGBTabB(i8* %baseAddr, i16** %cmp, i32 %rowBytes) nounwind {
+define void @GrayATo32ARGBTabB(ptr %baseAddr, ptr %cmp, i32 %rowBytes) nounwind {
entry:
br label %bb1
bb1: ; preds = %bb1, %entry
- %0 = load i16, i16* null, align 2 ; <i16> [#uses=1]
+ %0 = load i16, ptr null, align 2 ; <i16> [#uses=1]
%1 = ashr i16 %0, 4 ; <i16> [#uses=1]
%2 = sext i16 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr i8, i8* null, i32 %2 ; <i8*> [#uses=1]
- %4 = load i8, i8* %3, align 1 ; <i8> [#uses=1]
+ %3 = getelementptr i8, ptr null, i32 %2 ; <ptr> [#uses=1]
+ %4 = load i8, ptr %3, align 1 ; <i8> [#uses=1]
%5 = zext i8 %4 to i32 ; <i32> [#uses=1]
%6 = shl i32 %5, 24 ; <i32> [#uses=1]
%7 = or i32 0, %6 ; <i32> [#uses=1]
- store i32 %7, i32* null, align 4
+ store i32 %7, ptr null, align 4
br label %bb1
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr9 < %s | FileCheck %s
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Overlapping loads from p (two <2 x i64> vectors plus four i64 scalars) are multiplied together
; and count is added; CHECK lines pin that the whole chain folds to li -13 + a single maddld.
; The '+' side renumbers unnamed values because the bitcast instructions are dropped.
-define dso_local i64 @test1(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
+define dso_local i64 @test1(ptr nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, -13
; CHECK-NEXT: maddld 3, 5, 3, 4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 -13
- %0 = bitcast i8* %add.ptr to <2 x i64>*
- %1 = load <2 x i64>, <2 x i64>* %0, align 16
- %add.ptr1 = getelementptr inbounds i8, i8* %p, i64 19
- %2 = bitcast i8* %add.ptr1 to <2 x i64>*
- %3 = load <2 x i64>, <2 x i64>* %2, align 16
- %add.ptr3 = getelementptr inbounds i8, i8* %p, i64 3
- %4 = bitcast i8* %add.ptr3 to i64*
- %5 = load i64, i64* %4, align 8
- %add.ptr5 = getelementptr inbounds i8, i8* %p, i64 7
- %6 = bitcast i8* %add.ptr5 to i64*
- %7 = load i64, i64* %6, align 8
- %add.ptr7 = getelementptr inbounds i8, i8* %p, i64 11
- %8 = bitcast i8* %add.ptr7 to i64*
- %9 = load i64, i64* %8, align 8
- %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 15
- %10 = bitcast i8* %add.ptr9 to i64*
- %11 = load i64, i64* %10, align 8
- %vecext = extractelement <2 x i64> %1, i32 1
- %vecext13 = extractelement <2 x i64> %1, i32 0
- %vecext15 = extractelement <2 x i64> %3, i32 0
- %vecext17 = extractelement <2 x i64> %3, i32 1
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 -13
+ %0 = load <2 x i64>, ptr %add.ptr, align 16
+ %add.ptr1 = getelementptr inbounds i8, ptr %p, i64 19
+ %1 = load <2 x i64>, ptr %add.ptr1, align 16
+ %add.ptr3 = getelementptr inbounds i8, ptr %p, i64 3
+ %2 = load i64, ptr %add.ptr3, align 8
+ %add.ptr5 = getelementptr inbounds i8, ptr %p, i64 7
+ %3 = load i64, ptr %add.ptr5, align 8
+ %add.ptr7 = getelementptr inbounds i8, ptr %p, i64 11
+ %4 = load i64, ptr %add.ptr7, align 8
+ %add.ptr9 = getelementptr inbounds i8, ptr %p, i64 15
+ %5 = load i64, ptr %add.ptr9, align 8
+ %vecext = extractelement <2 x i64> %0, i32 1
+ %vecext13 = extractelement <2 x i64> %0, i32 0
+ %vecext15 = extractelement <2 x i64> %1, i32 0
+ %vecext17 = extractelement <2 x i64> %1, i32 1
%mul = mul i64 %vecext13, %vecext
- %mul10 = mul i64 %mul, %5
+ %mul10 = mul i64 %mul, %2
%mul11 = mul i64 %mul10, %vecext15
%mul12 = mul i64 %mul11, %vecext17
- %mul14 = mul i64 %mul12, %7
- %mul16 = mul i64 %mul14, %9
- %mul18 = mul i64 %mul16, %11
+ %mul14 = mul i64 %mul12, %3
+ %mul16 = mul i64 %mul14, %4
+ %mul18 = mul i64 %mul16, %5
%conv = sext i32 %count to i64
%add19 = add i64 %mul18, %conv
ret i64 %add19
}
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Product of three i64 loads at large offsets plus count; CHECK lines pin folding to li 0 + maddld.
-define dso_local i64 @test2(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
+define dso_local i64 @test2(ptr nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: maddld 3, 5, 3, 4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 40009
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %add.ptr2 = getelementptr inbounds i8, i8* %p, i64 40001
- %2 = bitcast i8* %add.ptr2 to i64*
- %3 = load i64, i64* %2, align 8
- %add.ptr4 = getelementptr inbounds i8, i8* %p, i64 40005
- %4 = bitcast i8* %add.ptr4 to i64*
- %5 = load i64, i64* %4, align 8
- %mul = mul i64 %3, %1
- %mul5 = mul i64 %mul, %5
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 40009
+ %0 = load i64, ptr %add.ptr, align 8
+ %add.ptr2 = getelementptr inbounds i8, ptr %p, i64 40001
+ %1 = load i64, ptr %add.ptr2, align 8
+ %add.ptr4 = getelementptr inbounds i8, ptr %p, i64 40005
+ %2 = load i64, ptr %add.ptr4, align 8
+ %mul = mul i64 %1, %0
+ %mul5 = mul i64 %mul, %2
%conv = sext i32 %count to i64
%add6 = add i64 %mul5, %conv
ret i64 %add6
}
-define dso_local i64 @test3(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr {
+define dso_local i64 @test3(ptr nocapture readonly %p, i32 signext %count) local_unnamed_addr {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis 5, 1
; CHECK-NEXT: maddld 3, 5, 3, 4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 80033
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %add.ptr2 = getelementptr inbounds i8, i8* %p, i64 80001
- %2 = bitcast i8* %add.ptr2 to i64*
- %3 = load i64, i64* %2, align 8
- %add.ptr4 = getelementptr inbounds i8, i8* %p, i64 80017
- %4 = bitcast i8* %add.ptr4 to i64*
- %5 = load i64, i64* %4, align 8
- %mul = mul i64 %3, %1
- %mul5 = mul i64 %mul, %5
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 80033
+ %0 = load i64, ptr %add.ptr, align 8
+ %add.ptr2 = getelementptr inbounds i8, ptr %p, i64 80001
+ %1 = load i64, ptr %add.ptr2, align 8
+ %add.ptr4 = getelementptr inbounds i8, ptr %p, i64 80017
+ %2 = load i64, ptr %add.ptr4, align 8
+ %mul = mul i64 %1, %0
+ %mul5 = mul i64 %mul, %2
%conv = sext i32 %count to i64
%add6 = add i64 %mul5, %conv
ret i64 %add6
; This test checks that LSR properly recognizes lxvp/stxvp as load/store
; intrinsics to avoid generating x-form instructions instead of d-forms.
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
-declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*)
-define void @foo(i32 zeroext %n, <256 x i1>* %ptr, <256 x i1>* %ptr2) {
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr)
+declare void @llvm.ppc.vsx.stxvp(<256 x i1>, ptr)
+define void @foo(i32 zeroext %n, ptr %ptr, ptr %ptr2) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmplwi r3, 0
br i1 %cmp35.not, label %for.cond.cleanup, label %for.body.lr.ph
for.body.lr.ph:
- %0 = bitcast <256 x i1>* %ptr to i8*
- %1 = bitcast <256 x i1>* %ptr2 to i8*
%wide.trip.count = zext i32 %n to i64
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
- %2 = getelementptr i8, i8* %0, i64 %indvars.iv
- %3 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %2)
+ %0 = getelementptr i8, ptr %ptr, i64 %indvars.iv
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
%add2 = add nuw nsw i64 %indvars.iv, 32
- %4 = getelementptr i8, i8* %0, i64 %add2
- %5 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %4)
+ %2 = getelementptr i8, ptr %ptr, i64 %add2
+ %3 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %2)
%add4 = add nuw nsw i64 %indvars.iv, 64
- %6 = getelementptr i8, i8* %0, i64 %add4
- %7 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %6)
+ %4 = getelementptr i8, ptr %ptr, i64 %add4
+ %5 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %4)
%add6 = add nuw nsw i64 %indvars.iv, 96
- %8 = getelementptr i8, i8* %0, i64 %add6
- %9 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %8)
- %10 = getelementptr i8, i8* %1, i64 %indvars.iv
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %3, i8* %10)
- %11 = getelementptr i8, i8* %1, i64 %add2
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %5, i8* %11)
- %12 = getelementptr i8, i8* %1, i64 %add4
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %7, i8* %12)
- %13 = getelementptr i8, i8* %1, i64 %add6
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %9, i8* %13)
+ %6 = getelementptr i8, ptr %ptr, i64 %add6
+ %7 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %6)
+ %8 = getelementptr i8, ptr %ptr2, i64 %indvars.iv
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %8)
+ %9 = getelementptr i8, ptr %ptr2, i64 %add2
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %3, ptr %9)
+ %10 = getelementptr i8, ptr %ptr2, i64 %add4
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %5, ptr %10)
+ %11 = getelementptr i8, ptr %ptr2, i64 %add6
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %7, ptr %11)
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; RUN: llc -verify-machineinstrs -O2 -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
; Function Attrs: norecurse nounwind
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; a[n] = (float)b[n] * constant; CHECK-NOT pins that no mtvsrwa GPR->VSR move is emitted.
-define void @test1(float* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readnone %c, i32 signext %n) #0 {
+define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ptr nocapture readnone %c, i32 signext %n) #0 {
; CHECK-LABEL: test1
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !1
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !1
%conv = sitofp i32 %0 to float
%mul = fmul float %conv, 0x4002916880000000
- %arrayidx2 = getelementptr inbounds float, float* %a, i64 %idxprom
- store float %mul, float* %arrayidx2, align 4, !tbaa !5
+ %arrayidx2 = getelementptr inbounds float, ptr %a, i64 %idxprom
+ store float %mul, ptr %arrayidx2, align 4, !tbaa !5
ret void
; CHECK-NOT: mtvsrwa
}
; Function Attrs: norecurse nounwind readonly
; NOTE(review): unified-diff hunk (typed -> opaque pointer migration); code kept byte-identical, comments only.
; Returns (float)*b * constant; readonly companion to test1 above.
-define float @test2(i32* nocapture readonly %b) #0 {
+define float @test2(ptr nocapture readonly %b) #0 {
; CHECK-LABEL: test2
entry:
- %0 = load i32, i32* %b, align 4, !tbaa !1
+ %0 = load i32, ptr %b, align 4, !tbaa !1
%conv = sitofp i32 %0 to float
%mul = fmul float %conv, 0x40030A3D80000000
ret float %mul
}
; Function Attrs: norecurse nounwind
-define void @test3(float* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 signext %n) #0 {
+define void @test3(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 signext %n) #0 {
; CHECK-LABEL: test3
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4, !tbaa !1
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4, !tbaa !1
%conv = sitofp i32 %0 to float
%mul = fmul float %conv, 0x4002916880000000
- %arrayidx2 = getelementptr inbounds float, float* %a, i64 %idxprom
- store float %mul, float* %arrayidx2, align 4, !tbaa !5
- %arrayidx6 = getelementptr inbounds i32, i32* %c, i64 %idxprom
- %1 = load i32, i32* %arrayidx6, align 4, !tbaa !1
+ %arrayidx2 = getelementptr inbounds float, ptr %a, i64 %idxprom
+ store float %mul, ptr %arrayidx2, align 4, !tbaa !5
+ %arrayidx6 = getelementptr inbounds i32, ptr %c, i64 %idxprom
+ %1 = load i32, ptr %arrayidx6, align 4, !tbaa !1
%add = add nsw i32 %1, %0
- store i32 %add, i32* %arrayidx6, align 4, !tbaa !1
+ store i32 %add, ptr %arrayidx6, align 4, !tbaa !1
ret void
; CHECK: mtfprwa
; RUN: llc -mtriple=ppc64le -relocation-model=pic < %s | FileCheck %s
@default = global i32 55
-define dso_local i32* @get_default_global() {
+define dso_local ptr @get_default_global() {
; CHECK-LABEL: get_default_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @default
+ ret ptr @default
}
@local_global = dso_local global i32 55
-define dso_local i32* @get_local_global() {
+define dso_local ptr @get_local_global() {
; CHECK-LABEL: get_local_global:
; CHECK: addis 3, 2, local_global@toc@ha
; CHECK-NEXT: addi 3, 3, local_global@toc@l
; CHECK-NEXT: blr
- ret i32* @local_global
+ ret ptr @local_global
}
@preemptable_global = dso_preemptable global i32 42
-define dso_local i32* @get_preemptable_global() {
+define dso_local ptr @get_preemptable_global() {
; CHECK-LABEL: get_preemptable_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @preemptable_global
+ ret ptr @preemptable_global
}
@external_default_global = external global i32
-define dso_local i32* @get_external_default_global() {
+define dso_local ptr @get_external_default_global() {
; CHECK-LABEL: get_external_default_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @external_default_global
+ ret ptr @external_default_global
}
@external_local_global = external dso_local global i32
-define dso_local i32* @get_external_local_global() {
+define dso_local ptr @get_external_local_global() {
; CHECK-LABEL: get_external_local_global:
; CHECK: addis 3, 2, external_local_global@toc@ha
; CHECK: addi 3, 3, external_local_global@toc@l
; CHECK: blr
- ret i32* @external_local_global
+ ret ptr @external_local_global
}
@external_preemptable_global = external dso_preemptable global i32
-define dso_local i32* @get_external_preemptable_global() {
+define dso_local ptr @get_external_preemptable_global() {
; CHECK-LABEL: get_external_preemptable_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @external_preemptable_global
+ ret ptr @external_preemptable_global
}
; RUN: llc -mtriple=ppc64le -relocation-model=static < %s | FileCheck %s
@default = global i32 55
-define dso_local i32* @get_default_global() {
+define dso_local ptr @get_default_global() {
; CHECK-LABEL: get_default_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @default
+ ret ptr @default
}
@local_global = dso_local global i32 55
-define dso_local i32* @get_local_global() {
+define dso_local ptr @get_local_global() {
; CHECK-LABEL: get_local_global:
; CHECK: addis 3, 2, local_global@toc@ha
; CHECK-NEXT: addi 3, 3, local_global@toc@l
; CHECK-NEXT: blr
- ret i32* @local_global
+ ret ptr @local_global
}
@preemptable_global = dso_preemptable global i32 42
-define dso_local i32* @get_preemptable_global() {
+define dso_local ptr @get_preemptable_global() {
; CHECK-LABEL: get_preemptable_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @preemptable_global
+ ret ptr @preemptable_global
}
@external_default_global = external global i32
-define dso_local i32* @get_external_default_global() {
+define dso_local ptr @get_external_default_global() {
; CHECK-LABEL: get_external_default_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @external_default_global
+ ret ptr @external_default_global
}
@external_local_global = external dso_local global i32
-define dso_local i32* @get_external_local_global() {
+define dso_local ptr @get_external_local_global() {
; CHECK-LABEL: get_external_local_global:
; CHECK: addis 3, 2, external_local_global@toc@ha
; CHECK-NEXT: addi 3, 3, external_local_global@toc@l
; CHECK-NEXT: blr
- ret i32* @external_local_global
+ ret ptr @external_local_global
}
@external_preemptable_global = external dso_preemptable global i32
-define dso_local i32* @get_external_preemptable_global() {
+define dso_local ptr @get_external_preemptable_global() {
; CHECK-LABEL: get_external_preemptable_global:
; CHECK: addis 3, 2, .LC{{.*}}@toc@ha
; CHECK-NEXT: ld 3, .LC{{.*}}@toc@l(3)
; CHECK-NEXT: blr
- ret i32* @external_preemptable_global
+ ret ptr @external_preemptable_global
}
%struct.s = type { i32, i32 }
-declare void @bar(i32*, i32*) #0
+declare void @bar(ptr, ptr) #0
-define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a, i32 signext %n) #0 {
+define void @goo(ptr byval(%struct.s) nocapture readonly %a, i32 signext %n) #0 {
entry:
%0 = zext i32 %n to i64
%vla = alloca i32, i64 %0, align 128
%vla1 = alloca i32, i64 %0, align 128
- %a2 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %1 = load i32, i32* %a2, align 4
- store i32 %1, i32* %vla1, align 128
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %2 = load i32, i32* %b, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %vla1, i64 1
- store i32 %2, i32* %arrayidx3, align 4
- call void @bar(i32* %vla1, i32* %vla) #0
+ %1 = load i32, ptr %a, align 4
+ store i32 %1, ptr %vla1, align 128
+ %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+ %2 = load i32, ptr %b, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %vla1, i64 1
+ store i32 %2, ptr %arrayidx3, align 4
+ call void @bar(ptr %vla1, ptr %vla) #0
ret void
; CHECK-LABEL: @goo
target datalayout = "E-m:e-p:32:32-i64:64-n32"
target triple = "powerpc-unknown-linux-gnu"
-define internal i32 @func_49(i64 %p_50, i16 zeroext %p_51, i8* %p_52, i32 %p_53) {
+define internal i32 @func_49(i64 %p_50, i16 zeroext %p_51, ptr %p_52, i32 %p_53) {
; CHECK-LABEL: @func_49
; CHECK-NOT: mfocrf
- %1 = load i64, i64* undef, align 8
- %2 = load i64, i64* undef, align 8
+ %1 = load i64, ptr undef, align 8
+ %2 = load i64, ptr undef, align 8
%3 = icmp sge i32 undef, undef
%4 = zext i1 %3 to i32
%5 = sext i32 %4 to i64
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define void @foo(i32* %P) #0 {
+define void @foo(ptr %P) #0 {
entry:
- %tobool = icmp eq i32* %P, null
+ %tobool = icmp eq ptr %P, null
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 0, i32* %P, align 4
+ store i32 0, ptr %P, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
; CHECK: blr
}
-define void @bar(i32* %P, i32* %Q) #0 {
+define void @bar(ptr %P, ptr %Q) #0 {
entry:
- %tobool = icmp eq i32* %P, null
+ %tobool = icmp eq ptr %P, null
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- store i32 0, i32* %P, align 4
- %tobool1 = icmp eq i32* %Q, null
+ store i32 0, ptr %P, align 4
+ %tobool1 = icmp eq ptr %Q, null
br i1 %tobool1, label %if.end3, label %if.then2
if.then2: ; preds = %if.then
- store i32 1, i32* %Q, align 4
+ store i32 1, ptr %Q, align 4
br label %if.end3
if.else: ; preds = %entry
- store i32 0, i32* %Q, align 4
+ store i32 0, ptr %Q, align 4
br label %if.end3
if.end3: ; preds = %if.then, %if.then2, %if.else
@.str2 = private unnamed_addr constant [2 x i8] c"c\00"
@.str3 = private unnamed_addr constant [2 x i8] c"d\00"
@.str4 = private unnamed_addr constant [2 x i8] c"e\00"
-define i8* @dont_assert(i32 %x) {
+define ptr @dont_assert(i32 %x) {
; LLVM would assert due to moving an early return into the jump table block and
; removing one of its predecessors despite that block ending with an indirect
; branch.
sw.bb4: br label %return
sw.epilog: br label %return
return:
- %retval.0 = phi i8* [ null, %sw.epilog ],
- [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str4, i64 0, i64 0), %sw.bb4 ],
- [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str3, i64 0, i64 0), %sw.bb3 ],
- [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), %sw.bb2 ],
- [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str1, i64 0, i64 0), %sw.bb1 ],
- [ getelementptr inbounds ([2 x i8], [2 x i8]* @.str0, i64 0, i64 0), %entry ]
- ret i8* %retval.0
+ %retval.0 = phi ptr [ null, %sw.epilog ],
+ [ @.str4, %sw.bb4 ],
+ [ @.str3, %sw.bb3 ],
+ [ @.str2, %sw.bb2 ],
+ [ @.str1, %sw.bb1 ],
+ [ @.str0, %entry ]
+ ret ptr %retval.0
}
attributes #0 = { nounwind }
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux"
-%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713 = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712*, %struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712 = type { %struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712*, %struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i32 }
+%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713 = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%struct._IO_marker.118.8248.32638.195238.200116.211498.218002.221254.222880.224506.226132.240766.244018.245644.248896.260278.271660.281416.283042.302554.304180.325318.326944.344712 = type { ptr, ptr, i32 }
@.str236 = external unnamed_addr constant [121 x i8], align 1
@.str294 = external unnamed_addr constant [49 x i8], align 1
; Function Attrs: nounwind
-declare void @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* nocapture, i8* nocapture readonly, ...) #0
+declare void @fprintf(ptr nocapture, ptr nocapture readonly, ...) #0
; Function Attrs: inlinehint nounwind
define void @_ZN4PAMI6Device2MU15ResourceManager46calculatePerCoreMUResourcesBasedOnAvailabilityEv(i32 %inp32, i64 %inp64) #1 align 2 {
entry:
%numFreeResourcesInSubgroup = alloca i32, align 4
- %0 = ptrtoint i32* %numFreeResourcesInSubgroup to i64
+ %0 = ptrtoint ptr %numFreeResourcesInSubgroup to i64
br label %for.cond2.preheader
for.cond2.preheader: ; preds = %if.end23.3, %entry
unreachable
if.end: ; preds = %for.cond2.preheader
- %1 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %1 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv = zext i32 %1 to i64
%2 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 %indvars.iv, i64 %0, i64 %inp64) #2
%cmp10 = icmp eq i32 0, 0
unreachable
if.end14: ; preds = %if.end
- %3 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %3 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%cmp19 = icmp eq i32 %inp32, 0
br i1 %cmp19, label %if.end23, label %if.then20
if.then20: ; preds = %if.end14.3, %if.end14.2, %if.end14.1, %if.end14
%conv4.i65.lcssa = phi i32 [ %inp32, %if.end14 ], [ 0, %if.end14.1 ], [ %conv4.i65.2, %if.end14.2 ], [ %conv4.i65.3, %if.end14.3 ]
- call void (%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i8*, ...) @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* undef, i8* getelementptr inbounds ([121 x i8], [121 x i8]* @.str236, i64 0, i64 0), i32 signext 2503) #3
- call void (%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713*, i8*, ...) @fprintf(%struct._IO_FILE.119.8249.32639.195239.200117.211499.218003.221255.222881.224507.226133.240767.244019.245645.248897.260279.271661.281417.283043.302555.304181.325319.326945.344713* undef, i8* getelementptr inbounds ([49 x i8], [49 x i8]* @.str294, i64 0, i64 0), i32 signext %conv4.i65.lcssa) #3
+ call void (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str236, i32 signext 2503) #3
+ call void (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str294, i32 signext %conv4.i65.lcssa) #3
unreachable
if.end23: ; preds = %if.end14
%conv15 = zext i32 %3 to i64
- %4 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %4 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv24 = zext i32 %4 to i64
%5 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1033, i64 0, i64 %0, i64 %inp64) #2
%cmp5.1 = icmp eq i32 0, 0
ret void
if.end.1: ; preds = %if.end23
- %6 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %6 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv.1 = zext i32 %6 to i64
%add.1 = add nuw nsw i64 %conv.1, %conv
%7 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 0, i64 %0, i64 %inp64) #2
br i1 %cmp10.1, label %if.end14.1, label %if.then11
if.end14.1: ; preds = %if.end.1
- %8 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %8 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%cmp19.1 = icmp eq i32 0, 0
br i1 %cmp19.1, label %if.end23.1, label %if.then20
if.end23.1: ; preds = %if.end14.1
%conv15.1 = zext i32 %8 to i64
%add16.1 = add nuw nsw i64 %conv15.1, %conv15
- %9 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %9 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv24.1 = zext i32 %9 to i64
%add25.1 = add nuw nsw i64 %conv24.1, %conv24
%cmp5.2 = icmp eq i32 %inp32, 0
br i1 %cmp5.2, label %if.end.2, label %if.then
if.end.2: ; preds = %if.end23.1
- %10 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %10 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv.2 = zext i32 %10 to i64
%add.2 = add nuw nsw i64 %conv.2, %add.1
%11 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1034, i64 %inp64, i64 %0, i64 %inp64) #2
br i1 %cmp10.2, label %if.end14.2, label %if.then11
if.end14.2: ; preds = %if.end.2
- %12 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %12 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%13 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1035, i64 %inp64, i64 %0, i64 0) #2
%asmresult1.i64.2 = extractvalue { i64, i64, i64, i64 } %13, 1
%conv4.i65.2 = trunc i64 %asmresult1.i64.2 to i32
if.end23.2: ; preds = %if.end14.2
%conv15.2 = zext i32 %12 to i64
%add16.2 = add nuw nsw i64 %conv15.2, %add16.1
- %14 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %14 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv24.2 = zext i32 %14 to i64
%add25.2 = add nuw nsw i64 %conv24.2, %add25.1
%cmp5.3 = icmp eq i32 0, 0
br i1 %cmp5.3, label %if.end.3, label %if.then
if.end.3: ; preds = %if.end23.2
- %15 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %15 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%conv.3 = zext i32 %15 to i64
%add.3 = add nuw nsw i64 %conv.3, %add.2
%cmp10.3 = icmp eq i32 %inp32, 0
br i1 %cmp10.3, label %if.end14.3, label %if.then11
if.end14.3: ; preds = %if.end.3
- %16 = load i32, i32* %numFreeResourcesInSubgroup, align 4
+ %16 = load i32, ptr %numFreeResourcesInSubgroup, align 4
%17 = call { i64, i64, i64, i64 } asm sideeffect "sc", "=&{r0},=&{r3},=&{r4},=&{r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1035, i64 0, i64 %0, i64 0) #2
%asmresult1.i64.3 = extractvalue { i64, i64, i64, i64 } %17, 1
%conv4.i65.3 = trunc i64 %asmresult1.i64.3 to i32
define void @_Z1fv() #0 {
entry:
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
- call void @_Z1gPv(i8* %0)
+ %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+ call void @_Z1gPv(ptr %0)
ret void
; CHECK-LABEL: @_Z1fv
; CHECK: blr
}
-declare void @_Z1gPv(i8*)
+declare void @_Z1gPv(ptr)
; Function Attrs: nounwind
-declare i8* @llvm.eh.dwarf.cfa(i32) #1
+declare ptr @llvm.eh.dwarf.cfa(i32) #1
attributes #0 = { "frame-pointer"="all" "target-cpu"="ppc64le" }
attributes #1 = { nounwind }
; STATIC-NEXT: stw 5, comm_glob@toc@l(4)
; STATIC-NEXT: blr
entry:
- %0 = load i32, i32* @comm_glob, align 4
+ %0 = load i32, ptr @comm_glob, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @comm_glob, align 4
+ store i32 %inc, ptr @comm_glob, align 4
ret i32 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem1(%struct_S1* byval(%struct_S1) align 1 @gS1)
+ %call = call zeroext i8 @test_byval_mem1(ptr byval(%struct_S1) align 1 @gS1)
ret void
}
-define zeroext i8 @test_byval_mem1(%struct_S1* byval(%struct_S1) align 1 %s) {
+define zeroext i8 @test_byval_mem1(ptr byval(%struct_S1) align 1 %s) {
; CHECK-LABEL: test_byval_mem1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stb 4, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem1_2(i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, %struct_S1* byval(%struct_S1) align 1 @gS1)
+ %call = call zeroext i8 @test_byval_mem1_2(i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, ptr byval(%struct_S1) align 1 @gS1)
ret void
}
-define zeroext i8 @test_byval_mem1_2(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8, %struct_S1* byval(%struct_S1) align 1 %s) {
+define zeroext i8 @test_byval_mem1_2(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8, ptr byval(%struct_S1) align 1 %s) {
; CHECK-LABEL: test_byval_mem1_2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz 3, 96(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem1_3(i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, %struct_S1* byval(%struct_S1) align 1 @gS1)
+ %call = call zeroext i8 @test_byval_mem1_3(i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, ptr byval(%struct_S1) align 1 @gS1)
ret void
}
-define zeroext i8 @test_byval_mem1_3(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, %struct_S1* byval(%struct_S1) align 1 %s) {
+define zeroext i8 @test_byval_mem1_3(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, ptr byval(%struct_S1) align 1 %s) {
; CHECK-LABEL: test_byval_mem1_3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 3, 10, 56
; CHECK-NEXT: stb 10, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem1_4(i64 0, i64 1, i64 2, %struct_S1* byval(%struct_S1) align 1 @gS1, i64 3, i64 4, i64 5, i64 6, i64 7)
+ %call = call zeroext i8 @test_byval_mem1_4(i64 0, i64 1, i64 2, ptr byval(%struct_S1) align 1 @gS1, i64 3, i64 4, i64 5, i64 6, i64 7)
ret void
}
-define zeroext i8 @test_byval_mem1_4(i64 %v1, i64 %v2, i64 %v3, %struct_S1* byval(%struct_S1) align 1 %s, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8) {
+define zeroext i8 @test_byval_mem1_4(i64 %v1, i64 %v2, i64 %v3, ptr byval(%struct_S1) align 1 %s, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8) {
; CHECK-LABEL: test_byval_mem1_4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 3, 6, 56
; CHECK-NEXT: stb 6, 56(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem1_5(i64 0, i64 1, i64 2, %struct_S1* byval(%struct_S1) align 1 @gS1, i64 3, i64 4, i64 5, i64 6)
+ %call = call zeroext i8 @test_byval_mem1_5(i64 0, i64 1, i64 2, ptr byval(%struct_S1) align 1 @gS1, i64 3, i64 4, i64 5, i64 6)
ret void
}
-define zeroext i8 @test_byval_mem1_5(i64 %v1, i64 %v2, i64 %v3, %struct_S1* byval(%struct_S1) align 1 %s, i64 %v4, i64 %v5, i64 %v6, i64 %v7) {
+define zeroext i8 @test_byval_mem1_5(i64 %v1, i64 %v2, i64 %v3, ptr byval(%struct_S1) align 1 %s, i64 %v4, i64 %v5, i64 %v6, i64 %v7) {
; CHECK-LABEL: test_byval_mem1_5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 3, 6, 56
; CHECK-NEXT: stb 6, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S1, %struct_S1* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem2(%struct_S2* byval(%struct_S2) align 1 @gS2)
+ %call = call zeroext i8 @test_byval_mem2(ptr byval(%struct_S2) align 1 @gS2)
ret void
}
-define zeroext i8 @test_byval_mem2(%struct_S2* byval(%struct_S2) align 1 %s) {
+define zeroext i8 @test_byval_mem2(ptr byval(%struct_S2) align 1 %s) {
; CHECK-LABEL: test_byval_mem2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth 3, -8(1)
; CHECK-NEXT: lbz 3, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S2, %struct_S2* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem3(%struct_S3* byval(%struct_S3) align 1 @gS3)
+ %call = call zeroext i8 @test_byval_mem3(ptr byval(%struct_S3) align 1 @gS3)
ret void
}
-define zeroext i8 @test_byval_mem3(%struct_S3* byval(%struct_S3) align 1 %s) {
+define zeroext i8 @test_byval_mem3(ptr byval(%struct_S3) align 1 %s) {
; CHECK-LABEL: test_byval_mem3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth 3, -8(1)
; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S3, %struct_S3* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem4(%struct_S4* byval(%struct_S4) align 1 @gS4)
+ %call = call zeroext i8 @test_byval_mem4(ptr byval(%struct_S4) align 1 @gS4)
ret void
}
-define zeroext i8 @test_byval_mem4(%struct_S4* byval(%struct_S4) align 1 %s) {
+define zeroext i8 @test_byval_mem4(ptr byval(%struct_S4) align 1 %s) {
; CHECK-LABEL: test_byval_mem4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, -8(1)
; CHECK-NEXT: lbz 3, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S4, %struct_S4* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem8(%struct_S8* byval(%struct_S8) align 1 @gS8)
+ %call = call zeroext i8 @test_byval_mem8(ptr byval(%struct_S8) align 1 @gS8)
ret void
}
-define zeroext i8 @test_byval_mem8(%struct_S8* byval(%struct_S8) align 1 %s) {
+define zeroext i8 @test_byval_mem8(ptr byval(%struct_S8) align 1 %s) {
; CHECK-LABEL: test_byval_mem8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S8, %struct_S8* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem32(%struct_S32* byval(%struct_S32) align 1 @gS32)
+ %call = call zeroext i8 @test_byval_mem32(ptr byval(%struct_S32) align 1 @gS32)
ret void
}
-define zeroext i8 @test_byval_mem32(%struct_S32* byval(%struct_S32) align 1 %s) {
+define zeroext i8 @test_byval_mem32(ptr byval(%struct_S32) align 1 %s) {
; CHECK-LABEL: test_byval_mem32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 7, 3
; CHECK-NEXT: std 7, -32(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S32, %struct_S32* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem32_2(float 1.0, %struct_S32* byval(%struct_S32) align 1 @gS32)
+ %call = call zeroext i8 @test_byval_mem32_2(float 1.0, ptr byval(%struct_S32) align 1 @gS32)
ret void
}
-define zeroext i8 @test_byval_mem32_2(float %f, %struct_S32* byval(%struct_S32) align 1 %s) {
+define zeroext i8 @test_byval_mem32_2(float %f, ptr byval(%struct_S32) align 1 %s) {
; CHECK-LABEL: test_byval_mem32_2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 3, 4, 56
; CHECK-NEXT: std 7, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S32, %struct_S32* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem32_3(i64 1, float 1.0, i64 3, double 4.0, i32 2, %struct_S32* byval(%struct_S32) align 1 @gS32)
+ %call = call zeroext i8 @test_byval_mem32_3(i64 1, float 1.0, i64 3, double 4.0, i32 2, ptr byval(%struct_S32) align 1 @gS32)
ret void
}
-define zeroext i8 @test_byval_mem32_3(i64 %i1, float %f, i64 %i2, double %d, i32 %i3, %struct_S32* byval(%struct_S32) align 1 %s) {
+define zeroext i8 @test_byval_mem32_3(i64 %i1, float %f, i64 %i2, double %d, i32 %i3, ptr byval(%struct_S32) align 1 %s) {
; CHECK-LABEL: test_byval_mem32_3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 3, 8, 56
; CHECK-NEXT: std 10, 88(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S32, %struct_S32* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem64(%struct_S64* byval(%struct_S64) align 1 @gS64)
+ %call = call zeroext i8 @test_byval_mem64(ptr byval(%struct_S64) align 1 @gS64)
ret void
}
-define zeroext i8 @test_byval_mem64(%struct_S64* byval(%struct_S64) align 1 %s) {
+define zeroext i8 @test_byval_mem64(ptr byval(%struct_S64) align 1 %s) {
; CHECK-LABEL: test_byval_mem64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 3, -64(1)
; CHECK-NEXT: std 10, -8(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S64, %struct_S64* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %call = call zeroext i8 @test_byval_mem65(%struct_S65* byval(%struct_S65) align 1 @gS65)
+ %call = call zeroext i8 @test_byval_mem65(ptr byval(%struct_S65) align 1 @gS65)
ret void
}
-define zeroext i8 @test_byval_mem65(%struct_S65* byval(%struct_S65) align 1 %s) {
+define zeroext i8 @test_byval_mem65(ptr byval(%struct_S65) align 1 %s) {
; CHECK-LABEL: test_byval_mem65:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 3, 32(1)
; CHECK-NEXT: std 10, 88(1)
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct_S65, %struct_S65* %s, i32 0, i32 0, i32 0
- %0 = load i8, i8* %arrayidx, align 1
+ %0 = load i8, ptr %s, align 1
ret i8 %0
}
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %call = tail call signext i32 bitcast (i32 (...)* @f1 to i32 ()*)()
+ %call = tail call signext i32 @f1()
br label %return
if.else: ; preds = %entry
- %call1 = tail call signext i32 bitcast (i32 (...)* @f2 to i32 ()*)()
+ %call1 = tail call signext i32 @f2()
br label %return
return: ; preds = %if.else, %if.then
%struct.empty = type {}
-define void @callee(%struct.empty* noalias sret(%struct.empty) %agg.result, %struct.empty* byval(%struct.empty) %a1, %struct.empty* %a2, %struct.empty* byval(%struct.empty) %a3) nounwind {
+define void @callee(ptr noalias sret(%struct.empty) %agg.result, ptr byval(%struct.empty) %a1, ptr %a2, ptr byval(%struct.empty) %a3) nounwind {
entry:
- %a2.addr = alloca %struct.empty*, align 8
- store %struct.empty* %a2, %struct.empty** %a2.addr, align 8
- %0 = load %struct.empty*, %struct.empty** %a2.addr, align 8
- %1 = bitcast %struct.empty* %agg.result to i8*
- %2 = bitcast %struct.empty* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i1 false)
+ %a2.addr = alloca ptr, align 8
+ store ptr %a2, ptr %a2.addr, align 8
+ %0 = load ptr, ptr %a2.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %agg.result, ptr %0, i64 0, i1 false)
ret void
}
; CHECK-NOT: std 6,
; CHECK: blr
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-define void @caller(%struct.empty* noalias sret(%struct.empty) %agg.result) nounwind {
+define void @caller(ptr noalias sret(%struct.empty) %agg.result) nounwind {
entry:
%e1 = alloca %struct.empty, align 1
%e2 = alloca %struct.empty, align 1
%e3 = alloca %struct.empty, align 1
- call void @callee(%struct.empty* sret(%struct.empty) %agg.result, %struct.empty* byval(%struct.empty) %e1, %struct.empty* %e2, %struct.empty* byval(%struct.empty) %e3)
+ call void @callee(ptr sret(%struct.empty) %agg.result, ptr byval(%struct.empty) %e1, ptr %e2, ptr byval(%struct.empty) %e3)
ret void
}
@external_y = thread_local global i8 7, align 2
@internal_y = internal thread_local global i64 9, align 16
-define i32* @get_external_x() {
+define ptr @get_external_x() {
entry:
- ret i32* @external_x
+ ret ptr @external_x
}
-define i8* @get_external_y() {
+define ptr @get_external_y() {
entry:
- ret i8* @external_y
+ ret ptr @external_y
}
-define i64* @get_internal_y() {
+define ptr @get_internal_y() {
entry:
- ret i64* @internal_y
+ ret ptr @internal_y
}
; CHECK-LABEL: get_external_x:
ret i32 %W
}
-define void @VNOR(<4 x float>* %P, <4 x float>* %Q) nounwind {
- %tmp = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
+define void @VNOR(ptr %P, ptr %Q) nounwind {
+ %tmp = load <4 x float>, ptr %P ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>, <4 x float>* %Q ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, ptr %Q ; <<4 x float>> [#uses=1]
%tmp2.upgrd.2 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp3 = or <4 x i32> %tmp.upgrd.1, %tmp2.upgrd.2 ; <<4 x i32>> [#uses=1]
%tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
%tmp4.upgrd.3 = bitcast <4 x i32> %tmp4 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4.upgrd.3, <4 x float>* %P
+ store <4 x float> %tmp4.upgrd.3, ptr %P
ret void
}
-define void @VANDC(<4 x float>* %P, <4 x float>* %Q) nounwind {
- %tmp = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
+define void @VANDC(ptr %P, ptr %Q) nounwind {
+ %tmp = load <4 x float>, ptr %P ; <<4 x float>> [#uses=1]
%tmp.upgrd.4 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>, <4 x float>* %Q ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, ptr %Q ; <<4 x float>> [#uses=1]
%tmp2.upgrd.5 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
%tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4 ; <<4 x i32>> [#uses=1]
%tmp4.upgrd.6 = bitcast <4 x i32> %tmp3 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4.upgrd.6, <4 x float>* %P
+ store <4 x float> %tmp4.upgrd.6, ptr %P
ret void
}
%cmp.i30.us = icmp ult i64 %.sink.us, 2
br i1 %cmp.i30.us, label %if.end.us, label %if.end.i.i.us
if.end.i.i.us:
- %0 = inttoptr i64 %Str.sroa.0.0.us to i8*
- %call.i.i.us = tail call signext i32 @memcmp(i8* %0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i64 2)
+ %0 = inttoptr i64 %Str.sroa.0.0.us to ptr
+ %call.i.i.us = tail call signext i32 @memcmp(ptr %0, ptr @.str, i64 2)
%phitmp.i.us = icmp eq i32 %call.i.i.us, 0
br i1 %phitmp.i.us, label %if.then, label %_ZNK4llvm9StringRefixEm.exit.us
if.end.us:
%cmp.i34.us = icmp eq i64 %.sink.us, 0
br i1 %cmp.i34.us, label %cond.false.i.loopexit, label %_ZNK4llvm9StringRefixEm.exit.us
_ZNK4llvm9StringRefixEm.exit.us:
- %1 = inttoptr i64 %Str.sroa.0.0.us to i8*
- %2 = load i8, i8* %1, align 1
+ %1 = inttoptr i64 %Str.sroa.0.0.us to ptr
+ %2 = load i8, ptr %1, align 1
switch i8 %2, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit [
i8 92, label %if.then4.us
i8 93, label %if.then9
]
if.then4.us:
%.sroa.speculated12.i38.us = select i1 %cmp.i30.us, i64 %.sink.us, i64 2
- %add.ptr.i40.us = getelementptr inbounds i8, i8* %1, i64 %.sroa.speculated12.i38.us
+ %add.ptr.i40.us = getelementptr inbounds i8, ptr %1, i64 %.sroa.speculated12.i38.us
%sub.i41.us = sub i64 %.sink.us, %.sroa.speculated12.i38.us
- %tobool.i.i44.us = icmp ne i8* %add.ptr.i40.us, null
+ %tobool.i.i44.us = icmp ne ptr %add.ptr.i40.us, null
%cmp.i4.i45.us = icmp eq i64 %sub.i41.us, 0
%or.cond.i.i46.us = or i1 %tobool.i.i44.us, %cmp.i4.i45.us
br i1 %or.cond.i.i46.us, label %_ZNK4llvm9StringRef6substrEmm.exit50.us, label %cond.false.i.i47.loopexit
_ZNK4llvm9StringRef6substrEmm.exit50.us:
- %3 = ptrtoint i8* %add.ptr.i40.us to i64
+ %3 = ptrtoint ptr %add.ptr.i40.us to i64
br label %while.cond.us
if.then:
ret i64 undef
cond.false.i.loopexit135:
br label %cond.false.i
cond.false.i:
- tail call void @__assert_fail(i8* getelementptr inbounds ([35 x i8], [35 x i8]* @.str.1, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 225, i8* getelementptr inbounds ([47 x i8], [47 x i8]* @__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm, i64 0, i64 0))
+ tail call void @__assert_fail(ptr @.str.1, ptr @.str.2, i32 zeroext 225, ptr @__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm)
unreachable
_ZNK4llvm9StringRefixEm.exit:
%.sink131 = phi i64 [ %sub.i41, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %.sink.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
%Str.sroa.0.0130 = phi i64 [ %6, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %Str.sroa.0.0.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
- %4 = inttoptr i64 %Str.sroa.0.0130 to i8*
- %5 = load i8, i8* %4, align 1
+ %4 = inttoptr i64 %Str.sroa.0.0130 to ptr
+ %5 = load i8, ptr %4, align 1
switch i8 %5, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 [
i8 92, label %if.then4
i8 93, label %if.end10
if.then4:
%cmp.i.i37 = icmp ult i64 %.sink131, 2
%.sroa.speculated12.i38 = select i1 %cmp.i.i37, i64 %.sink131, i64 2
- %add.ptr.i40 = getelementptr inbounds i8, i8* %4, i64 %.sroa.speculated12.i38
+ %add.ptr.i40 = getelementptr inbounds i8, ptr %4, i64 %.sroa.speculated12.i38
%sub.i41 = sub i64 %.sink131, %.sroa.speculated12.i38
- %tobool.i.i44 = icmp ne i8* %add.ptr.i40, null
+ %tobool.i.i44 = icmp ne ptr %add.ptr.i40, null
%cmp.i4.i45 = icmp eq i64 %sub.i41, 0
%or.cond.i.i46 = or i1 %tobool.i.i44, %cmp.i4.i45
br i1 %or.cond.i.i46, label %_ZNK4llvm9StringRef6substrEmm.exit50, label %cond.false.i.i47.loopexit133
cond.false.i.i47.loopexit133:
br label %cond.false.i.i47
cond.false.i.i47:
- tail call void @__assert_fail(i8* getelementptr inbounds ([95 x i8], [95 x i8]* @.str.3, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 90, i8* getelementptr inbounds ([49 x i8], [49 x i8]* @__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm, i64 0, i64 0))
+ tail call void @__assert_fail(ptr @.str.3, ptr @.str.2, i32 zeroext 90, ptr @__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm)
unreachable
_ZNK4llvm9StringRef6substrEmm.exit50:
- %6 = ptrtoint i8* %add.ptr.i40 to i64
+ %6 = ptrtoint ptr %add.ptr.i40 to i64
%cmp.i34 = icmp eq i64 %sub.i41, 0
br i1 %cmp.i34, label %cond.false.i.loopexit134, label %_ZNK4llvm9StringRefixEm.exit
if.then9:
br label %_ZNK4llvm9StringRef6substrEmm.exit
_ZNK4llvm9StringRef6substrEmm.exit:
%.sink76 = phi i64 [ %.sink131, %if.end10 ], [ %.sink.us, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %.sink131, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
- %7 = phi i8* [ %4, %if.end10 ], [ %1, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %4, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %7 = phi ptr [ %4, %if.end10 ], [ %1, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %4, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
%BracketDepth.1 = phi i64 [ %dec, %if.end10 ], [ 0, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %BracketDepth.0.ph, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
%sub.i = add i64 %.sink76, -1
- %add.ptr.i = getelementptr inbounds i8, i8* %7, i64 1
- %8 = ptrtoint i8* %add.ptr.i to i64
+ %add.ptr.i = getelementptr inbounds i8, ptr %7, i64 1
+ %8 = ptrtoint ptr %add.ptr.i to i64
br label %while.cond.outer
; CHECK-LABEL: @_Z3fn1N4llvm9StringRefE
declare void @exit(i32 signext)
-declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64)
-declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*)
+declare signext i32 @memcmp(ptr nocapture, ptr nocapture, i64)
+declare void @__assert_fail(ptr, ptr, i32 zeroext, ptr)
; Function Attrs: norecurse nounwind readonly
define signext i32 @testComplexISEL() #0 {
entry:
- %0 = load i32, i32* @b, align 4, !tbaa !1
+ %0 = load i32, ptr @b, align 4, !tbaa !1
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %cleanup
if.end:
- %1 = load i32, i32* @a, align 4, !tbaa !1
+ %1 = load i32, ptr @a, align 4, !tbaa !1
%conv = sext i32 %1 to i64
- %2 = inttoptr i64 %conv to i32 (...)*
- %cmp = icmp eq i32 (...)* %2, bitcast (i32 ()* @testComplexISEL to i32 (...)*)
+ %2 = inttoptr i64 %conv to ptr
+ %cmp = icmp eq ptr %2, @testComplexISEL
%conv3 = zext i1 %cmp to i32
br label %cleanup
; <label>:4: ; preds = %4, %3
%5 = phi i64 [ %6, %4 ], [ undef, %3 ]
- %6 = and i64 %5, and (i64 and (i64 and (i64 and (i64 and (i64 and (i64 and (i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr 
inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i16), i16 0), i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 lshr (i32 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @d, i64 0, i64 1), i32* @c) to i32), i32 6)) to i64))
+ %6 = and i64 %5, and (i64 and (i64 and (i64 and (i64 and (i64 and (i64 and (i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp 
eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64)), i64 sext (i32 select (i1 icmp slt (i16 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i16), i16 0), i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 lshr (i32 zext (i1 icmp eq (ptr getelementptr inbounds ([2 x i32], ptr @d, i64 0, i64 1), ptr @c) to i32), i32 6)) to i64))
%7 = icmp slt i32 undef, 6
br i1 %7, label %4, label %8
%"class.Foam::messageStream.6" = type <{ %"class.Foam::string.5", i32, i32, i32, [4 x i8] }>
%"class.Foam::string.5" = type { %"class.std::basic_string.4" }
%"class.std::basic_string.4" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.3" }
-%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.3" = type { i8* }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.3" = type { ptr }
%"class.Foam::prefixOSstream.27" = type { %"class.Foam::OSstream.26", i8, %"class.Foam::string.5" }
-%"class.Foam::OSstream.26" = type { %"class.Foam::Ostream.base.9", %"class.Foam::fileName.10", %"class.std::basic_ostream.25"* }
+%"class.Foam::OSstream.26" = type { %"class.Foam::Ostream.base.9", %"class.Foam::fileName.10", ptr }
%"class.Foam::Ostream.base.9" = type <{ %"class.Foam::IOstream.8", i16 }>
-%"class.Foam::IOstream.8" = type { i32 (...)**, i32, [4 x i8], %"class.Foam::IOstream::versionNumber.7", i32, i32, i32, i32 }
+%"class.Foam::IOstream.8" = type { ptr, i32, [4 x i8], %"class.Foam::IOstream::versionNumber.7", i32, i32, i32, i32 }
%"class.Foam::IOstream::versionNumber.7" = type <{ double, i32, [4 x i8] }>
%"class.Foam::fileName.10" = type { %"class.Foam::string.5" }
-%"class.std::basic_ostream.25" = type { i32 (...)**, %"class.std::basic_ios.24" }
-%"class.std::basic_ios.24" = type { %"class.std::ios_base.16", %"class.std::basic_ostream.25"*, i8, i8, %"class.std::basic_streambuf.17"*, %"class.std::ctype.21"*, %"class.std::__gnu_cxx_ldbl128::num_put.22"*, %"class.std::__gnu_cxx_ldbl128::num_get.23"* }
-%"class.std::ios_base.16" = type { i32 (...)**, i64, i64, i32, i32, i32, %"struct.std::ios_base::_Callback_list.11"*, %"struct.std::ios_base::_Words.12", [8 x %"struct.std::ios_base::_Words.12"], i32, %"struct.std::ios_base::_Words.12"*, %"class.std::locale.15" }
-%"struct.std::ios_base::_Callback_list.11" = type { %"struct.std::ios_base::_Callback_list.11"*, void (i32, %"class.std::ios_base.16"*, i32)*, i32, i32 }
-%"struct.std::ios_base::_Words.12" = type { i8*, i64 }
-%"class.std::locale.15" = type { %"class.std::locale::_Impl.14"* }
-%"class.std::locale::_Impl.14" = type { i32, %"class.std::locale::facet.13"**, i64, %"class.std::locale::facet.13"**, i8** }
-%"class.std::locale::facet.13" = type <{ i32 (...)**, i32, [4 x i8] }>
-%"class.std::basic_streambuf.17" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"class.std::locale.15" }
-%"class.std::ctype.21" = type <{ %"class.std::locale::facet.base.18", [4 x i8], %struct.__locale_struct.20*, i8, [7 x i8], i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8, [6 x i8] }>
-%"class.std::locale::facet.base.18" = type <{ i32 (...)**, i32 }>
-%struct.__locale_struct.20 = type { [13 x %struct.__locale_data.19*], i16*, i32*, i32*, [13 x i8*] }
+%"class.std::basic_ostream.25" = type { ptr, %"class.std::basic_ios.24" }
+%"class.std::basic_ios.24" = type { %"class.std::ios_base.16", ptr, i8, i8, ptr, ptr, ptr, ptr }
+%"class.std::ios_base.16" = type { ptr, i64, i64, i32, i32, i32, ptr, %"struct.std::ios_base::_Words.12", [8 x %"struct.std::ios_base::_Words.12"], i32, ptr, %"class.std::locale.15" }
+%"struct.std::ios_base::_Callback_list.11" = type { ptr, ptr, i32, i32 }
+%"struct.std::ios_base::_Words.12" = type { ptr, i64 }
+%"class.std::locale.15" = type { ptr }
+%"class.std::locale::_Impl.14" = type { i32, ptr, i64, ptr, ptr }
+%"class.std::locale::facet.13" = type <{ ptr, i32, [4 x i8] }>
+%"class.std::basic_streambuf.17" = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, %"class.std::locale.15" }
+%"class.std::ctype.21" = type <{ %"class.std::locale::facet.base.18", [4 x i8], ptr, i8, [7 x i8], ptr, ptr, ptr, i8, [256 x i8], [256 x i8], i8, [6 x i8] }>
+%"class.std::locale::facet.base.18" = type <{ ptr, i32 }>
+%struct.__locale_struct.20 = type { [13 x ptr], ptr, ptr, ptr, [13 x ptr] }
%struct.__locale_data.19 = type opaque
%"class.std::__gnu_cxx_ldbl128::num_put.22" = type { %"class.std::locale::facet.base.18", [4 x i8] }
%"class.std::__gnu_cxx_ldbl128::num_get.23" = type { %"class.std::locale::facet.base.18", [4 x i8] }
-%"class.Foam::primitiveMesh.135" = type { i32 (...)**, i32, i32, i32, i32, i32, i32, i32, i32, i32, %"class.Foam::List.116"*, %"class.Foam::List.0"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.5"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::List.1"*, %"class.Foam::DynamicList.40", %"class.Foam::HashSet.127", %"class.Foam::Field.131"*, %"class.Foam::Field.131"*, %"class.Foam::Field.11"*, %"class.Foam::Field.131"* }
+%"class.Foam::primitiveMesh.135" = type { ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %"class.Foam::DynamicList.40", %"class.Foam::HashSet.127", ptr, ptr, ptr, ptr }
%"class.Foam::List.116" = type opaque
%"class.Foam::List.0" = type { %"class.Foam::UList.119" }
-%"class.Foam::UList.119" = type { i32, %"class.Foam::edge.118"* }
+%"class.Foam::UList.119" = type { i32, ptr }
%"class.Foam::edge.118" = type { %"class.Foam::FixedList.117" }
%"class.Foam::FixedList.117" = type { [2 x i32] }
%"class.Foam::List.5" = type { %"class.Foam::UList.6" }
-%"class.Foam::UList.6" = type { i32, %"class.Foam::cell.121"* }
+%"class.Foam::UList.6" = type { i32, ptr }
%"class.Foam::cell.121" = type { %"class.Foam::List.3" }
%"class.Foam::List.3" = type { %"class.Foam::UList.4" }
-%"class.Foam::UList.4" = type { i32, i32* }
+%"class.Foam::UList.4" = type { i32, ptr }
%"class.Foam::List.1" = type { %"class.Foam::UList.2" }
-%"class.Foam::UList.2" = type { i32, %"class.Foam::List.3"* }
+%"class.Foam::UList.2" = type { i32, ptr }
%"class.Foam::DynamicList.40" = type <{ %"class.Foam::List.3", i32, [4 x i8] }>
%"class.Foam::HashSet.127" = type { %"class.Foam::HashTable.7" }
-%"class.Foam::HashTable.7" = type { i32, i32, %"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125"** }
-%"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125" = type <{ i32, [4 x i8], %"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125"*, %"class.Foam::nil.124", [7 x i8] }>
+%"class.Foam::HashTable.7" = type { i32, i32, ptr }
+%"struct.Foam::HashTable<Foam::nil, int, Foam::Hash<Foam::label> >::hashedEntry.125" = type <{ i32, [4 x i8], ptr, %"class.Foam::nil.124", [7 x i8] }>
%"class.Foam::nil.124" = type { i8 }
%"class.Foam::Field.11" = type { %"class.Foam::refCount.128", %"class.Foam::List.12" }
%"class.Foam::refCount.128" = type { i32 }
%"class.Foam::List.12" = type { %"class.Foam::UList.13" }
-%"class.Foam::UList.13" = type { i32, double* }
+%"class.Foam::UList.13" = type { i32, ptr }
%"class.Foam::Field.131" = type { %"class.Foam::refCount.128", %"class.Foam::List.8" }
%"class.Foam::List.8" = type { %"class.Foam::UList.9" }
-%"class.Foam::UList.9" = type { i32, %"class.Foam::Vector.29"* }
+%"class.Foam::UList.9" = type { i32, ptr }
%"class.Foam::Vector.29" = type { %"class.Foam::VectorSpace.10" }
%"class.Foam::VectorSpace.10" = type { [3 x double] }
%"class.Foam::Ostream.189" = type <{ %"class.Foam::IOstream.8", i16, [6 x i8] }>
@.str28 = external unnamed_addr constant [7 x i8], align 1
@_ZN4Foam4PoutE = external global %"class.Foam::prefixOSstream.27", align 8
-define void @_ZN4Foam13checkTopologyERKNS_8polyMeshEbb(i1 zeroext %allTopology) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_ZN4Foam13checkTopologyERKNS_8polyMeshEbb(i1 zeroext %allTopology) #0 personality ptr @__gxx_personality_v0 {
entry:
br i1 undef, label %for.body, label %for.cond.cleanup
to label %_ZN4Foam4wordC2EPKcb.exit unwind label %lpad.i
lpad.i: ; preds = %_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit
- %0 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
- resume { i8*, i32 } %0
+ resume { ptr, i32 } %0
_ZN4Foam4wordC2EPKcb.exit: ; preds = %_ZNK4Foam8ZoneMeshINS_9pointZoneENS_8polyMeshEE15checkDefinitionEb.exit
invoke void @_ZN4Foam7cellSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
br i1 undef, label %if.then121, label %if.else
lpad: ; preds = %_ZN4Foam4wordC2EPKcb.exit
- %1 = landingpad { i8*, i32 }
+ %1 = landingpad { ptr, i32 }
cleanup
br i1 undef, label %_ZNSsD2Ev.exit1578, label %if.then.i.i1570, !prof !1
to label %_ZN4Foam4wordC2EPKcb.exit1701 unwind label %lpad.i1689
lpad.i1689: ; preds = %if.else
- %2 = landingpad { i8*, i32 }
+ %2 = landingpad { ptr, i32 }
cleanup
unreachable
to label %invoke.cont169 unwind label %lpad165
invoke.cont169: ; preds = %_ZN4Foam4wordC2EPKcb.exit1701
- %call177 = invoke zeroext i1 undef(%"class.Foam::primitiveMesh.135"* undef, i1 zeroext true, %"class.Foam::HashSet.127"* undef)
+ %call177 = invoke zeroext i1 undef(ptr undef, i1 zeroext true, ptr undef)
to label %invoke.cont176 unwind label %lpad175
invoke.cont176: ; preds = %invoke.cont169
unreachable
lpad165: ; preds = %_ZN4Foam4wordC2EPKcb.exit1701
- %3 = landingpad { i8*, i32 }
+ %3 = landingpad { ptr, i32 }
cleanup
unreachable
lpad175: ; preds = %invoke.cont169
- %4 = landingpad { i8*, i32 }
+ %4 = landingpad { ptr, i32 }
cleanup
invoke void @_ZN4Foam8pointSetD1Ev()
to label %eh.resume unwind label %terminate.lpad
to label %_ZN4Foam4wordC2EPKcb.exit1777 unwind label %lpad.i1765
lpad.i1765: ; preds = %if.end213
- %5 = landingpad { i8*, i32 }
+ %5 = landingpad { ptr, i32 }
cleanup
br i1 undef, label %eh.resume.i1776, label %if.then.i.i.i1767, !prof !1
unreachable
eh.resume.i1776: ; preds = %lpad.i1765
- resume { i8*, i32 } %5
+ resume { ptr, i32 } %5
_ZN4Foam4wordC2EPKcb.exit1777: ; preds = %if.end213
invoke void @_ZN4Foam7faceSetC1ERKNS_8polyMeshERKNS_4wordEiNS_8IOobject11writeOptionE()
br label %_ZNSsD2Ev.exit1792
_ZNSsD2Ev.exit1792: ; preds = %if.then4.i.i1791, %if.then.i.i1784, %invoke.cont221
- %call232 = invoke zeroext i1 undef(%"class.Foam::primitiveMesh.135"* undef, i1 zeroext true, %"class.Foam::HashSet.127"* undef)
+ %call232 = invoke zeroext i1 undef(ptr undef, i1 zeroext true, ptr undef)
to label %invoke.cont231 unwind label %lpad230
invoke.cont231: ; preds = %_ZNSsD2Ev.exit1792
to label %invoke.cont243 unwind label %lpad230
lpad217: ; preds = %_ZN4Foam4wordC2EPKcb.exit1777
- %6 = landingpad { i8*, i32 }
+ %6 = landingpad { ptr, i32 }
cleanup
br label %eh.resume
lpad230: ; preds = %invoke.cont231, %_ZNSsD2Ev.exit1792
- %7 = landingpad { i8*, i32 }
+ %7 = landingpad { ptr, i32 }
cleanup
invoke void @_ZN4Foam7faceSetD1Ev()
to label %eh.resume unwind label %terminate.lpad
to label %_ZN4Foam4wordC2EPKcb.exit1862 unwind label %lpad.i1850
lpad.i1850: ; preds = %invoke.cont243
- %8 = landingpad { i8*, i32 }
+ %8 = landingpad { ptr, i32 }
cleanup
unreachable
unreachable
lpad276: ; preds = %_ZN4Foam4wordC2EPKcb.exit1862
- %9 = landingpad { i8*, i32 }
+ %9 = landingpad { ptr, i32 }
cleanup
unreachable
to label %invoke.cont668 unwind label %lpad663
invoke.cont668: ; preds = %if.end660
- %call671 = invoke dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc()
+ %call671 = invoke dereferenceable(56) ptr @_ZN4FoamlsERNS_7OstreamEPKc()
to label %invoke.cont670 unwind label %lpad663
invoke.cont670: ; preds = %invoke.cont668
to label %invoke.cont674 unwind label %lpad663
invoke.cont674: ; preds = %invoke.cont670
- %call677 = invoke dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc()
+ %call677 = invoke dereferenceable(56) ptr @_ZN4FoamlsERNS_7OstreamEPKc()
to label %invoke.cont676 unwind label %lpad663
invoke.cont676: ; preds = %invoke.cont674
- invoke void undef(%"class.Foam::Ostream.189"* %call677)
+ invoke void undef(ptr %call677)
to label %if.end878 unwind label %lpad663
lpad663: ; preds = %invoke.cont670, %if.end660, %invoke.cont668, %invoke.cont674, %invoke.cont676
- %10 = landingpad { i8*, i32 }
+ %10 = landingpad { ptr, i32 }
cleanup
br i1 undef, label %_ZN4Foam4ListIiED2Ev.exit.i3073, label %delete.notnull.i.i3071
to label %_ZN4Foam4wordC2EPKcb.exit3098 unwind label %lpad.i3086
lpad.i3086: ; preds = %if.else888
- %11 = landingpad { i8*, i32 }
+ %11 = landingpad { ptr, i32 }
cleanup
unreachable
unreachable
_ZNSsD2Ev.exit3113: ; preds = %if.then.i.i3105, %invoke.cont902
- %call.i31163117 = invoke zeroext i32 undef(%"class.Foam::IOstream.8"* getelementptr inbounds (%"class.Foam::prefixOSstream.27", %"class.Foam::prefixOSstream.27"* @_ZN4Foam4PoutE, i64 0, i32 0, i32 0, i32 0))
+ %call.i31163117 = invoke zeroext i32 undef(ptr @_ZN4Foam4PoutE)
to label %call.i3116.noexc unwind label %lpad905.loopexit.split-lp
call.i3116.noexc: ; preds = %_ZNSsD2Ev.exit3113
- %call5.i3118 = invoke zeroext i32 null(%"class.Foam::IOstream.8"* getelementptr inbounds (%"class.Foam::prefixOSstream.27", %"class.Foam::prefixOSstream.27"* @_ZN4Foam4PoutE, i64 0, i32 0, i32 0, i32 0), i32 zeroext undef)
+ %call5.i3118 = invoke zeroext i32 null(ptr @_ZN4Foam4PoutE, i32 zeroext undef)
to label %invoke.cont906 unwind label %lpad905.loopexit.split-lp
invoke.cont906: ; preds = %call.i3116.noexc
unreachable
lpad898: ; preds = %_ZN4Foam4wordC2EPKcb.exit3098
- %12 = landingpad { i8*, i32 }
+ %12 = landingpad { ptr, i32 }
cleanup
br i1 undef, label %_ZNSsD2Ev.exit3204, label %if.then.i.i3196, !prof !1
unreachable
lpad905.loopexit.split-lp: ; preds = %call.i3116.noexc, %_ZNSsD2Ev.exit3113
- %lpad.loopexit.split-lp = landingpad { i8*, i32 }
+ %lpad.loopexit.split-lp = landingpad { ptr, i32 }
cleanup
invoke void @_ZN4Foam8pointSetD1Ev()
to label %eh.resume unwind label %terminate.lpad
eh.resume: ; preds = %_ZN4Foam4ListIiED2Ev.exit.i3073, %lpad230, %lpad175, %lpad905.loopexit.split-lp, %lpad217
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
terminate.lpad: ; preds = %_ZN4Foam4ListIiED2Ev.exit.i3073, %lpad230, %lpad175, %lpad905.loopexit.split-lp
- %13 = landingpad { i8*, i32 }
- catch i8* null
+ %13 = landingpad { ptr, i32 }
+ catch ptr null
unreachable
}
-declare dereferenceable(56) %"class.Foam::Ostream.189"* @_ZN4FoamlsERNS_7OstreamEPKc() #0
+declare dereferenceable(56) ptr @_ZN4FoamlsERNS_7OstreamEPKc() #0
declare void @_ZN4Foam13messageStreamcvRNS_8OSstreamEEv() #0
; RUN: -ppc-asm-full-reg-names -verify-machineinstrs -O2 < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-P9-BE
-define <2 x i64> @testllv(<2 x i64> returned %a, <2 x i64> %b, i64* nocapture %ap, i64 %Idx) local_unnamed_addr #0 {
+define <2 x i64> @testllv(<2 x i64> returned %a, <2 x i64> %b, ptr nocapture %ap, i64 %Idx) local_unnamed_addr #0 {
; CHECK-LABEL: testllv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x i64> %a, i32 0
- %arrayidx = getelementptr inbounds i64, i64* %ap, i64 %Idx
- store i64 %vecext, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %ap, i64 %Idx
+ store i64 %vecext, ptr %arrayidx, align 8
ret <2 x i64> %a
}
-define <2 x i64> @testll0(<2 x i64> returned %a, <2 x i64> %b, i64* nocapture %ap) local_unnamed_addr #0 {
+define <2 x i64> @testll0(<2 x i64> returned %a, <2 x i64> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testll0:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x i64> %a, i32 0
- %arrayidx = getelementptr inbounds i64, i64* %ap, i64 3
- store i64 %vecext, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %ap, i64 3
+ store i64 %vecext, ptr %arrayidx, align 8
ret <2 x i64> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <2 x i64> @testll1(<2 x i64> returned %a, i64 %b, i64* nocapture %ap) local_unnamed_addr #0 {
+define <2 x i64> @testll1(<2 x i64> returned %a, i64 %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testll1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r6, 24
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x i64> %a, i32 1
- %arrayidx = getelementptr inbounds i64, i64* %ap, i64 3
- store i64 %vecext, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %ap, i64 3
+ store i64 %vecext, ptr %arrayidx, align 8
ret <2 x i64> %a
}
-define <2 x double> @testdv(<2 x double> returned %a, <2 x double> %b, double* nocapture %ap, i64 %Idx) local_unnamed_addr #0 {
+define <2 x double> @testdv(<2 x double> returned %a, <2 x double> %b, ptr nocapture %ap, i64 %Idx) local_unnamed_addr #0 {
; CHECK-LABEL: testdv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x double> %a, i32 0
- %arrayidx = getelementptr inbounds double, double* %ap, i64 %Idx
- store double %vecext, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ap, i64 %Idx
+ store double %vecext, ptr %arrayidx, align 8
ret <2 x double> %a
}
-define <2 x double> @testd0(<2 x double> returned %a, <2 x double> %b, double* nocapture %ap) local_unnamed_addr #0 {
+define <2 x double> @testd0(<2 x double> returned %a, <2 x double> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testd0:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x double> %a, i32 0
- %arrayidx = getelementptr inbounds double, double* %ap, i64 3
- store double %vecext, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ap, i64 3
+ store double %vecext, ptr %arrayidx, align 8
ret <2 x double> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <2 x double> @testd1(<2 x double> returned %a, <2 x double> %b, double* nocapture %ap) local_unnamed_addr #0 {
+define <2 x double> @testd1(<2 x double> returned %a, <2 x double> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testd1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r7, 24
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <2 x double> %a, i32 1
- %arrayidx = getelementptr inbounds double, double* %ap, i64 3
- store double %vecext, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %ap, i64 3
+ store double %vecext, ptr %arrayidx, align 8
ret <2 x double> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x float> @testf0(<4 x float> returned %a, <4 x float> %b, float* nocapture %ap) local_unnamed_addr #0 {
+define <4 x float> @testf0(<4 x float> returned %a, <4 x float> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testf0:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 2
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 0
- %arrayidx = getelementptr inbounds float, float* %ap, i64 3
- store float %vecext, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ap, i64 3
+ store float %vecext, ptr %arrayidx, align 4
ret <4 x float> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x float> @testf1(<4 x float> returned %a, <4 x float> %b, float* nocapture %ap) local_unnamed_addr #0 {
+define <4 x float> @testf1(<4 x float> returned %a, <4 x float> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testf1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 1
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 1
- %arrayidx = getelementptr inbounds float, float* %ap, i64 3
- store float %vecext, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ap, i64 3
+ store float %vecext, ptr %arrayidx, align 4
ret <4 x float> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x float> @testf2(<4 x float> returned %a, <4 x float> %b, float* nocapture %ap) local_unnamed_addr #0 {
+define <4 x float> @testf2(<4 x float> returned %a, <4 x float> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r7, 12
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 2
- %arrayidx = getelementptr inbounds float, float* %ap, i64 3
- store float %vecext, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ap, i64 3
+ store float %vecext, ptr %arrayidx, align 4
ret <4 x float> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x float> @testf3(<4 x float> returned %a, <4 x float> %b, float* nocapture %ap) local_unnamed_addr #0 {
+define <4 x float> @testf3(<4 x float> returned %a, <4 x float> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testf3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 3
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 3
- %arrayidx = getelementptr inbounds float, float* %ap, i64 3
- store float %vecext, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %ap, i64 3
+ store float %vecext, ptr %arrayidx, align 4
ret <4 x float> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x i32> @testi0(<4 x i32> returned %a, <4 x i32> %b, i32* nocapture %ap) local_unnamed_addr #0 {
+define <4 x i32> @testi0(<4 x i32> returned %a, <4 x i32> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testi0:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 2
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 0
- %arrayidx = getelementptr inbounds i32, i32* %ap, i64 3
- store i32 %vecext, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ap, i64 3
+ store i32 %vecext, ptr %arrayidx, align 4
ret <4 x i32> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x i32> @testi1(<4 x i32> returned %a, <4 x i32> %b, i32* nocapture %ap) local_unnamed_addr #0 {
+define <4 x i32> @testi1(<4 x i32> returned %a, <4 x i32> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testi1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 1
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 1
- %arrayidx = getelementptr inbounds i32, i32* %ap, i64 3
- store i32 %vecext, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ap, i64 3
+ store i32 %vecext, ptr %arrayidx, align 4
ret <4 x i32> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x i32> @testi2(<4 x i32> returned %a, <4 x i32> %b, i32* nocapture %ap) local_unnamed_addr #0 {
+define <4 x i32> @testi2(<4 x i32> returned %a, <4 x i32> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testi2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r7, 12
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 2
- %arrayidx = getelementptr inbounds i32, i32* %ap, i64 3
- store i32 %vecext, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ap, i64 3
+ store i32 %vecext, ptr %arrayidx, align 4
ret <4 x i32> %a
}
; Function Attrs: norecurse nounwind writeonly
-define <4 x i32> @testi3(<4 x i32> returned %a, <4 x i32> %b, i32* nocapture %ap) local_unnamed_addr #0 {
+define <4 x i32> @testi3(<4 x i32> returned %a, <4 x i32> %b, ptr nocapture %ap) local_unnamed_addr #0 {
; CHECK-LABEL: testi3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 3
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 3
- %arrayidx = getelementptr inbounds i32, i32* %ap, i64 3
- store i32 %vecext, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %ap, i64 3
+ store i32 %vecext, ptr %arrayidx, align 4
ret <4 x i32> %a
}
-define dso_local void @test_consecutive_i32(<4 x i32> %a, i32* nocapture %b) local_unnamed_addr #0 {
+define dso_local void @test_consecutive_i32(<4 x i32> %a, ptr nocapture %b) local_unnamed_addr #0 {
; CHECK-LABEL: test_consecutive_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 2
entry:
%vecext = extractelement <4 x i32> %a, i32 0
- store i32 %vecext, i32* %b, align 4
+ store i32 %vecext, ptr %b, align 4
%vecext1 = extractelement <4 x i32> %a, i32 2
- %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 1
- store i32 %vecext1, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 1
+ store i32 %vecext1, ptr %arrayidx2, align 4
ret void
}
-define dso_local void @test_consecutive_float(<4 x float> %a, float* nocapture %b) local_unnamed_addr #0 {
+define dso_local void @test_consecutive_float(<4 x float> %a, ptr nocapture %b) local_unnamed_addr #0 {
; CHECK-LABEL: test_consecutive_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 1
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 1
- store float %vecext, float* %b, align 4
+ store float %vecext, ptr %b, align 4
%vecext1 = extractelement <4 x float> %a, i32 3
- %arrayidx2 = getelementptr inbounds float, float* %b, i64 1
- store float %vecext1, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %b, i64 1
+ store float %vecext1, ptr %arrayidx2, align 4
ret void
}
-define dso_local void @test_stores_exceed_vec_size(<4 x i32> %a, i32* nocapture %b) local_unnamed_addr #0 {
+define dso_local void @test_stores_exceed_vec_size(<4 x i32> %a, ptr nocapture %b) local_unnamed_addr #0 {
; CHECK-LABEL: test_stores_exceed_vec_size:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r3, r2, .LCPI16_0@toc@ha
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 2
- store i32 %vecext, i32* %b, align 4
+ store i32 %vecext, ptr %b, align 4
%vecext1 = extractelement <4 x i32> %a, i32 3
- %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 1
- store i32 %vecext1, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 1
+ store i32 %vecext1, ptr %arrayidx2, align 4
%vecext3 = extractelement <4 x i32> %a, i32 0
- %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 2
- store i32 %vecext3, i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 3
- store i32 %vecext3, i32* %arrayidx6, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %b, i64 2
+ store i32 %vecext3, ptr %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %b, i64 3
+ store i32 %vecext3, ptr %arrayidx6, align 4
%vecext7 = extractelement <4 x i32> %a, i32 1
- %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 4
- store i32 %vecext7, i32* %arrayidx8, align 4
- %arrayidx10 = getelementptr inbounds i32, i32* %b, i64 5
- store i32 %vecext, i32* %arrayidx10, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %b, i64 4
+ store i32 %vecext7, ptr %arrayidx8, align 4
+ %arrayidx10 = getelementptr inbounds i32, ptr %b, i64 5
+ store i32 %vecext, ptr %arrayidx10, align 4
ret void
}
-define void @test_5_consecutive_stores_of_bytes(<16 x i8> %a, i8* nocapture %b) local_unnamed_addr #0 {
+define void @test_5_consecutive_stores_of_bytes(<16 x i8> %a, ptr nocapture %b) local_unnamed_addr #0 {
; CHECK-LABEL: test_5_consecutive_stores_of_bytes:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <16 x i8> %a, i32 4
- store i8 %vecext, i8* %b, align 1
+ store i8 %vecext, ptr %b, align 1
%vecext1 = extractelement <16 x i8> %a, i32 12
- %arrayidx2 = getelementptr inbounds i8, i8* %b, i64 1
- store i8 %vecext1, i8* %arrayidx2, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 1
+ store i8 %vecext1, ptr %arrayidx2, align 1
%vecext3 = extractelement <16 x i8> %a, i32 9
- %arrayidx4 = getelementptr inbounds i8, i8* %b, i64 2
- store i8 %vecext3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %b, i64 2
+ store i8 %vecext3, ptr %arrayidx4, align 1
%vecext5 = extractelement <16 x i8> %a, i32 7
- %arrayidx6 = getelementptr inbounds i8, i8* %b, i64 3
- store i8 %vecext5, i8* %arrayidx6, align 1
+ %arrayidx6 = getelementptr inbounds i8, ptr %b, i64 3
+ store i8 %vecext5, ptr %arrayidx6, align 1
%vecext7 = extractelement <16 x i8> %a, i32 6
- %arrayidx8 = getelementptr inbounds i8, i8* %b, i64 4
- store i8 %vecext7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %b, i64 4
+ store i8 %vecext7, ptr %arrayidx8, align 1
ret void
}
-define void @test_13_consecutive_stores_of_bytes(<16 x i8> %a, i8* nocapture %b) local_unnamed_addr #0 {
+define void @test_13_consecutive_stores_of_bytes(<16 x i8> %a, ptr nocapture %b) local_unnamed_addr #0 {
; CHECK-LABEL: test_13_consecutive_stores_of_bytes:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxswapd vs0, vs34
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <16 x i8> %a, i32 4
- store i8 %vecext, i8* %b, align 1
+ store i8 %vecext, ptr %b, align 1
%vecext1 = extractelement <16 x i8> %a, i32 12
- %arrayidx2 = getelementptr inbounds i8, i8* %b, i64 1
- store i8 %vecext1, i8* %arrayidx2, align 1
+ %arrayidx2 = getelementptr inbounds i8, ptr %b, i64 1
+ store i8 %vecext1, ptr %arrayidx2, align 1
%vecext3 = extractelement <16 x i8> %a, i32 9
- %arrayidx4 = getelementptr inbounds i8, i8* %b, i64 2
- store i8 %vecext3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %b, i64 2
+ store i8 %vecext3, ptr %arrayidx4, align 1
%vecext5 = extractelement <16 x i8> %a, i32 7
- %arrayidx6 = getelementptr inbounds i8, i8* %b, i64 3
- store i8 %vecext5, i8* %arrayidx6, align 1
+ %arrayidx6 = getelementptr inbounds i8, ptr %b, i64 3
+ store i8 %vecext5, ptr %arrayidx6, align 1
%vecext7 = extractelement <16 x i8> %a, i32 6
- %arrayidx8 = getelementptr inbounds i8, i8* %b, i64 4
- store i8 %vecext7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %b, i64 4
+ store i8 %vecext7, ptr %arrayidx8, align 1
%vecext9 = extractelement <16 x i8> %a, i32 0
- %arrayidx10 = getelementptr inbounds i8, i8* %b, i64 5
- store i8 %vecext9, i8* %arrayidx10, align 1
+ %arrayidx10 = getelementptr inbounds i8, ptr %b, i64 5
+ store i8 %vecext9, ptr %arrayidx10, align 1
%vecext11 = extractelement <16 x i8> %a, i32 11
- %arrayidx12 = getelementptr inbounds i8, i8* %b, i64 6
- store i8 %vecext11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %b, i64 6
+ store i8 %vecext11, ptr %arrayidx12, align 1
%vecext13 = extractelement <16 x i8> %a, i32 13
- %arrayidx14 = getelementptr inbounds i8, i8* %b, i64 7
- store i8 %vecext13, i8* %arrayidx14, align 1
+ %arrayidx14 = getelementptr inbounds i8, ptr %b, i64 7
+ store i8 %vecext13, ptr %arrayidx14, align 1
%vecext15 = extractelement <16 x i8> %a, i32 2
- %arrayidx16 = getelementptr inbounds i8, i8* %b, i64 8
- store i8 %vecext15, i8* %arrayidx16, align 1
+ %arrayidx16 = getelementptr inbounds i8, ptr %b, i64 8
+ store i8 %vecext15, ptr %arrayidx16, align 1
%vecext17 = extractelement <16 x i8> %a, i32 15
- %arrayidx18 = getelementptr inbounds i8, i8* %b, i64 9
- store i8 %vecext17, i8* %arrayidx18, align 1
+ %arrayidx18 = getelementptr inbounds i8, ptr %b, i64 9
+ store i8 %vecext17, ptr %arrayidx18, align 1
%vecext19 = extractelement <16 x i8> %a, i32 1
- %arrayidx20 = getelementptr inbounds i8, i8* %b, i64 10
- store i8 %vecext19, i8* %arrayidx20, align 1
+ %arrayidx20 = getelementptr inbounds i8, ptr %b, i64 10
+ store i8 %vecext19, ptr %arrayidx20, align 1
%vecext21 = extractelement <16 x i8> %a, i32 5
- %arrayidx22 = getelementptr inbounds i8, i8* %b, i64 11
- store i8 %vecext21, i8* %arrayidx22, align 1
+ %arrayidx22 = getelementptr inbounds i8, ptr %b, i64 11
+ store i8 %vecext21, ptr %arrayidx22, align 1
%vecext23 = extractelement <16 x i8> %a, i32 14
- %arrayidx24 = getelementptr inbounds i8, i8* %b, i64 12
- store i8 %vecext23, i8* %arrayidx24, align 1
+ %arrayidx24 = getelementptr inbounds i8, ptr %b, i64 12
+ store i8 %vecext23, ptr %arrayidx24, align 1
ret void
}
-define void @test_elements_from_two_vec(<4 x i32> %a, <4 x i32> %b, i32* nocapture %c) local_unnamed_addr #0 {
+define void @test_elements_from_two_vec(<4 x i32> %a, <4 x i32> %b, ptr nocapture %c) local_unnamed_addr #0 {
; CHECK-LABEL: test_elements_from_two_vec:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 2
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %a, i32 0
- %arrayidx = getelementptr inbounds i32, i32* %c, i64 1
- store i32 %vecext, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %c, i64 1
+ store i32 %vecext, ptr %arrayidx, align 4
%vecext1 = extractelement <4 x i32> %b, i32 1
- store i32 %vecext1, i32* %c, align 4
+ store i32 %vecext1, ptr %c, align 4
ret void
}
-define dso_local void @test_elements_from_three_vec(<4 x float> %a, <4 x float> %b, <4 x float> %c, float* nocapture %d) local_unnamed_addr #0 {
+define dso_local void @test_elements_from_three_vec(<4 x float> %a, <4 x float> %b, <4 x float> %c, ptr nocapture %d) local_unnamed_addr #0 {
; CHECK-LABEL: test_elements_from_three_vec:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxsldwi vs0, vs34, vs34, 3
; CHECK-P9-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %a, i32 3
- store float %vecext, float* %d, align 4
+ store float %vecext, ptr %d, align 4
%vecext1 = extractelement <4 x float> %b, i32 2
- %arrayidx2 = getelementptr inbounds float, float* %d, i64 1
- store float %vecext1, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %d, i64 1
+ store float %vecext1, ptr %arrayidx2, align 4
%vecext3 = extractelement <4 x float> %c, i32 1
- %arrayidx4 = getelementptr inbounds float, float* %d, i64 2
- store float %vecext3, float* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %d, i64 2
+ store float %vecext3, ptr %arrayidx4, align 4
ret void
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names < %s | FileCheck %s
-@z = external local_unnamed_addr global i32*, align 8
+@z = external local_unnamed_addr global ptr, align 8
; Function Attrs: norecurse nounwind readonly
define signext i32 @_Z2tcii(i32 signext %x, i32 signext %y) local_unnamed_addr #0 {
entry:
- %0 = load i32*, i32** @z, align 8
+ %0 = load ptr, ptr @z, align 8
%add = add nsw i32 %y, %x
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, i32* %0, i64 %idxprom
- %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %0, i64 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
ret i32 %1
; CHECK-LABEL: @_Z2tcii
; CHECK: extswsli {{r[0-9]+}}, {{r[0-9]+}}, 2
@a1 = local_unnamed_addr global [3 x fp128] zeroinitializer, align 16
; Function Attrs: norecurse nounwind readonly
-define fp128 @testArray_01(fp128* nocapture readonly %sa) {
+define fp128 @testArray_01(ptr nocapture readonly %sa) {
; CHECK-LABEL: testArray_01:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 32(r3)
; CHECK-P8-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds fp128, fp128* %sa, i64 2
- %0 = load fp128, fp128* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds fp128, ptr %sa, i64 2
+ %0 = load fp128, ptr %arrayidx, align 16
ret fp128 %0
}
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds ([3 x fp128], [3 x fp128]* @a1,
+ %0 = load fp128, ptr getelementptr inbounds ([3 x fp128], ptr @a1,
i64 0, i64 2), align 16
ret fp128 %0
}
; Since we can only pass a max of 8 float128 value in VSX registers, ensure we
; store to stack if passing more.
; Function Attrs: norecurse nounwind readonly
-define fp128 @testStruct_03(%struct.With9fp128params* byval(%struct.With9fp128params) nocapture readonly align 16 %a) {
+define fp128 @testStruct_03(ptr byval(%struct.With9fp128params) nocapture readonly align 16 %a) {
; CHECK-LABEL: testStruct_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 128(r1)
entry:
%a7 = getelementptr inbounds %struct.With9fp128params,
- %struct.With9fp128params* %a, i64 0, i32 6
- %0 = load fp128, fp128* %a7, align 16
+ ptr %a, i64 0, i32 6
+ %0 = load fp128, ptr %a7, align 16
ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly
-define fp128 @testNestedAggregate(%struct.MixedC* byval(%struct.MixedC) nocapture readonly align 16 %a) {
+define fp128 @testNestedAggregate(ptr byval(%struct.MixedC) nocapture readonly align 16 %a) {
; CHECK-LABEL: testNestedAggregate:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r8, 72(r1)
; CHECK-P8-NEXT: blr
entry:
- %c = getelementptr inbounds %struct.MixedC, %struct.MixedC* %a, i64 0, i32 1, i32 1
- %0 = load fp128, fp128* %c, align 16
+ %c = getelementptr inbounds %struct.MixedC, ptr %a, i64 0, i32 1, i32 1
+ %0 = load fp128, ptr %c, align 16
ret fp128 %0
}
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %ap = alloca i8*, align 8
- %0 = bitcast i8** %ap to i8*
- call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) #2
+ %ap = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap) #2
%cmp = icmp slt i32 %count, 1
br i1 %cmp, label %cleanup, label %if.end
if.end: ; preds = %entry
- call void @llvm.va_start(i8* nonnull %0)
- %argp.cur = load i8*, i8** %ap, align 8
- %argp.next = getelementptr inbounds i8, i8* %argp.cur, i64 16
- %1 = bitcast i8* %argp.cur to fp128*
- %2 = load fp128, fp128* %1, align 8
- %add = fadd fp128 %2, 0xL00000000000000000000000000000000
- %argp.next3 = getelementptr inbounds i8, i8* %argp.cur, i64 32
- store i8* %argp.next3, i8** %ap, align 8
- %3 = bitcast i8* %argp.next to fp128*
- %4 = load fp128, fp128* %3, align 8
- %add4 = fadd fp128 %add, %4
- call void @llvm.va_end(i8* nonnull %0)
+ call void @llvm.va_start(ptr nonnull %ap)
+ %argp.cur = load ptr, ptr %ap, align 8
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 16
+ %0 = load fp128, ptr %argp.cur, align 8
+ %add = fadd fp128 %0, 0xL00000000000000000000000000000000
+ %argp.next3 = getelementptr inbounds i8, ptr %argp.cur, i64 32
+ store ptr %argp.next3, ptr %ap, align 8
+ %1 = load fp128, ptr %argp.next, align 8
+ %add4 = fadd fp128 %add, %1
+ call void @llvm.va_end(ptr nonnull %ap)
br label %cleanup
cleanup: ; preds = %entry, %if.end
%retval.0 = phi fp128 [ %add4, %if.end ], [ 0xL00000000000000000000000000000000, %entry ]
- call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) #2
+ call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap) #2
ret fp128 %retval.0
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
; RUN: -check-prefix=CHECK-P8
; Function Attrs: norecurse nounwind
-define dso_local void @qpAdd(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpAdd(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpAdd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%add = fadd fp128 %0, %0
- store fp128 %add, fp128* %res, align 16
+ store fp128 %add, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define dso_local void @qpSub(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpSub(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpSub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%sub = fsub fp128 %0, %0
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define dso_local void @qpMul(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpMul(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpMul:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%mul = fmul fp128 %0, %0
- store fp128 %mul, fp128* %res, align 16
+ store fp128 %mul, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define dso_local void @qpDiv(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpDiv(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpDiv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%div = fdiv fp128 %0, %0
- store fp128 %div, fp128* %res, align 16
+ store fp128 %div, ptr %res, align 16
ret void
}
-define dso_local void @testLdNSt(i8* nocapture readonly %PtrC, fp128* nocapture %PtrF) {
+define dso_local void @testLdNSt(ptr nocapture readonly %PtrC, ptr nocapture %PtrF) {
; CHECK-LABEL: testLdNSt:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r5, 4
; CHECK-P8-NEXT: stxvd2x vs0, 0, r3
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %PtrC, i64 4
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- %2 = bitcast fp128* %PtrF to i8*
- %add.ptr1 = getelementptr inbounds i8, i8* %2, i64 8
- %3 = bitcast i8* %add.ptr1 to fp128*
- store fp128 %1, fp128* %3, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %PtrC, i64 4
+ %0 = load fp128, ptr %add.ptr, align 16
+ %add.ptr1 = getelementptr inbounds i8, ptr %PtrF, i64 8
+ store fp128 %0, ptr %add.ptr1, align 16
ret void
}
-define dso_local void @qpSqrt(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpSqrt(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpSqrt:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.sqrt.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.sqrt.f128(fp128 %Val)
-define dso_local void @qpCpsgn(fp128* nocapture readonly %a, fp128* nocapture readonly %b,
+define dso_local void @qpCpsgn(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-LABEL: qpCpsgn:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-P8-NEXT: stxvd2x vs0, 0, r5
; CHECK-P8-NEXT: blr
- fp128* nocapture %res) {
+ ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%2 = tail call fp128 @llvm.copysign.f128(fp128 %0, fp128 %1)
- store fp128 %2, fp128* %res, align 16
+ store fp128 %2, ptr %res, align 16
ret void
}
declare fp128 @llvm.copysign.f128(fp128 %Mag, fp128 %Sgn)
-define dso_local void @qpAbs(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpAbs(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpAbs:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: stxvd2x vs0, 0, r4
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.fabs.f128(fp128 %Val)
-define dso_local void @qpNAbs(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpNAbs(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpNAbs:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: stxvd2x vs0, 0, r4
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
%neg = fsub fp128 0xL00000000000000008000000000000000, %1
- store fp128 %neg, fp128* %res, align 16
+ store fp128 %neg, ptr %res, align 16
ret void
}
-define dso_local void @qpNeg(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpNeg(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpNeg:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: stxvd2x vs0, 0, r4
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%sub = fsub fp128 0xL00000000000000008000000000000000, %0
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
-define fp128 @qp_sin(fp128* nocapture readonly %a) {
+define fp128 @qp_sin(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_sin:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.sin.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.sin.f128(fp128 %Val)
-define fp128 @qp_cos(fp128* nocapture readonly %a) {
+define fp128 @qp_cos(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_cos:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.cos.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.cos.f128(fp128 %Val)
-define fp128 @qp_log(fp128* nocapture readonly %a) {
+define fp128 @qp_log(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_log:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.log.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.log.f128(fp128 %Val)
-define fp128 @qp_log10(fp128* nocapture readonly %a) {
+define fp128 @qp_log10(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_log10:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.log10.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.log10.f128(fp128 %Val)
-define fp128 @qp_log2(fp128* nocapture readonly %a) {
+define fp128 @qp_log2(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_log2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.log2.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.log2.f128(fp128 %Val)
-define fp128 @qp_minnum(fp128* nocapture readonly %a,
+define fp128 @qp_minnum(ptr nocapture readonly %a,
; CHECK-LABEL: qp_minnum:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b) {
+ ptr nocapture readonly %b) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%2 = tail call fp128 @llvm.minnum.f128(fp128 %0, fp128 %1)
ret fp128 %2
}
declare fp128 @llvm.minnum.f128(fp128 %Val0, fp128 %Val1)
-define fp128 @qp_maxnum(fp128* nocapture readonly %a,
+define fp128 @qp_maxnum(ptr nocapture readonly %a,
; CHECK-LABEL: qp_maxnum:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b) {
+ ptr nocapture readonly %b) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%2 = tail call fp128 @llvm.maxnum.f128(fp128 %0, fp128 %1)
ret fp128 %2
}
declare fp128 @llvm.maxnum.f128(fp128 %Val0, fp128 %Val1)
-define fp128 @qp_pow(fp128* nocapture readonly %a,
+define fp128 @qp_pow(ptr nocapture readonly %a,
; CHECK-LABEL: qp_pow:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b) {
+ ptr nocapture readonly %b) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%2 = tail call fp128 @llvm.pow.f128(fp128 %0, fp128 %1)
ret fp128 %2
}
declare fp128 @llvm.pow.f128(fp128 %Val, fp128 %Power)
-define fp128 @qp_exp(fp128* nocapture readonly %a) {
+define fp128 @qp_exp(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_exp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.exp.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.exp.f128(fp128 %Val)
-define fp128 @qp_exp2(fp128* nocapture readonly %a) {
+define fp128 @qp_exp2(ptr nocapture readonly %a) {
; CHECK-LABEL: qp_exp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.exp2.f128(fp128 %0)
ret fp128 %1
}
declare fp128 @llvm.exp2.f128(fp128 %Val)
-define dso_local void @qp_powi(fp128* nocapture readonly %a, i32* nocapture readonly %b,
+define dso_local void @qp_powi(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-LABEL: qp_powi:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture %res) {
+ ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load i32, i32* %b, align 8
+ %0 = load fp128, ptr %a, align 16
+ %1 = load i32, ptr %b, align 8
%2 = tail call fp128 @llvm.powi.f128.i32(fp128 %0, i32 %1)
- store fp128 %2, fp128* %res, align 16
+ store fp128 %2, ptr %res, align 16
ret void
}
declare fp128 @llvm.powi.f128.i32(fp128 %Val, i32 %power)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a, align 16
- %1 = load fp128, fp128* @b, align 16
+ %0 = load fp128, ptr @a, align 16
+ %1 = load fp128, ptr @b, align 16
%rem = frem fp128 %0, %1
ret fp128 %rem
}
-define dso_local void @qpCeil(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpCeil(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpCeil:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.ceil.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.ceil.f128(fp128 %Val)
-define dso_local void @qpFloor(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpFloor(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpFloor:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.floor.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.floor.f128(fp128 %Val)
-define dso_local void @qpTrunc(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpTrunc(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpTrunc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.trunc.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.trunc.f128(fp128 %Val)
-define dso_local void @qpRound(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpRound(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpRound:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.round.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.round.f128(fp128 %Val)
-define dso_local void @qpLRound(fp128* nocapture readonly %a, i32* nocapture %res) {
+define dso_local void @qpLRound(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpLRound:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call i32 @llvm.lround.f128(fp128 %0)
- store i32 %1, i32* %res, align 16
+ store i32 %1, ptr %res, align 16
ret void
}
declare i32 @llvm.lround.f128(fp128 %Val)
-define dso_local void @qpLLRound(fp128* nocapture readonly %a, i64* nocapture %res) {
+define dso_local void @qpLLRound(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpLLRound:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call i64 @llvm.llround.f128(fp128 %0)
- store i64 %1, i64* %res, align 16
+ store i64 %1, ptr %res, align 16
ret void
}
declare i64 @llvm.llround.f128(fp128 %Val)
-define dso_local void @qpRint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpRint(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpRint:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.rint.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.rint.f128(fp128 %Val)
-define dso_local void @qpLRint(fp128* nocapture readonly %a, i32* nocapture %res) {
+define dso_local void @qpLRint(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpLRint:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call i32 @llvm.lrint.f128(fp128 %0)
- store i32 %1, i32* %res, align 16
+ store i32 %1, ptr %res, align 16
ret void
}
declare i32 @llvm.lrint.f128(fp128 %Val)
-define dso_local void @qpLLRint(fp128* nocapture readonly %a, i64* nocapture %res) {
+define dso_local void @qpLLRint(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpLLRint:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call i64 @llvm.llrint.f128(fp128 %0)
- store i64 %1, i64* %res, align 16
+ store i64 %1, ptr %res, align 16
ret void
}
declare i64 @llvm.llrint.f128(fp128 %Val)
-define dso_local void @qpNearByInt(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define dso_local void @qpNearByInt(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qpNearByInt:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.nearbyint.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.nearbyint.f128(fp128 %Val)
-define dso_local void @qpFMA(fp128* %a, fp128* %b, fp128* %c, fp128* %res) {
+define dso_local void @qpFMA(ptr %a, ptr %b, ptr %c, ptr %res) {
; CHECK-LABEL: qpFMA:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %1, fp128 %2)
- store fp128 %3, fp128* %res, align 16
+ store fp128 %3, ptr %res, align 16
ret void
}
declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
}
; Function Attrs: norecurse nounwind readnone
-define i64 @checkBitcast(fp128 %in, <2 x i64> %in2, <2 x i64> *%out) local_unnamed_addr {
+define i64 @checkBitcast(fp128 %in, <2 x i64> %in2, ptr %out) local_unnamed_addr {
; CHECK-LABEL: checkBitcast:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mfvsrld r3, v2
%0 = bitcast fp128 %in to <2 x i64>
%1 = extractelement <2 x i64> %0, i64 0
%2 = add <2 x i64> %0, %in2
- store <2 x i64> %2, <2 x i64> *%out, align 16
+ store <2 x i64> %2, ptr %out, align 16
ret i64 %1
}
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp oeq fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1
%lnot = xor i1 %cmp, true
%lnot.ext = zext i1 %lnot to i32
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp une fp128 %0, %1
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ogt fp128 %0, %1
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp olt fp128 %0, %1
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp oge fp128 %0, %1
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp ole fp128 %0, %1
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @a_qp, align 16
- %1 = load fp128, fp128* @b_qp, align 16
+ %0 = load fp128, ptr @a_qp, align 16
+ %1 = load fp128, ptr @b_qp, align 16
%cmp = fcmp oeq fp128 %0, %1
%cond = select i1 %cmp, fp128 %0, fp128 %1
ret fp128 %cond
@ubMem = local_unnamed_addr global [5 x i8] c"\05\02\03\04\00", align 1
; Function Attrs: norecurse nounwind
-define void @sdwConv2qp(fp128* nocapture %a, i64 %b) {
+define void @sdwConv2qp(ptr nocapture %a, i64 %b) {
; CHECK-LABEL: sdwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrd v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwConv2qp_01(fp128* nocapture %a, i128 %b) {
+define void @sdwConv2qp_01(ptr nocapture %a, i128 %b) {
; CHECK-LABEL: sdwConv2qp_01:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i128 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwConv2qp_02(fp128* nocapture %a) {
+define void @sdwConv2qp_02(ptr nocapture %a) {
; CHECK-LABEL: sdwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds
- ([5 x i64], [5 x i64]* @mem, i64 0, i64 2), align 8
+ %0 = load i64, ptr getelementptr inbounds
+ ([5 x i64], ptr @mem, i64 0, i64 2), align 8
%conv = sitofp i64 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
+define void @sdwConv2qp_03(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: sdwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r4)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* %b, align 8
+ %0 = load i64, ptr %b, align 8
%conv = sitofp i64 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwConv2qp_04(fp128* nocapture %a, i1 %b) {
+define void @sdwConv2qp_04(ptr nocapture %a, i1 %b) {
; CHECK-LABEL: sdwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi. r4, r4, 1
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i1 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwConv2qp(fp128* nocapture %a, i64 %b) {
+define void @udwConv2qp(ptr nocapture %a, i64 %b) {
; CHECK-LABEL: udwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrd v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwConv2qp_01(fp128* nocapture %a, i128 %b) {
+define void @udwConv2qp_01(ptr nocapture %a, i128 %b) {
; CHECK-LABEL: udwConv2qp_01:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i128 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwConv2qp_02(fp128* nocapture %a) {
+define void @udwConv2qp_02(ptr nocapture %a) {
; CHECK-LABEL: udwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC1@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds
- ([5 x i64], [5 x i64]* @umem, i64 0, i64 4), align 8
+ %0 = load i64, ptr getelementptr inbounds
+ ([5 x i64], ptr @umem, i64 0, i64 4), align 8
%conv = uitofp i64 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
+define void @udwConv2qp_03(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: udwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r4)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* %b, align 8
+ %0 = load i64, ptr %b, align 8
%conv = uitofp i64 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwConv2qp_04(fp128* nocapture %a, i1 %b) {
+define void @udwConv2qp_04(ptr nocapture %a, i1 %b) {
; CHECK-LABEL: udwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrlwi r4, r4, 31
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i1 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define fp128* @sdwConv2qp_testXForm(fp128* returned %sink,
+define ptr @sdwConv2qp_testXForm(ptr returned %sink,
; CHECK-LABEL: sdwConv2qp_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis r5, 1
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i8* nocapture readonly %a) {
+ ptr nocapture readonly %a) {
entry:
- %add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to fp128
- store fp128 %conv, fp128* %sink, align 16
- ret fp128* %sink
+ %add.ptr = getelementptr inbounds i8, ptr %a, i64 73333
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to fp128
+ store fp128 %conv, ptr %sink, align 16
+ ret ptr %sink
}
; Function Attrs: norecurse nounwind
-define fp128* @udwConv2qp_testXForm(fp128* returned %sink,
+define ptr @udwConv2qp_testXForm(ptr returned %sink,
; CHECK-LABEL: udwConv2qp_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis r5, 1
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i8* nocapture readonly %a) {
+ ptr nocapture readonly %a) {
entry:
- %add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to fp128
- store fp128 %conv, fp128* %sink, align 16
- ret fp128* %sink
+ %add.ptr = getelementptr inbounds i8, ptr %a, i64 73333
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to fp128
+ store fp128 %conv, ptr %sink, align 16
+ ret ptr %sink
}
; Function Attrs: norecurse nounwind
-define void @swConv2qp(fp128* nocapture %a, i32 signext %b) {
+define void @swConv2qp(ptr nocapture %a, i32 signext %b) {
; CHECK-LABEL: swConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @swConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
+define void @swConv2qp_02(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: swConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsiwax v2, 0, r4
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %b, align 4
%conv = sitofp i32 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @swConv2qp_03(fp128* nocapture %a) {
+define void @swConv2qp_03(ptr nocapture %a) {
; CHECK-LABEL: swConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC2@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds
- ([5 x i32], [5 x i32]* @swMem, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds
+ ([5 x i32], ptr @swMem, i64 0, i64 3), align 4
%conv = sitofp i32 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwConv2qp(fp128* nocapture %a, i32 zeroext %b) {
+define void @uwConv2qp(ptr nocapture %a, i32 zeroext %b) {
; CHECK-LABEL: uwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
+define void @uwConv2qp_02(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: uwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsiwzx v2, 0, r4
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %b, align 4
%conv = uitofp i32 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwConv2qp_03(fp128* nocapture %a) {
+define void @uwConv2qp_03(ptr nocapture %a) {
; CHECK-LABEL: uwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC3@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds
- ([5 x i32], [5 x i32]* @uwMem, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds
+ ([5 x i32], ptr @uwMem, i64 0, i64 3), align 4
%conv = uitofp i32 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwConv2qp_04(fp128* nocapture %a,
+define void @uwConv2qp_04(ptr nocapture %a,
; CHECK-LABEL: uwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r5, 0(r5)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i32 zeroext %b, i32* nocapture readonly %c) {
+ i32 zeroext %b, ptr nocapture readonly %c) {
entry:
- %0 = load i32, i32* %c, align 4
+ %0 = load i32, ptr %c, align 4
%add = add i32 %0, %b
%conv = uitofp i32 %add to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwConv2qp(fp128* nocapture %a, i16 zeroext %b) {
+define void @uhwConv2qp(ptr nocapture %a, i16 zeroext %b) {
; CHECK-LABEL: uhwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwConv2qp_02(fp128* nocapture %a, i16* nocapture readonly %b) {
+define void @uhwConv2qp_02(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: uhwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsihzx v2, 0, r4
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* %b, align 2
+ %0 = load i16, ptr %b, align 2
%conv = uitofp i16 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwConv2qp_03(fp128* nocapture %a) {
+define void @uhwConv2qp_03(ptr nocapture %a) {
; CHECK-LABEL: uhwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC4@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds
- ([5 x i16], [5 x i16]* @uhwMem, i64 0, i64 3), align 2
+ %0 = load i16, ptr getelementptr inbounds
+ ([5 x i16], ptr @uhwMem, i64 0, i64 3), align 2
%conv = uitofp i16 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwConv2qp_04(fp128* nocapture %a, i16 zeroext %b,
+define void @uhwConv2qp_04(ptr nocapture %a, i16 zeroext %b,
; CHECK-LABEL: uhwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r5, 0(r5)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i16* nocapture readonly %c) {
+ ptr nocapture readonly %c) {
entry:
%conv = zext i16 %b to i32
- %0 = load i16, i16* %c, align 2
+ %0 = load i16, ptr %c, align 2
%conv1 = zext i16 %0 to i32
%add = add nuw nsw i32 %conv1, %conv
%conv2 = sitofp i32 %add to fp128
- store fp128 %conv2, fp128* %a, align 16
+ store fp128 %conv2, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubConv2qp(fp128* nocapture %a, i8 zeroext %b) {
+define void @ubConv2qp(ptr nocapture %a, i8 zeroext %b) {
; CHECK-LABEL: ubConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %b to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubConv2qp_02(fp128* nocapture %a, i8* nocapture readonly %b) {
+define void @ubConv2qp_02(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: ubConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsibzx v2, 0, r4
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* %b, align 1
+ %0 = load i8, ptr %b, align 1
%conv = uitofp i8 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubConv2qp_03(fp128* nocapture %a) {
+define void @ubConv2qp_03(ptr nocapture %a) {
; CHECK-LABEL: ubConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC5@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds
- ([5 x i8], [5 x i8]* @ubMem, i64 0, i64 2), align 1
+ %0 = load i8, ptr getelementptr inbounds
+ ([5 x i8], ptr @ubMem, i64 0, i64 2), align 1
%conv = uitofp i8 %0 to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubConv2qp_04(fp128* nocapture %a, i8 zeroext %b,
+define void @ubConv2qp_04(ptr nocapture %a, i8 zeroext %b,
; CHECK-LABEL: ubConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r5, 0(r5)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i8* nocapture readonly %c) {
+ ptr nocapture readonly %c) {
entry:
%conv = zext i8 %b to i32
- %0 = load i8, i8* %c, align 1
+ %0 = load i8, ptr %c, align 1
%conv1 = zext i8 %0 to i32
%add = add nuw nsw i32 %conv1, %conv
%conv2 = sitofp i32 %add to fp128
- store fp128 %conv2, fp128* %a, align 16
+ store fp128 %conv2, ptr %a, align 16
ret void
}
@f128global = global fp128 0xL300000000000000040089CA8F5C28F5C, align 16
; Function Attrs: norecurse nounwind readonly
-define double @qpConv2dp(fp128* nocapture readonly %a) {
+define double @qpConv2dp(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2dp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptrunc fp128 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2dp_02(double* nocapture %res) {
+define void @qpConv2dp_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2dp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC6@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @f128global, align 16
+ %0 = load fp128, ptr @f128global, align 16
%conv = fptrunc fp128 %0 to double
- store double %conv, double* %res, align 8
+ store double %conv, ptr %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2dp_03(double* nocapture %res, i32 signext %idx) {
+define void @qpConv2dp_03(ptr nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2dp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC7@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 0), align 16
+ %0 = load fp128, ptr @f128Array, align 16
%conv = fptrunc fp128 %0 to double
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds double, double* %res, i64 %idxprom
- store double %conv, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %res, i64 %idxprom
+ store double %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2dp_04(fp128* nocapture readonly %a, fp128* nocapture readonly %b, double* nocapture %res) {
+define void @qpConv2dp_04(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %res) {
; CHECK-LABEL: qpConv2dp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptrunc fp128 %add to double
- store double %conv, double* %res, align 8
+ store double %conv, ptr %res, align 8
ret void
}
; Convert QP to SP
; Function Attrs: norecurse nounwind readonly
-define float @qpConv2sp(fp128* nocapture readonly %a) {
+define float @qpConv2sp(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptrunc fp128 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sp_02(float* nocapture %res) {
+define void @qpConv2sp_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2sp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC6@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* @f128global, align 16
+ %0 = load fp128, ptr @f128global, align 16
%conv = fptrunc fp128 %0 to float
- store float %conv, float* %res, align 4
+ store float %conv, ptr %res, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sp_03(float* nocapture %res, i32 signext %idx) {
+define void @qpConv2sp_03(ptr nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2sp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC7@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 3), align 16
+ %0 = load fp128, ptr getelementptr inbounds ([4 x fp128], ptr @f128Array, i64 0, i64 3), align 16
%conv = fptrunc fp128 %0 to float
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds float, float* %res, i64 %idxprom
- store float %conv, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %res, i64 %idxprom
+ store float %conv, ptr %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sp_04(fp128* nocapture readonly %a, fp128* nocapture readonly %b, float* nocapture %res) {
+define void @qpConv2sp_04(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %res) {
; CHECK-LABEL: qpConv2sp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptrunc fp128 %add to float
- store float %conv, float* %res, align 4
+ store float %conv, ptr %res, align 4
ret void
}
}
; Function Attrs: norecurse nounwind
-define void @dpConv2qp_02(double* nocapture readonly %a) {
+define void @dpConv2qp_02(ptr nocapture readonly %a) {
; CHECK-LABEL: dpConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fpext double %0 to fp128
- store fp128 %conv, fp128* @f128Glob, align 16
+ store fp128 %conv, ptr @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2qp_02b(double* nocapture readonly %a, i32 signext %idx) {
+define void @dpConv2qp_02b(ptr nocapture readonly %a, i32 signext %idx) {
; CHECK-LABEL: dpConv2qp_02b:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r4, r4, 3
; CHECK-P8-NEXT: blr
entry:
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %a, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%conv = fpext double %0 to fp128
- store fp128 %conv, fp128* @f128Glob, align 16
+ store fp128 %conv, ptr @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2qp_03(fp128* nocapture %res, i32 signext %idx, double %a) {
+define void @dpConv2qp_03(ptr nocapture %res, i32 signext %idx, double %a) {
; CHECK-LABEL: dpConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscpsgndp v2, f1, f1
entry:
%conv = fpext double %a to fp128
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds fp128, fp128* %res, i64 %idxprom
- store fp128 %conv, fp128* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds fp128, ptr %res, i64 %idxprom
+ store fp128 %conv, ptr %arrayidx, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2qp_04(double %a, fp128* nocapture %res) {
+define void @dpConv2qp_04(double %a, ptr nocapture %res) {
; CHECK-LABEL: dpConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscpsgndp v2, f1, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fpext double %a to fp128
- store fp128 %conv, fp128* %res, align 16
+ store fp128 %conv, ptr %res, align 16
ret void
}
}
; Function Attrs: norecurse nounwind
-define void @spConv2qp_02(float* nocapture readonly %a) {
+define void @spConv2qp_02(ptr nocapture readonly %a) {
; CHECK-LABEL: spConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxssp v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fpext float %0 to fp128
- store fp128 %conv, fp128* @f128Glob, align 16
+ store fp128 %conv, ptr @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2qp_02b(float* nocapture readonly %a, i32 signext %idx) {
+define void @spConv2qp_02b(ptr nocapture readonly %a, i32 signext %idx) {
; CHECK-LABEL: spConv2qp_02b:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r4, r4, 2
; CHECK-P8-NEXT: blr
entry:
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 4
%conv = fpext float %0 to fp128
- store fp128 %conv, fp128* @f128Glob, align 16
+ store fp128 %conv, ptr @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2qp_03(fp128* nocapture %res, i32 signext %idx, float %a) {
+define void @spConv2qp_03(ptr nocapture %res, i32 signext %idx, float %a) {
; CHECK-LABEL: spConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscpsgndp v2, f1, f1
entry:
%conv = fpext float %a to fp128
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds fp128, fp128* %res, i64 %idxprom
- store fp128 %conv, fp128* %arrayidx, align 16
+ %arrayidx = getelementptr inbounds fp128, ptr %res, i64 %idxprom
+ store fp128 %conv, ptr %arrayidx, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2qp_04(float %a, fp128* nocapture %res) {
+define void @spConv2qp_04(float %a, ptr nocapture %res) {
; CHECK-LABEL: spConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscpsgndp v2, f1, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fpext float %a to fp128
- store fp128 %conv, fp128* %res, align 16
+ store fp128 %conv, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvdp2sw2qp(double %val, fp128* nocapture %res) {
+define void @cvdp2sw2qp(double %val, ptr nocapture %res) {
; CHECK-LABEL: cvdp2sw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws v2, f1
entry:
%conv = fptosi double %val to i32
%conv1 = sitofp i32 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvdp2sdw2qp(double %val, fp128* nocapture %res) {
+define void @cvdp2sdw2qp(double %val, ptr nocapture %res) {
; CHECK-LABEL: cvdp2sdw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds v2, f1
entry:
%conv = fptosi double %val to i64
%conv1 = sitofp i64 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvsp2sw2qp(float %val, fp128* nocapture %res) {
+define void @cvsp2sw2qp(float %val, ptr nocapture %res) {
; CHECK-LABEL: cvsp2sw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws v2, f1
entry:
%conv = fptosi float %val to i32
%conv1 = sitofp i32 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvsp2sdw2qp(float %val, fp128* nocapture %res) {
+define void @cvsp2sdw2qp(float %val, ptr nocapture %res) {
; CHECK-LABEL: cvsp2sdw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds v2, f1
entry:
%conv = fptosi float %val to i64
%conv1 = sitofp i64 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvdp2uw2qp(double %val, fp128* nocapture %res) {
+define void @cvdp2uw2qp(double %val, ptr nocapture %res) {
; CHECK-LABEL: cvdp2uw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
entry:
%conv = fptoui double %val to i32
%conv1 = uitofp i32 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvdp2udw2qp(double %val, fp128* nocapture %res) {
+define void @cvdp2udw2qp(double %val, ptr nocapture %res) {
; CHECK-LABEL: cvdp2udw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds v2, f1
entry:
%conv = fptoui double %val to i64
%conv1 = uitofp i64 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvsp2uw2qp(float %val, fp128* nocapture %res) {
+define void @cvsp2uw2qp(float %val, ptr nocapture %res) {
; CHECK-LABEL: cvsp2uw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
entry:
%conv = fptoui float %val to i32
%conv1 = uitofp i32 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @cvsp2udw2qp(float %val, fp128* nocapture %res) {
+define void @cvsp2udw2qp(float %val, ptr nocapture %res) {
; CHECK-LABEL: cvsp2udw2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds v2, f1
entry:
%conv = fptoui float %val to i64
%conv1 = uitofp i64 %conv to fp128
- store fp128 %conv1, fp128* %res, align 16
+ store fp128 %conv1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind readonly
-define i128 @qpConv2i128(fp128* nocapture readonly %a) {
+define i128 @qpConv2i128(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2i128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i128
ret i128 %conv
}
; Function Attrs: norecurse nounwind readonly
-define i128 @qpConv2ui128(fp128* nocapture readonly %a) {
+define i128 @qpConv2ui128(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2ui128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i128
ret i128 %conv
}
; Function Attrs: norecurse nounwind readonly
-define i1 @qpConv2ui1(fp128* nocapture readonly %a) {
+define i1 @qpConv2ui1(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2ui1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i1
ret i1 %conv
}
; Function Attrs: norecurse nounwind readonly
-define i1 @qpConv2si1(fp128* nocapture readonly %a) {
+define i1 @qpConv2si1(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2si1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i1
ret i1 %conv
}
; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s \
; RUN: -check-prefix=CHECK-P8
-define void @qpFmadd(fp128* nocapture readonly %a, fp128* nocapture %b,
+define void @qpFmadd(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: qpFmadd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%madd = tail call fp128 @llvm.fmuladd.f128(fp128 %0, fp128 %1, fp128 %2)
- store fp128 %madd, fp128* %res, align 16
+ store fp128 %madd, ptr %res, align 16
ret void
}
declare fp128 @llvm.fmuladd.f128(fp128, fp128, fp128)
; Function Attrs: norecurse nounwind
-define void @qpFmadd_02(fp128* nocapture readonly %a,
+define void @qpFmadd_02(ptr nocapture readonly %a,
; CHECK-LABEL: qpFmadd_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%mul = fmul contract fp128 %1, %2
%add = fadd contract fp128 %0, %mul
- store fp128 %add, fp128* %res, align 16
+ store fp128 %add, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFmadd_03(fp128* nocapture readonly %a,
+define void @qpFmadd_03(ptr nocapture readonly %a,
; CHECK-LABEL: qpFmadd_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%mul = fmul contract fp128 %0, %1
- %2 = load fp128, fp128* %c, align 16
+ %2 = load fp128, ptr %c, align 16
%add = fadd contract fp128 %mul, %2
- store fp128 %add, fp128* %res, align 16
+ store fp128 %add, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFnmadd(fp128* nocapture readonly %a,
+define void @qpFnmadd(ptr nocapture readonly %a,
; CHECK-LABEL: qpFnmadd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%mul = fmul contract fp128 %1, %2
%add = fadd contract fp128 %0, %mul
%sub = fsub fp128 0xL00000000000000008000000000000000, %add
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFnmadd_02(fp128* nocapture readonly %a,
+define void @qpFnmadd_02(ptr nocapture readonly %a,
; CHECK-LABEL: qpFnmadd_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%mul = fmul contract fp128 %0, %1
- %2 = load fp128, fp128* %c, align 16
+ %2 = load fp128, ptr %c, align 16
%add = fadd contract fp128 %mul, %2
%sub = fsub fp128 0xL00000000000000008000000000000000, %add
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFmsub(fp128* nocapture readonly %a,
+define void @qpFmsub(ptr nocapture readonly %a,
; CHECK-LABEL: qpFmsub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%mul = fmul contract fp128 %1, %2
%sub = fsub contract nsz fp128 %0, %mul
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFmsub_02(fp128* nocapture readonly %a,
+define void @qpFmsub_02(ptr nocapture readonly %a,
; CHECK-LABEL: qpFmsub_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%mul = fmul contract fp128 %0, %1
- %2 = load fp128, fp128* %c, align 16
+ %2 = load fp128, ptr %c, align 16
%sub = fsub contract fp128 %mul, %2
- store fp128 %sub, fp128* %res, align 16
+ store fp128 %sub, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFnmsub(fp128* nocapture readonly %a,
+define void @qpFnmsub(ptr nocapture readonly %a,
; CHECK-LABEL: qpFnmsub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
- %2 = load fp128, fp128* %c, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
+ %2 = load fp128, ptr %c, align 16
%mul = fmul contract fp128 %1, %2
%sub = fsub contract fp128 %0, %mul
%sub1 = fsub fp128 0xL00000000000000008000000000000000, %sub
- store fp128 %sub1, fp128* %res, align 16
+ store fp128 %sub1, ptr %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpFnmsub_02(fp128* nocapture readonly %a,
+define void @qpFnmsub_02(ptr nocapture readonly %a,
; CHECK-LABEL: qpFnmsub_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b,
- fp128* nocapture readonly %c, fp128* nocapture %res) {
+ ptr nocapture readonly %b,
+ ptr nocapture readonly %c, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%mul = fmul contract fp128 %0, %1
- %2 = load fp128, fp128* %c, align 16
+ %2 = load fp128, ptr %c, align 16
%sub = fsub contract fp128 %mul, %2
%sub1 = fsub fp128 0xL00000000000000008000000000000000, %sub
- store fp128 %sub1, fp128* %res, align 16
+ store fp128 %sub1, ptr %res, align 16
ret void
}
; array of float128 types
; Function Attrs: norecurse nounwind readonly
-define fp128 @fp128Array(fp128* nocapture readonly %farray,
+define fp128 @fp128Array(ptr nocapture readonly %farray,
; CHECK-LABEL: fp128Array:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sldi r4, r4, 4
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i32 signext %loopcnt, fp128* nocapture readnone %sum) {
+ i32 signext %loopcnt, ptr nocapture readnone %sum) {
entry:
- %0 = load fp128, fp128* %farray, align 16
+ %0 = load fp128, ptr %farray, align 16
%sub = add nsw i32 %loopcnt, -1
%idxprom = sext i32 %sub to i64
- %arrayidx1 = getelementptr inbounds fp128, fp128* %farray, i64 %idxprom
- %1 = load fp128, fp128* %arrayidx1, align 16
+ %arrayidx1 = getelementptr inbounds fp128, ptr %farray, i64 %idxprom
+ %1 = load fp128, ptr %arrayidx1, align 16
%add = fadd fp128 %0, %1
ret fp128 %add
}
}
; Function Attrs: norecurse nounwind
-define fp128 @mixParam_02(fp128 %p1, double %p2, i64* nocapture %p3,
+define fp128 @mixParam_02(fp128 %p1, double %p2, ptr nocapture %p3,
; CHECK-LABEL: mixParam_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 96(r1)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i16 signext %p4, fp128* nocapture readonly %p5,
+ i16 signext %p4, ptr nocapture readonly %p5,
i32 signext %p6, i8 zeroext %p7, i32 zeroext %p8) {
entry:
%conv = sext i16 %p4 to i32
%add2 = add nsw i32 %add, %conv1
%add3 = add i32 %add2, %p8
%conv4 = zext i32 %add3 to i64
- store i64 %conv4, i64* %p3, align 8
- %0 = load fp128, fp128* %p5, align 16
+ store i64 %conv4, ptr %p3, align 8
+ %0 = load fp128, ptr %p5, align 16
%add5 = fadd fp128 %0, %p1
%conv6 = fpext double %p2 to fp128
%add7 = fadd fp128 %add5, %conv6
}
; Function Attrs: norecurse nounwind
-define fastcc fp128 @mixParam_02f(fp128 %p1, double %p2, i64* nocapture %p3,
+define fastcc fp128 @mixParam_02f(fp128 %p1, double %p2, ptr nocapture %p3,
; CHECK-LABEL: mixParam_02f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: add r4, r4, r6
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- i16 signext %p4, fp128* nocapture readonly %p5,
+ i16 signext %p4, ptr nocapture readonly %p5,
i32 signext %p6, i8 zeroext %p7, i32 zeroext %p8) {
entry:
%conv = sext i16 %p4 to i32
%add2 = add nsw i32 %add, %conv1
%add3 = add i32 %add2, %p8
%conv4 = zext i32 %add3 to i64
- store i64 %conv4, i64* %p3, align 8
- %0 = load fp128, fp128* %p5, align 16
+ store i64 %conv4, ptr %p3, align 8
+ %0 = load fp128, ptr %p5, align 16
%add5 = fadd fp128 %0, %p1
%conv6 = fpext double %p2 to fp128
%add7 = fadd fp128 %add5, %conv6
; Passing a mix of float128 and vector parameters.
; Function Attrs: norecurse nounwind
-define void @mixParam_03(fp128 %f1, double* nocapture %d1, <4 x i32> %vec1,
+define void @mixParam_03(fp128 %f1, ptr nocapture %d1, <4 x i32> %vec1,
; CHECK-LABEL: mixParam_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 104(r1)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture %f2, i32 signext %i1, i8 zeroext %c1,
- <4 x i32>* nocapture %vec2) {
+ ptr nocapture %f2, i32 signext %i1, i8 zeroext %c1,
+ ptr nocapture %vec2) {
entry:
- store fp128 %f1, fp128* %f2, align 16
- store <4 x i32> %vec1, <4 x i32>* %vec2, align 16
- %0 = load fp128, fp128* %f2, align 16
+ store fp128 %f1, ptr %f2, align 16
+ store <4 x i32> %vec1, ptr %vec2, align 16
+ %0 = load fp128, ptr %f2, align 16
%conv = sitofp i32 %i1 to fp128
%add = fadd fp128 %0, %conv
%conv1 = fptrunc fp128 %add to double
- store double %conv1, double* %d1, align 8
+ store double %conv1, ptr %d1, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define fastcc void @mixParam_03f(fp128 %f1, double* nocapture %d1, <4 x i32> %vec1,
+define fastcc void @mixParam_03f(fp128 %f1, ptr nocapture %d1, <4 x i32> %vec1,
; CHECK-LABEL: mixParam_03f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxv v2, 0(r4)
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture %f2, i32 signext %i1, i8 zeroext %c1,
- <4 x i32>* nocapture %vec2) {
+ ptr nocapture %f2, i32 signext %i1, i8 zeroext %c1,
+ ptr nocapture %vec2) {
entry:
- store fp128 %f1, fp128* %f2, align 16
- store <4 x i32> %vec1, <4 x i32>* %vec2, align 16
- %0 = load fp128, fp128* %f2, align 16
+ store fp128 %f1, ptr %f2, align 16
+ store <4 x i32> %vec1, ptr %vec2, align 16
+ %0 = load fp128, ptr %f2, align 16
%conv = sitofp i32 %i1 to fp128
%add = fadd fp128 %0, %conv
%conv1 = fptrunc fp128 %add to double
- store double %conv1, double* %d1, align 8
+ store double %conv1, ptr %d1, align 8
ret void
}
; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s \
; RUN: -check-prefix=CHECK-P8
-define void @qp_trunc(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_trunc(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_trunc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.trunc.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.trunc.f128(fp128 %Val)
-define void @qp_rint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_rint(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_rint:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.rint.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.rint.f128(fp128 %Val)
-define void @qp_nearbyint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_nearbyint(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_nearbyint:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.nearbyint.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.nearbyint.f128(fp128 %Val)
-define void @qp_round(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_round(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_round:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.round.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.round.f128(fp128 %Val)
-define void @qp_floor(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_floor(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_floor:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.floor.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.floor.f128(fp128 %Val)
-define void @qp_ceil(fp128* nocapture readonly %a, fp128* nocapture %res) {
+define void @qp_ceil(ptr nocapture readonly %a, ptr nocapture %res) {
; CHECK-LABEL: qp_ceil:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%1 = tail call fp128 @llvm.ceil.f128(fp128 %0)
- store fp128 %1, fp128* %res, align 16
+ store fp128 %1, ptr %res, align 16
ret void
}
declare fp128 @llvm.ceil.f128(fp128 %Val)
align 16
; Function Attrs: norecurse nounwind readonly
-define i64 @qpConv2sdw(fp128* nocapture readonly %a) {
+define i64 @qpConv2sdw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sdw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sdw_02(i64* nocapture %res) local_unnamed_addr #1 {
+define void @qpConv2sdw_02(ptr nocapture %res) local_unnamed_addr #1 {
; CHECK-LABEL: qpConv2sdw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 2), align 16
%conv = fptosi fp128 %0 to i64
- store i64 %conv, i64* %res, align 8
+ store i64 %conv, ptr %res, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
-define i64 @qpConv2sdw_03(fp128* nocapture readonly %a) {
+define i64 @qpConv2sdw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sdw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i64
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sdw_04(fp128* nocapture readonly %a,
+define void @qpConv2sdw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2sdw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i64* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i64
- store i64 %conv, i64* %res, align 8
+ store i64 %conv, ptr %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sdw_testXForm(i64* nocapture %res, i32 signext %idx) {
+define void @qpConv2sdw_testXForm(ptr nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2sdw_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i64
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %res, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %res, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
-define i64 @qpConv2udw(fp128* nocapture readonly %a) {
+define i64 @qpConv2udw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2udw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2udw_02(i64* nocapture %res) {
+define void @qpConv2udw_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2udw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 2), align 16
%conv = fptoui fp128 %0 to i64
- store i64 %conv, i64* %res, align 8
+ store i64 %conv, ptr %res, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
-define i64 @qpConv2udw_03(fp128* nocapture readonly %a) {
+define i64 @qpConv2udw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2udw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i64
}
; Function Attrs: norecurse nounwind
-define void @qpConv2udw_04(fp128* nocapture readonly %a,
+define void @qpConv2udw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2udw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i64* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i64
- store i64 %conv, i64* %res, align 8
+ store i64 %conv, ptr %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2udw_testXForm(i64* nocapture %res, i32 signext %idx) {
+define void @qpConv2udw_testXForm(ptr nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2udw_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
- i64 0, i64 0), align 16
+ %0 = load fp128, ptr @f128Array, align 16
%conv = fptoui fp128 %0 to i64
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %res, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %res, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i32 @qpConv2sw(fp128* nocapture readonly %a) {
+define signext i32 @qpConv2sw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sw_02(i32* nocapture %res) {
+define void @qpConv2sw_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2sw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 2), align 16
%conv = fptosi fp128 %0 to i32
- store i32 %conv, i32* %res, align 4
+ store i32 %conv, ptr %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i32 @qpConv2sw_03(fp128* nocapture readonly %a) {
+define signext i32 @qpConv2sw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i32
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sw_04(fp128* nocapture readonly %a,
+define void @qpConv2sw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2sw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i32* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i32
- store i32 %conv, i32* %res, align 4
+ store i32 %conv, ptr %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i32 @qpConv2uw(fp128* nocapture readonly %a) {
+define zeroext i32 @qpConv2uw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2uw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2uw_02(i32* nocapture %res) {
+define void @qpConv2uw_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2uw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 2), align 16
%conv = fptoui fp128 %0 to i32
- store i32 %conv, i32* %res, align 4
+ store i32 %conv, ptr %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i32 @qpConv2uw_03(fp128* nocapture readonly %a) {
+define zeroext i32 @qpConv2uw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2uw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array, i64 0,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array, i64 0,
i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i32
}
; Function Attrs: norecurse nounwind
-define void @qpConv2uw_04(fp128* nocapture readonly %a,
+define void @qpConv2uw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2uw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i32* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i32
- store i32 %conv, i32* %res, align 4
+ store i32 %conv, ptr %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i16 @qpConv2shw(fp128* nocapture readonly %a) {
+define signext i16 @qpConv2shw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2shw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2shw_02(i16* nocapture %res) {
+define void @qpConv2shw_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2shw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i16
- store i16 %conv, i16* %res, align 2
+ store i16 %conv, ptr %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i16 @qpConv2shw_03(fp128* nocapture readonly %a) {
+define signext i16 @qpConv2shw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2shw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i16
}
; Function Attrs: norecurse nounwind
-define void @qpConv2shw_04(fp128* nocapture readonly %a,
+define void @qpConv2shw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2shw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i16* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i16
- store i16 %conv, i16* %res, align 2
+ store i16 %conv, ptr %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i16 @qpConv2uhw(fp128* nocapture readonly %a) {
+define zeroext i16 @qpConv2uhw(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2uhw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2uhw_02(i16* nocapture %res) {
+define void @qpConv2uhw_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2uhw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i16
- store i16 %conv, i16* %res, align 2
+ store i16 %conv, ptr %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i16 @qpConv2uhw_03(fp128* nocapture readonly %a) {
+define zeroext i16 @qpConv2uhw_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2uhw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i16
}
; Function Attrs: norecurse nounwind
-define void @qpConv2uhw_04(fp128* nocapture readonly %a,
+define void @qpConv2uhw_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2uhw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i16* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i16
- store i16 %conv, i16* %res, align 2
+ store i16 %conv, ptr %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i8 @qpConv2sb(fp128* nocapture readonly %a) {
+define signext i8 @qpConv2sb(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sb:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sb_02(i8* nocapture %res) {
+define void @qpConv2sb_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2sb_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i8
- store i8 %conv, i8* %res, align 1
+ store i8 %conv, ptr %res, align 1
ret void
}
; Function Attrs: norecurse nounwind readonly
-define signext i8 @qpConv2sb_03(fp128* nocapture readonly %a) {
+define signext i8 @qpConv2sb_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2sb_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i8
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sb_04(fp128* nocapture readonly %a,
+define void @qpConv2sb_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2sb_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i8* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i8
- store i8 %conv, i8* %res, align 1
+ store i8 %conv, ptr %res, align 1
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i8 @qpConv2ub(fp128* nocapture readonly %a) {
+define zeroext i8 @qpConv2ub(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2ub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
-define void @qpConv2ub_02(i8* nocapture %res) {
+define void @qpConv2ub_02(ptr nocapture %res) {
; CHECK-LABEL: qpConv2ub_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i8
- store i8 %conv, i8* %res, align 1
+ store i8 %conv, ptr %res, align 1
ret void
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i8 @qpConv2ub_03(fp128* nocapture readonly %a) {
+define zeroext i8 @qpConv2ub_03(ptr nocapture readonly %a) {
; CHECK-LABEL: qpConv2ub_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* getelementptr inbounds
- ([4 x fp128], [4 x fp128]* @f128Array,
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr getelementptr inbounds
+ ([4 x fp128], ptr @f128Array,
i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i8
}
; Function Attrs: norecurse nounwind
-define void @qpConv2ub_04(fp128* nocapture readonly %a,
+define void @qpConv2ub_04(ptr nocapture readonly %a,
; CHECK-LABEL: qpConv2ub_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
- fp128* nocapture readonly %b, i8* nocapture %res) {
+ ptr nocapture readonly %b, ptr nocapture %res) {
entry:
- %0 = load fp128, fp128* %a, align 16
- %1 = load fp128, fp128* %b, align 16
+ %0 = load fp128, ptr %a, align 16
+ %1 = load fp128, ptr %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i8
- store i8 %conv, i8* %res, align 1
+ store i8 %conv, ptr %res, align 1
ret void
}
-define void @qpConvppcf128(fp128 %src, ppc_fp128* %dst) {
+define void @qpConvppcf128(fp128 %src, ptr %dst) {
; CHECK-LABEL: qpConvppcf128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: blr
entry:
%res = call ppc_fp128 @llvm.ppc.convert.f128.to.ppcf128(fp128 %src)
- store ppc_fp128 %res, ppc_fp128* %dst, align 16
+ store ppc_fp128 %res, ptr %dst, align 16
ret void
}
-define void @ppcf128Convqp(ppc_fp128 %src, fp128* %dst) {
+define void @ppcf128Convqp(ppc_fp128 %src, ptr %dst) {
; CHECK-LABEL: ppcf128Convqp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-P8-NEXT: blr
entry:
%res = call fp128 @llvm.ppc.convert.ppcf128.to.f128(ppc_fp128 %src)
- store fp128 %res, fp128* %dst, align 16
+ store fp128 %res, ptr %dst, align 16
ret void
}
@udwVecMem = global <2 x i64> <i64 88, i64 99>, align 16
; Function Attrs: norecurse nounwind
-define void @sdwVecConv2qp(fp128* nocapture %a, <2 x i64> %b) {
+define void @sdwVecConv2qp(ptr nocapture %a, <2 x i64> %b) {
; CHECK-LABEL: sdwVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltd v2, v2, 1
entry:
%vecext = extractelement <2 x i64> %b, i32 0
%conv = sitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwVecConv2qp1(fp128* nocapture %a, <2 x i64> %b) {
+define void @sdwVecConv2qp1(ptr nocapture %a, <2 x i64> %b) {
; CHECK-LABEL: sdwVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvsdqp v2, v2
entry:
%vecext = extractelement <2 x i64> %b, i32 1
%conv = sitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwVecConv2qp_02(fp128* nocapture %a) {
+define void @sdwVecConv2qp_02(ptr nocapture %a) {
; CHECK-LABEL: sdwVecConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* @sdwVecMem, align 16
+ %0 = load <2 x i64>, ptr @sdwVecMem, align 16
%vecext = extractelement <2 x i64> %0, i32 0
%conv = sitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sdwVecConv2qp1_03(fp128* nocapture %a, <2 x i64>* nocapture readonly %b) {
+define void @sdwVecConv2qp1_03(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: sdwVecConv2qp1_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 8(r4)
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %b, align 16
%vecext = extractelement <2 x i64> %0, i32 1
%conv = sitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwVecConv2qp(fp128* nocapture %a, <2 x i64> %b) {
+define void @udwVecConv2qp(ptr nocapture %a, <2 x i64> %b) {
; CHECK-LABEL: udwVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltd v2, v2, 1
entry:
%vecext = extractelement <2 x i64> %b, i32 0
%conv = uitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwVecConv2qp1(fp128* nocapture %a, <2 x i64> %b) {
+define void @udwVecConv2qp1(ptr nocapture %a, <2 x i64> %b) {
; CHECK-LABEL: udwVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvudqp v2, v2
entry:
%vecext = extractelement <2 x i64> %b, i32 1
%conv = uitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwVecConv2qp1_02(fp128* nocapture %a) {
+define void @udwVecConv2qp1_02(ptr nocapture %a) {
; CHECK-LABEL: udwVecConv2qp1_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC1@toc@ha
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* @udwVecMem, align 16
+ %0 = load <2 x i64>, ptr @udwVecMem, align 16
%vecext = extractelement <2 x i64> %0, i32 1
%conv = uitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @udwVecConv2qp_03(fp128* nocapture %a, <2 x i64>* nocapture readonly %b) {
+define void @udwVecConv2qp_03(ptr nocapture %a, ptr nocapture readonly %b) {
; CHECK-LABEL: udwVecConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r4)
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %b, align 16
%vecext = extractelement <2 x i64> %0, i32 0
%conv = uitofp i64 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Vector extract Word and convert to quad precision.
; Function Attrs: norecurse nounwind
-define void @swVecConv2qp(fp128* nocapture %a, <4 x i32> %b) {
+define void @swVecConv2qp(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: swVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vspltw v2, v2, 3
entry:
%vecext = extractelement <4 x i32> %b, i32 0
%conv = sitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @swVecConv2qp1(fp128* nocapture %a, <4 x i32> %b) {
+define void @swVecConv2qp1(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: swVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vspltw v2, v2, 2
entry:
%vecext = extractelement <4 x i32> %b, i32 1
%conv = sitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @swVecConv2qp2(fp128* nocapture %a, <4 x i32> %b) {
+define void @swVecConv2qp2(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: swVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextsw2d v2, v2
entry:
%vecext = extractelement <4 x i32> %b, i32 2
%conv = sitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @swVecConv2qp3(fp128* nocapture %a, <4 x i32> %b) {
+define void @swVecConv2qp3(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: swVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vspltw v2, v2, 0
entry:
%vecext = extractelement <4 x i32> %b, i32 3
%conv = sitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwVecConv2qp(fp128* nocapture %a, <4 x i32> %b) {
+define void @uwVecConv2qp(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: uwVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxextractuw v2, v2, 12
entry:
%vecext = extractelement <4 x i32> %b, i32 0
%conv = uitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwVecConv2qp1(fp128* nocapture %a, <4 x i32> %b) {
+define void @uwVecConv2qp1(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: uwVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxextractuw v2, v2, 8
entry:
%vecext = extractelement <4 x i32> %b, i32 1
%conv = uitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwVecConv2qp2(fp128* nocapture %a, <4 x i32> %b) {
+define void @uwVecConv2qp2(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: uwVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxextractuw v2, v2, 4
entry:
%vecext = extractelement <4 x i32> %b, i32 2
%conv = uitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uwVecConv2qp3(fp128* nocapture %a, <4 x i32> %b) {
+define void @uwVecConv2qp3(ptr nocapture %a, <4 x i32> %b) {
; CHECK-LABEL: uwVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxextractuw v2, v2, 0
entry:
%vecext = extractelement <4 x i32> %b, i32 3
%conv = uitofp i32 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Vector extract HWord and convert to quad precision.
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %b, i32 0
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp1(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp1(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 12
entry:
%vecext = extractelement <8 x i16> %b, i32 1
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp2(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp2(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 10
entry:
%vecext = extractelement <8 x i16> %b, i32 2
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp3(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp3(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 8
entry:
%vecext = extractelement <8 x i16> %b, i32 3
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp4(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp4(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 6
entry:
%vecext = extractelement <8 x i16> %b, i32 4
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp5(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp5(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 4
entry:
%vecext = extractelement <8 x i16> %b, i32 5
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp6(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp6(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 2
entry:
%vecext = extractelement <8 x i16> %b, i32 6
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @shwVecConv2qp7(fp128* nocapture %a, <8 x i16> %b) {
+define void @shwVecConv2qp7(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: shwVecConv2qp7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 0
entry:
%vecext = extractelement <8 x i16> %b, i32 7
%conv = sitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %b, i32 0
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp1(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp1(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 12
entry:
%vecext = extractelement <8 x i16> %b, i32 1
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp2(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp2(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 10
entry:
%vecext = extractelement <8 x i16> %b, i32 2
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp3(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp3(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 8
entry:
%vecext = extractelement <8 x i16> %b, i32 3
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp4(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp4(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 6
entry:
%vecext = extractelement <8 x i16> %b, i32 4
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp5(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp5(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 4
entry:
%vecext = extractelement <8 x i16> %b, i32 5
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp6(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp6(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 2
entry:
%vecext = extractelement <8 x i16> %b, i32 6
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @uhwVecConv2qp7(fp128* nocapture %a, <8 x i16> %b) {
+define void @uhwVecConv2qp7(ptr nocapture %a, <8 x i16> %b) {
; CHECK-LABEL: uhwVecConv2qp7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 0
entry:
%vecext = extractelement <8 x i16> %b, i32 7
%conv = uitofp i16 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Vector extract Byte and convert to quad precision.
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %b, i32 0
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp1(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp1(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 14
entry:
%vecext = extractelement <16 x i8> %b, i32 1
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp2(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp2(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 13
entry:
%vecext = extractelement <16 x i8> %b, i32 2
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp3(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp3(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 12
entry:
%vecext = extractelement <16 x i8> %b, i32 3
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp4(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp4(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 11
entry:
%vecext = extractelement <16 x i8> %b, i32 4
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp5(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp5(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 10
entry:
%vecext = extractelement <16 x i8> %b, i32 5
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp6(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp6(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 9
entry:
%vecext = extractelement <16 x i8> %b, i32 6
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp7(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp7(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 8
entry:
%vecext = extractelement <16 x i8> %b, i32 7
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp8(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp8(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 7
entry:
%vecext = extractelement <16 x i8> %b, i32 8
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp9(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp9(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp9:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 6
entry:
%vecext = extractelement <16 x i8> %b, i32 9
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp10(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp10(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp10:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 5
entry:
%vecext = extractelement <16 x i8> %b, i32 10
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp11(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp11(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp11:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 4
entry:
%vecext = extractelement <16 x i8> %b, i32 11
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp12(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp12(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp12:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 3
entry:
%vecext = extractelement <16 x i8> %b, i32 12
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp13(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp13(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp13:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 2
entry:
%vecext = extractelement <16 x i8> %b, i32 13
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp14(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp14(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp14:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 1
entry:
%vecext = extractelement <16 x i8> %b, i32 14
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @sbVecConv2qp15(fp128* nocapture %a, <16 x i8> %b) {
+define void @sbVecConv2qp15(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: sbVecConv2qp15:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 0
entry:
%vecext = extractelement <16 x i8> %b, i32 15
%conv = sitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %b, i32 0
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp1(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp1(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 14
entry:
%vecext = extractelement <16 x i8> %b, i32 1
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp2(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp2(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 13
entry:
%vecext = extractelement <16 x i8> %b, i32 2
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp3(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp3(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 12
entry:
%vecext = extractelement <16 x i8> %b, i32 3
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp4(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp4(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 11
entry:
%vecext = extractelement <16 x i8> %b, i32 4
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp5(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp5(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 10
entry:
%vecext = extractelement <16 x i8> %b, i32 5
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp6(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp6(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 9
entry:
%vecext = extractelement <16 x i8> %b, i32 6
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp7(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp7(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 8
entry:
%vecext = extractelement <16 x i8> %b, i32 7
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp8(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp8(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 7
entry:
%vecext = extractelement <16 x i8> %b, i32 8
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp9(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp9(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp9:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 6
entry:
%vecext = extractelement <16 x i8> %b, i32 9
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp10(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp10(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp10:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 5
entry:
%vecext = extractelement <16 x i8> %b, i32 10
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp11(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp11(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp11:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 4
entry:
%vecext = extractelement <16 x i8> %b, i32 11
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp12(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp12(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp12:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 3
entry:
%vecext = extractelement <16 x i8> %b, i32 12
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp13(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp13(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp13:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 2
entry:
%vecext = extractelement <16 x i8> %b, i32 13
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp14(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp14(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp14:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 1
entry:
%vecext = extractelement <16 x i8> %b, i32 14
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
-define void @ubVecConv2qp15(fp128* nocapture %a, <16 x i8> %b) {
+define void @ubVecConv2qp15(ptr nocapture %a, <16 x i8> %b) {
; CHECK-LABEL: ubVecConv2qp15:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 0
entry:
%vecext = extractelement <16 x i8> %b, i32 15
%conv = uitofp i8 %vecext to fp128
- store fp128 %conv, fp128* %a, align 16
+ store fp128 %conv, ptr %a, align 16
ret void
}
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_unalign16___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_unalign16___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 1(r3), 0
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_align16___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_align16___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align16___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 8(r3), 0
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_unalign32___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_unalign32___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 99999(r3), 0
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_align32___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_align32___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 99999000(r3), 0
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_unalign64___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_unalign64___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_align64___float128___float128(i8* nocapture readonly %ptr) {
+define dso_local fp128 @ld_align64___float128___float128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
-define dso_local fp128 @ld_reg___float128___float128(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local fp128 @ld_reg___float128___float128(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg___float128___float128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvx v2, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to fp128*
- %1 = load fp128, fp128* %0, align 16
- ret fp128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load fp128, ptr %add.ptr, align 16
+ ret fp128 %0
}
; Function Attrs: norecurse nounwind readonly willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 16
ret fp128 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to fp128*
- %1 = load fp128, fp128* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load fp128, ptr %0, align 4096
ret fp128 %1
}
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 255 to fp128*), align 16
+ %0 = load fp128, ptr inttoptr (i64 255 to ptr), align 16
ret fp128 %0
}
; CHECK-NEXT: lxv v2, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 4080 to fp128*), align 16
+ %0 = load fp128, ptr inttoptr (i64 4080 to ptr), align 16
ret fp128 %0
}
; CHECK-PREP10-NEXT: lxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 99999 to fp128*), align 16
+ %0 = load fp128, ptr inttoptr (i64 99999 to ptr), align 16
ret fp128 %0
}
; CHECK-PREP10-NEXT: lxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 9999900 to fp128*), align 16
+ %0 = load fp128, ptr inttoptr (i64 9999900 to ptr), align 16
ret fp128 %0
}
; CHECK-PREP10-NEXT: lxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 1000000000001 to fp128*), align 16
+ %0 = load fp128, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret fp128 %0
}
; CHECK-PREP10-NEXT: lxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* inttoptr (i64 1000000000000 to fp128*), align 4096
+ %0 = load fp128, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret fp128 %0
}
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_unalign16___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_unalign16___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_unalign16___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 1(r3), 0
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_align16___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_align16___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_align16___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 8(r3), 0
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_unalign32___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_unalign32___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_unalign32___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 99999(r3), 0
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_align32___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_align32___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_align32___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 99999000(r3), 0
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_unalign64___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_unalign64___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_unalign64___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_align64___float128___float128(i8* nocapture %ptr, fp128 %str) {
+define dso_local void @st_align64___float128___float128(ptr nocapture %ptr, fp128 %str) {
; CHECK-P10-LABEL: st_align64___float128___float128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: stxvx v2, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind willreturn writeonly
-define dso_local void @st_reg___float128___float128(i8* nocapture %ptr, i64 %off, fp128 %str) {
+define dso_local void @st_reg___float128___float128(ptr nocapture %ptr, i64 %off, fp128 %str) {
; CHECK-LABEL: st_reg___float128___float128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvx v2, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to fp128*
- store fp128 %str, fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store fp128 %str, ptr %add.ptr, align 16
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to fp128*
- store fp128 %str, fp128* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store fp128 %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 255 to fp128*), align 16
+ store fp128 %str, ptr inttoptr (i64 255 to ptr), align 16
ret void
}
; CHECK-NEXT: stxv v2, 4080(0)
; CHECK-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 4080 to fp128*), align 16
+ store fp128 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: stxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 99999 to fp128*), align 16
+ store fp128 %str, ptr inttoptr (i64 99999 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: stxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 9999900 to fp128*), align 16
+ store fp128 %str, ptr inttoptr (i64 9999900 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: stxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 1000000000001 to fp128*), align 16
+ store fp128 %str, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: stxv v2, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store fp128 %str, fp128* inttoptr (i64 1000000000000 to fp128*), align 4096
+ store fp128 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-PREP10-NEXT: stxv vs0, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds ([20 x fp128], [20 x fp128]* @GlobLd128, i64 0, i64 0), align 16
- store fp128 %0, fp128* getelementptr inbounds ([20 x fp128], [20 x fp128]* @GlobSt128, i64 0, i64 0), align 16
+ %0 = load fp128, ptr @GlobLd128, align 16
+ store fp128 %0, ptr @GlobSt128, align 16
ret void
}
; CHECK-PREP10-NEXT: stxvx vs0, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x fp128]* @GlobLd128 to i8*), i64 3) to fp128*), align 16
- store fp128 %0, fp128* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x fp128]* @GlobSt128 to i8*), i64 3) to fp128*), align 16
+ %0 = load fp128, ptr getelementptr inbounds (i8, ptr @GlobLd128, i64 3), align 16
+ store fp128 %0, ptr getelementptr inbounds (i8, ptr @GlobSt128, i64 3), align 16
ret void
}
; CHECK-PREP10-NEXT: stxvx vs0, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x fp128]* @GlobLd128 to i8*), i64 4) to fp128*), align 16
- store fp128 %0, fp128* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x fp128]* @GlobSt128 to i8*), i64 4) to fp128*), align 16
+ %0 = load fp128, ptr getelementptr inbounds (i8, ptr @GlobLd128, i64 4), align 16
+ store fp128 %0, ptr getelementptr inbounds (i8, ptr @GlobSt128, i64 4), align 16
ret void
}
; CHECK-PREP10-NEXT: stxv vs0, 16(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load fp128, fp128* getelementptr inbounds ([20 x fp128], [20 x fp128]* @GlobLd128, i64 0, i64 1), align 16
- store fp128 %0, fp128* getelementptr inbounds ([20 x fp128], [20 x fp128]* @GlobSt128, i64 0, i64 1), align 16
+ %0 = load fp128, ptr getelementptr inbounds ([20 x fp128], ptr @GlobLd128, i64 0, i64 1), align 16
+ store fp128 %0, ptr getelementptr inbounds ([20 x fp128], ptr @GlobSt128, i64 0, i64 1), align 16
ret void
}
; CHECK-PREP10-NEXT: stxvx vs0, r4, r3
; CHECK-PREP10-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x fp128], [20 x fp128]* @GlobLd128, i64 0, i64 %Idx
- %0 = load fp128, fp128* %arrayidx, align 16
- %arrayidx1 = getelementptr inbounds [20 x fp128], [20 x fp128]* @GlobSt128, i64 0, i64 %Idx
- store fp128 %0, fp128* %arrayidx1, align 16
+ %arrayidx = getelementptr inbounds [20 x fp128], ptr @GlobLd128, i64 0, i64 %Idx
+ %0 = load fp128, ptr %arrayidx, align 16
+ %arrayidx1 = getelementptr inbounds [20 x fp128], ptr @GlobSt128, i64 0, i64 %Idx
+ store fp128 %0, ptr %arrayidx1, align 16
ret void
}
define i64 @testullf(float %arg) #0 {
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i64
ret i64 %conv
@A = common global [3 x [3 x %struct.A]] zeroinitializer, align 4
@B = common global [2 x [2 x [2 x %struct.B]]] zeroinitializer, align 4
; Fast-isel GEP folding test: takes the address of element [0][1][1][1][1][1]
; of the 5-D array @arr, round-trips it through a stack slot, and returns it.
; The PPC64 CHECK expects the whole constant offset (124 bytes) folded into a
; single addi.
; NOTE(review): this span is a typed-pointer -> opaque-pointer migration diff;
; '-' lines are the old IR, '+' lines the replacement. Kept byte-identical.
-define i32* @t1() nounwind {
+define ptr @t1() nounwind {
entry:
; PPC64: t1
- %addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]], [2 x [2 x [2 x [2 x [2 x i32]]]]]* @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), i32** %addr, align 4
+ %addr = alloca ptr, align 4
+ store ptr getelementptr inbounds ([2 x [2 x [2 x [2 x [2 x i32]]]]], ptr @arr, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1), ptr %addr, align 4
; PPC64: addi {{[0-9]+}}, {{[0-9]+}}, 124
- %0 = load i32*, i32** %addr, align 4
- ret i32* %0
+ %0 = load ptr, ptr %addr, align 4
+ ret ptr %0
}
; Fast-isel GEP folding test over the struct array @A ([3 x [3 x %struct.A]]).
; Indices intentionally overflow inner dimensions; the CHECK expects the
; accumulated constant offset 1148 materialized by one addi.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define i32* @t2() nounwind {
+define ptr @t2() nounwind {
entry:
; PPC64: t2
- %addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([3 x [3 x %struct.A]], [3 x [3 x %struct.A]]* @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), i32** %addr, align 4
+ %addr = alloca ptr, align 4
+ store ptr getelementptr inbounds ([3 x [3 x %struct.A]], ptr @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), ptr %addr, align 4
; PPC64: addi {{[0-9]+}}, {{[0-9]+}}, 1148
- %0 = load i32*, i32** %addr, align 4
- ret i32* %0
+ %0 = load ptr, ptr %addr, align 4
+ ret ptr %0
}
; Fast-isel GEP folding test: address inside @A with small in-range indices;
; expected folded constant offset is 140 bytes (see CHECK below).
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define i32* @t3() nounwind {
+define ptr @t3() nounwind {
entry:
; PPC64: t3
- %addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([3 x [3 x %struct.A]], [3 x [3 x %struct.A]]* @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), i32** %addr, align 4
+ %addr = alloca ptr, align 4
+ store ptr getelementptr inbounds ([3 x [3 x %struct.A]], ptr @A, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1), ptr %addr, align 4
; PPC64: addi {{[0-9]+}}, {{[0-9]+}}, 140
- %0 = load i32*, i32** %addr, align 4
- ret i32* %0
+ %0 = load ptr, ptr %addr, align 4
+ ret ptr %0
}
; Fast-isel GEP folding test over the deeper nested array @B
; ([2 x [2 x [2 x %struct.B]]]); expected folded offset is 1284 bytes.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define i32* @t4() nounwind {
+define ptr @t4() nounwind {
entry:
; PPC64: t4
- %addr = alloca i32*, align 4
- store i32* getelementptr inbounds ([2 x [2 x [2 x %struct.B]]], [2 x [2 x [2 x %struct.B]]]* @B, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 3, i32 1, i32 2, i32 1), i32** %addr, align 4
+ %addr = alloca ptr, align 4
+ store ptr getelementptr inbounds ([2 x [2 x [2 x %struct.B]]], ptr @B, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 3, i32 1, i32 2, i32 1), ptr %addr, align 4
; PPC64: addi {{[0-9]+}}, {{[0-9]+}}, 1284
- %0 = load i32*, i32** %addr, align 4
- ret i32* %0
+ %0 = load ptr, ptr %addr, align 4
+ ret ptr %0
}
%a.addr = alloca i8, align 4
%0 = add i8 %a, %b
; PPC64: add
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i8, align 4
%0 = add i8 %a, 22;
; PPC64: addi
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = add i16 %a, %b
; PPC64: add
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = add i16 %a, 243;
; PPC64: addi
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i8, align 4
%0 = or i8 %a, %b
; PPC64: or
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i8, align 4
%0 = or i8 %a, -13;
; PPC64: ori
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = or i16 %a, %b
; PPC64: or
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = or i16 %a, 273;
; PPC64: ori
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i8, align 4
%0 = sub i8 %a, %b
; PPC64: sub
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i8, align 4
%0 = sub i8 %a, 22;
; PPC64: addi
- store i8 %0, i8* %a.addr, align 4
+ store i8 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = sub i16 %a, %b
; PPC64: sub
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = sub i16 %a, 247;
; PPC64: addi
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
%a.addr = alloca i16, align 4
%0 = sub i16 %a, -32768;
; PPC64: sub
- store i16 %0, i16* %a.addr, align 4
+ store i16 %0, ptr %a.addr, align 4
ret void
}
declare signext i8 @t7();
declare zeroext i8 @t8();
-define i32 @t10(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @t10(i32 %argc, ptr nocapture %argv) nounwind {
entry:
; ELF64: t10
%call = call i32 @bar(i8 zeroext 0, i8 zeroext -8, i8 zeroext -69, i8 zeroext 28, i8 zeroext 40, i8 zeroext -70)
; Function pointers are not yet implemented.
;define void @foo3() uwtable {
-; %fptr = alloca i32 (i32)*, align 8
-; store i32 (i32)* @bar0, i32 (i32)** %fptr, align 8
-; %1 = load i32 (i32)*, i32 (i32)** %fptr, align 8
+; %fptr = alloca ptr, align 8
+; store ptr @bar0, ptr %fptr, align 8
+; %1 = load ptr, ptr %fptr, align 8
; %call = call i32 %1(i32 0)
; ret void
;}
; ELF64: std {{[0-9]+}}, -[[OFFSET:[0-9]+]](1)
; ELF64: lfd {{[0-9]+}}, -[[OFFSET]](1)
; ELF64: fcfid
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; ELF64: std {{[0-9]+}}, -[[OFFSET:[0-9]+]](1)
; ELF64: lfd {{[0-9]+}}, -[[OFFSET]](1)
; ELF64: fcfid
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; ELF64: std {{[0-9]+}}, -[[OFFSET:[0-9]+]](1)
; ELF64: lfd {{[0-9]+}}, -[[OFFSET]](1)
; ELF64: fcfid
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; ELF64: std {{[0-9]+}}, -[[OFFSET:[0-9]+]](1)
; ELF64: lfd {{[0-9]+}}, -[[OFFSET]](1)
; ELF64: fcfid
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
- store i32 %conv, i32* %b.addr, align 4
+ store i32 %conv, ptr %b.addr, align 4
ret void
}
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
- store i64 %conv, i64* %b.addr, align 4
+ store i64 %conv, ptr %b.addr, align 4
ret void
}
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
- store i32 %conv, i32* %b.addr, align 8
+ store i32 %conv, ptr %b.addr, align 8
ret void
}
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
- store i64 %conv, i64* %b.addr, align 8
+ store i64 %conv, ptr %b.addr, align 8
ret void
}
; ELF64: fctidz
; ELF64: stfd
; ELF64: lwz
- store i32 %conv, i32* %b.addr, align 4
+ store i32 %conv, ptr %b.addr, align 4
ret void
}
; ELF64: fctidz
; ELF64: stfd
; ELF64: lwz
- store i32 %conv, i32* %b.addr, align 8
+ store i32 %conv, ptr %b.addr, align 8
ret void
}
; PPC970: lfd
; PPC970: fcfid
; PPC970: frsp
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: fcfid
; PPC970: frsp
; SPE: efscfsi
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: frsp
; SPE: extsh
; SPE: efscfsi
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: frsp
; SPE: extsb
; SPE: efscfsi
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: lfd
; PPC970: fcfid
; SPE: efdcfsi
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: std
; PPC970: lfd
; PPC970: fcfid
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: fcfid
; SPE: extsh
; SPE: efdcfsi
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: fcfid
; SPE: extsb
; SPE: efdcfsi
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; CHECK: lfd
; CHECK: fcfidus
; PPC970-NOT: fcfidus
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970-NOT: lfiwzx
; PPC970-NOT: fcfidus
; SPE: efscfui
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: frsp
; SPE: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
; SPE: efscfui
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; PPC970: frsp
; SPE: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
; SPE: efscfui
- store float %conv, float* %b.addr, align 4
+ store float %conv, ptr %b.addr, align 4
ret void
}
; CHECK: lfd
; CHECK: fcfidu
; PPC970-NOT: fcfidu
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970-NOT: lfiwzx
; PPC970-NOT: fcfidu
; SPE: efdcfui
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: fcfid
; SPE: clrlwi {{[0-9]+}}, {{[0-9]+}}, 16
; SPE: efdcfui
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: fcfid
; SPE: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
; SPE: efdcfui
- store double %conv, double* %b.addr, align 8
+ store double %conv, ptr %b.addr, align 8
ret void
}
; PPC970: stfd
; PPC970: lwa
; SPE: efsctsi
- store i32 %conv, i32* %b.addr, align 4
+ store i32 %conv, ptr %b.addr, align 4
ret void
}
; PPC970: fctidz
; PPC970: stfd
; PPC970: ld
- store i64 %conv, i64* %b.addr, align 4
+ store i64 %conv, ptr %b.addr, align 4
ret void
}
; PPC970: stfd
; PPC970: lwa
; SPE: efdctsi
- store i32 %conv, i32* %b.addr, align 8
+ store i32 %conv, ptr %b.addr, align 8
ret void
}
; PPC970: fctidz
; PPC970: stfd
; PPC970: ld
- store i64 %conv, i64* %b.addr, align 8
+ store i64 %conv, ptr %b.addr, align 8
ret void
}
; PPC970: stfd
; PPC970: lwz
; SPE: efsctui
- store i32 %conv, i32* %b.addr, align 4
+ store i32 %conv, ptr %b.addr, align 4
ret void
}
; CHECK: stfd
; CHECK: ld
; PPC970-NOT: fctiduz
- store i64 %conv, i64* %b.addr, align 4
+ store i64 %conv, ptr %b.addr, align 4
ret void
}
; PPC970: stfd
; PPC970: lwz
; SPE: efdctui
- store i32 %conv, i32* %b.addr, align 8
+ store i32 %conv, ptr %b.addr, align 8
ret void
}
; CHECK: stfd
; CHECK: ld
; PPC970-NOT: fctiduz
- store i64 %conv, i64* %b.addr, align 8
+ store i64 %conv, ptr %b.addr, align 8
ret void
}
@__md0 = external global [137 x i8]
; Empty-bodied callee with addrspace(1) pointer and vector parameters; exists
; only so @wrap (below) has a signature to call. Note the opaque-pointer form
; drops the <4 x i8> pointee type but keeps addrspace(1).
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define internal void @stretch(<4 x i8> addrspace(1)* %src, <4 x i8> addrspace(1)* %dst, i32 %width, i32 %height, i32 %iLS, i32 %oLS, <2 x float> %c, <4 x float> %param) nounwind {
+define internal void @stretch(ptr addrspace(1) %src, ptr addrspace(1) %dst, i32 %width, i32 %height, i32 %iLS, i32 %oLS, <2 x float> %c, <4 x float> %param) nounwind {
entry:
ret void
}
ret i32 undef
}
; Calls @stretch entirely with undef arguments; presumably a reduced crash
; reproducer (the surrounding test context is outside this chunk — confirm).
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define void @wrap(i8 addrspace(1)* addrspace(1)* %arglist, i32 addrspace(1)* %gtid) nounwind {
+define void @wrap(ptr addrspace(1) %arglist, ptr addrspace(1) %gtid) nounwind {
entry:
- call void @stretch(<4 x i8> addrspace(1)* undef, <4 x i8> addrspace(1)* undef, i32 undef, i32 undef, i32 undef, i32 undef, <2 x float> undef, <4 x float> undef)
+ call void @stretch(ptr addrspace(1) undef, ptr addrspace(1) undef, i32 undef, i32 undef, i32 undef, i32 undef, <2 x float> undef, <4 x float> undef)
ret void
}
define void @t1() nounwind {
; PPC64: t1
- %1 = load i8, i8* @a, align 1
+ %1 = load i8, ptr @a, align 1
call void @foo1(i8 zeroext %1)
; PPC64: lbz
; PPC64-NOT: rldicl
define void @t2() nounwind {
; PPC64: t2
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
call void @foo2(i16 zeroext %1)
; PPC64: lhz
; PPC64-NOT: rldicl
define void @t2a() nounwind {
; PPC64: t2a
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
call void @foo3(i32 zeroext %1)
; PPC64: lwz
; PPC64-NOT: rldicl
define i32 @t3() nounwind {
; PPC64: t3
- %1 = load i8, i8* @a, align 1
+ %1 = load i8, ptr @a, align 1
%2 = zext i8 %1 to i32
; PPC64: lbz
; PPC64-NOT: rlwinm
define i32 @t4() nounwind {
; PPC64: t4
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
%2 = zext i16 %1 to i32
; PPC64: lhz
; PPC64-NOT: rlwinm
define i32 @t5() nounwind {
; PPC64: t5
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
%2 = sext i16 %1 to i32
; PPC64: lha
; PPC64-NOT: rlwinm
define i32 @t6() nounwind {
; PPC64: t6
- %1 = load i8, i8* @a, align 2
+ %1 = load i8, ptr @a, align 2
%2 = sext i8 %1 to i32
; PPC64: lbz
; PPC64-NOT: rlwinm
define i64 @t7() nounwind {
; PPC64: t7
- %1 = load i8, i8* @a, align 1
+ %1 = load i8, ptr @a, align 1
%2 = zext i8 %1 to i64
; PPC64: lbz
; PPC64-NOT: rldicl
define i64 @t8() nounwind {
; PPC64: t8
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
%2 = zext i16 %1 to i64
; PPC64: lhz
; PPC64-NOT: rldicl
define i64 @t9() nounwind {
; PPC64: t9
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
%2 = sext i16 %1 to i64
; PPC64: lha
; PPC64-NOT: extsh
define i64 @t10() nounwind {
; PPC64: t10
- %1 = load i8, i8* @a, align 2
+ %1 = load i8, ptr @a, align 2
%2 = sext i8 %1 to i64
; PPC64: lbz
; PPC64: extsb
define i64 @t11() nounwind {
; PPC64: t11
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
%2 = zext i32 %1 to i64
; PPC64: lwz
; PPC64-NOT: rldicl
define i64 @t12() nounwind {
; PPC64: t12
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
%2 = sext i32 %1 to i64
; PPC64: lwa
; PPC64-NOT: extsw
; CHECK-NEXT: nop
%memPos = alloca float, align 4
- store float 1.500000e+01, float* %memPos
- %valPos = load float, float* %memPos
+ store float 1.500000e+01, ptr %memPos
+ %valPos = load float, ptr %memPos
%memNeg = alloca float, align 4
- store float -1.500000e+01, float* %memNeg
- %valNeg = load float, float* %memNeg
+ store float -1.500000e+01, ptr %memNeg
+ %valNeg = load float, ptr %memNeg
%FloatToIntPos = fptosi float %valPos to i32
call void @func(i32 15, i32 %FloatToIntPos)
; Verify that pointer offsets larger than 32 bits work correctly.
; Verifies a GEP whose byte offset (2147483646 * 4) exceeds 32 bits is not
; mis-truncated: the CHECK-NOT guards against the wrapped value -8 appearing
; as an immediate.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define void @test(i32* %array) {
+define void @test(ptr %array) {
; CHECK-LABEL: test:
; CHECK-NOT: li {{[0-9]+}}, -8
- %element = getelementptr i32, i32* %array, i64 2147483646
- store i32 1234, i32* %element
+ %element = getelementptr i32, ptr %array, i64 2147483646
+ store i32 1234, ptr %element
ret void
}
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux"
-%"class.std::__1::__tree_node.130.151" = type { %"class.std::__1::__tree_node_base.base.128.149", %"class.boost::serialization::extended_type_info.129.150"* }
-%"class.std::__1::__tree_node_base.base.128.149" = type <{ %"class.std::__1::__tree_end_node.127.148", %"class.std::__1::__tree_node_base.126.147"*, %"class.std::__1::__tree_node_base.126.147"*, i8 }>
-%"class.std::__1::__tree_end_node.127.148" = type { %"class.std::__1::__tree_node_base.126.147"* }
-%"class.std::__1::__tree_node_base.126.147" = type <{ %"class.std::__1::__tree_end_node.127.148", %"class.std::__1::__tree_node_base.126.147"*, %"class.std::__1::__tree_node_base.126.147"*, i8, [7 x i8] }>
-%"class.boost::serialization::extended_type_info.129.150" = type { i32 (...)**, i32, i8* }
+%"class.std::__1::__tree_node.130.151" = type { %"class.std::__1::__tree_node_base.base.128.149", ptr }
+%"class.std::__1::__tree_node_base.base.128.149" = type <{ %"class.std::__1::__tree_end_node.127.148", ptr, ptr, i8 }>
+%"class.std::__1::__tree_end_node.127.148" = type { ptr }
+%"class.std::__1::__tree_node_base.126.147" = type <{ %"class.std::__1::__tree_end_node.127.148", ptr, ptr, i8, [7 x i8] }>
+%"class.boost::serialization::extended_type_info.129.150" = type { ptr, i32, ptr }
; Function Attrs: noinline
; Reduced EH reproducer (landingpad/resume with a personality function).
; NOTE(review): the body below looks truncated by extraction — branches target
; labels (%cond.true, %cond.false) with no visible definition, and 'cleanup'
; appears on its own line as the landingpad clause. Kept byte-identical;
; consult the original test file before editing.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span; the
; personality bitcast becomes a direct ptr reference.
-define void @_ZN5boost13serialization18extended_type_info4findEPKc() #0 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_ZN5boost13serialization18extended_type_info4findEPKc() #0 align 2 personality ptr @__gxx_personality_v0 {
entry:
br i1 undef, label %cond.true, label %cond.false
br label %invoke.cont
invoke.cont: ; preds = %_ZNKSt3__18multisetIPKN5boost13serialization18extended_type_infoENS2_6detail11key_compareENS_9allocatorIS5_EEE4findERKS5_.exit
- %1 = load %"class.std::__1::__tree_node.130.151"*, %"class.std::__1::__tree_node.130.151"** undef, align 8
- %cmp.i = icmp eq %"class.std::__1::__tree_node.130.151"* undef, %1
+ %1 = load ptr, ptr undef, align 8
+ %cmp.i = icmp eq ptr undef, %1
br label %invoke.cont.2
invoke.cont.2: ; preds = %invoke.cont
br label %cleanup
lpad: ; preds = %cond.end
- %2 = landingpad { i8*, i32 }
+ %2 = landingpad { ptr, i32 }
cleanup
br label %eh.resume
ret void
eh.resume: ; preds = %lpad
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
}
declare i32 @__gxx_personality_v0(...)
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=PPC64
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -mtriple=powerpc64-ibm-aix-xcoff -mcpu=pwr7 | FileCheck %s --check-prefix=PPC64
; indirectbr codegen test: an indirect branch through %x must lower to
; mtctr/bctr on PPC64 (see CHECK lines).
; NOTE(review): the %L0 label line appears to have been lost in extraction —
; two consecutive 'br' instructions with no label between them. Kept
; byte-identical; verify against the original test.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define void @t1(i8* %x) nounwind {
+define void @t1(ptr %x) nounwind {
entry:
; PPC64: t1
br label %L0
br label %L1
L1:
- indirectbr i8* %x, [ label %L0, label %L1 ]
+ indirectbr ptr %x, [ label %L0, label %L1 ]
; PPC64: mtctr 3
; PPC64: bctr
}
%SomeStruct = type { double }
; ELF64VSX-LABEL: SomeStructCtor
; Clang -O0-style constructor pattern: spills %this and %V to stack slots,
; reloads them, and stores the double into the struct's first (only) field.
; ELF64VSX CHECKs pin the spill (stfd) and the field store (stxsdx).
; Note the opaque-pointer form stores directly through %this1 — the GEP to
; field 0 at offset 0 is dropped as redundant.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define linkonce_odr void @SomeStructCtor(%SomeStruct* %this, double %V) unnamed_addr align 2 {
+define linkonce_odr void @SomeStructCtor(ptr %this, double %V) unnamed_addr align 2 {
entry:
- %this.addr = alloca %SomeStruct*, align 8
+ %this.addr = alloca ptr, align 8
%V.addr = alloca double, align 8
- store %SomeStruct* %this, %SomeStruct** %this.addr, align 8
+ store ptr %this, ptr %this.addr, align 8
; ELF64VSX: stfd {{[0-9][0-9]?}}, -{{[1-9][0-9]?}}({{[1-9][0-9]?}})
- store double %V, double* %V.addr, align 8
- %this1 = load %SomeStruct*, %SomeStruct** %this.addr
- %Val = getelementptr inbounds %SomeStruct, %SomeStruct* %this1, i32 0, i32 0
+ store double %V, ptr %V.addr, align 8
+ %this1 = load ptr, ptr %this.addr
; ELF64VSX: stxsdx {{[0-9][0-9]?}}, 0, {{[1-9][0-9]?}}
- %0 = load double, double* %V.addr, align 8
- store double %0, double* %Val, align 8
+ %0 = load double, ptr %V.addr, align 8
+ store double %0, ptr %this1, align 8
ret void
}
define i8 @t1() nounwind {
; ELF64-LABEL: t1:
- %1 = load i8, i8* @a, align 1
+ %1 = load i8, ptr @a, align 1
; ELF64: lbz
%2 = add nsw i8 %1, 1
; ELF64: addi
define i16 @t2() nounwind {
; ELF64-LABEL: t2:
- %1 = load i16, i16* @b, align 2
+ %1 = load i16, ptr @b, align 2
; ELF64: lhz
%2 = add nsw i16 %1, 1
; ELF64: addi
define dso_local i32 @t3() nounwind {
; ELF64-LABEL: t3:
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
; ELF64: lwz
%2 = add nsw i32 %1, 1
; ELF64: addi
define i64 @t4() nounwind {
; ELF64-LABEL: t4:
- %1 = load i64, i64* @d, align 4
+ %1 = load i64, ptr @d, align 4
; ELF64: ld
%2 = add nsw i64 %1, 1
; ELF64: addi
define dso_local float @t5() nounwind {
; ELF64-LABEL: t5:
; SPE-LABEL: t5:
- %1 = load float, float* @e, align 4
+ %1 = load float, ptr @e, align 4
; ELF64: lfs
; SPE: lwz
%2 = fadd float %1, 1.0
define dso_local double @t6() nounwind {
; ELF64-LABEL: t6:
; SPE-LABEL: t6:
- %1 = load double, double* @f, align 8
+ %1 = load double, ptr @f, align 8
; ELF64: lfd
; VSX: lxsdx
; SPE: evldd
define dso_local void @t7(i8 %v) nounwind {
; ELF64-LABEL: t7:
%1 = add nsw i8 %v, 1
- store i8 %1, i8* @a, align 1
+ store i8 %1, ptr @a, align 1
; ELF64: addi
; ELF64: addis
; ELF64: addi
define dso_local void @t8(i16 %v) nounwind {
; ELF64-LABEL: t8:
%1 = add nsw i16 %v, 1
- store i16 %1, i16* @b, align 2
+ store i16 %1, ptr @b, align 2
; ELF64: addi
; ELF64: addis
; ELF64: addi
define dso_local void @t9(i32 %v) nounwind {
; ELF64-LABEL: t9:
%1 = add nsw i32 %v, 1
- store i32 %1, i32* @c, align 4
+ store i32 %1, ptr @c, align 4
; ELF64: addi
; ELF64: addis
; ELF64: addi
define dso_local void @t10(i64 %v) nounwind {
; ELF64-LABEL: t10:
%1 = add nsw i64 %v, 1
- store i64 %1, i64* @d, align 4
+ store i64 %1, ptr @d, align 4
; ELF64: addi
; ELF64: addis
; ELF64: addi
; ELF64-LABEL: t11:
; SPE-LABEL: t11:
%1 = fadd float %v, 1.0
- store float %1, float* @e, align 4
+ store float %1, ptr @e, align 4
; ELF64: fadds
; ELF64: stfs
; SPE: efsadd
; ELF64-LABEL: t12:
; SPE-LABEL: t12:
%1 = fadd double %v, 1.0
- store double %1, double* @f, align 8
+ store double %1, ptr @f, align 8
; ELF64: fadd
; ELF64: stfd
; VSX: xsadddp
;; lwa requires an offset divisible by 4, so we need lwax here.
define i64 @t13() nounwind {
; ELF64-LABEL: t13:
- %1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
+ %1 = load i32, ptr getelementptr inbounds (%struct.s, ptr @g, i32 0, i32 1), align 1
%2 = sext i32 %1 to i64
; ELF64: li
; ELF64: lwax
;; ld requires an offset divisible by 4, so we need ldx here.
define i64 @t14() nounwind {
; ELF64-LABEL: t14:
- %1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
+ %1 = load i64, ptr getelementptr inbounds (%struct.t, ptr @h, i32 0, i32 1), align 1
; ELF64: li
; ELF64: ldx
%2 = add nsw i64 %1, 1
define dso_local void @t15(i64 %v) nounwind {
; ELF64-LABEL: t15:
%1 = add nsw i64 %v, 1
- store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
+ store i64 %1, ptr getelementptr inbounds (%struct.t, ptr @h, i32 0, i32 1), align 1
; ELF64: addi
; ELF64: addis
; ELF64: addi
;; ld requires an offset that fits in 16 bits, so we need ldx here.
define i64 @t16() nounwind {
; ELF64-LABEL: t16:
- %1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
+ %1 = load i64, ptr getelementptr inbounds ([8192 x i64], ptr @i, i32 0, i64 5000), align 8
; ELF64: lis
; ELF64: ori
; ELF64: ldx
define dso_local void @t17(i64 %v) nounwind {
; ELF64-LABEL: t17:
%1 = add nsw i64 %v, 1
- store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
+ store i64 %1, ptr getelementptr inbounds ([8192 x i64], ptr @i, i32 0, i64 5000), align 8
; ELF64: addi
; ELF64: addis
; ELF64: addi
; RUN: -fast-isel -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN: FileCheck %s
-%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
+%struct._IO_FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%struct._IO_marker = type { ptr, ptr, i32 }
@.str = private unnamed_addr constant [25 x i8] c"Breaking the TOC for FP\0A\00", align 1
@.str.1 = private unnamed_addr constant [25 x i8] c"Breaking the TOC for GV\0A\00", align 1
-@stdout = external global %struct._IO_FILE*, align 8
+@stdout = external global ptr, align 8
; Function Attrs: noinline nounwind optnone
; TOC-elision test helper: spills %d, calls printf (breaking the TOC for the
; FP constant / string), then stores 4.99 through the reloaded pointer.
; CHECK pins '.localentry loadFP, 1'. The opaque-pointer form also drops the
; now-redundant getelementptr around @.str in the printf call.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define internal void @loadFP(double* %d) #0 {
+define internal void @loadFP(ptr %d) #0 {
; CHECK-LABEL: loadFP:
; CHECK: .localentry loadFP, 1
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %d.addr = alloca double*, align 8
- store double* %d, double** %d.addr, align 8
- %call = call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i64 0, i64 0))
- %0 = load double*, double** %d.addr, align 8
- store double 4.990000e+00, double* %0, align 8
+ %d.addr = alloca ptr, align 8
+ store ptr %d, ptr %d.addr, align 8
+ %call = call signext i32 (ptr, ...) @printf(ptr @.str)
+ %0 = load ptr, ptr %d.addr, align 8
+ store double 4.990000e+00, ptr %0, align 8
ret void
}
-declare signext i32 @printf(i8*, ...)
+declare signext i32 @printf(ptr, ...)
; Function Attrs: noinline nounwind optnone
; TOC-elision test helper: prints a message, loads the global @stdout, and
; writes a character to it via _IO_putc (breaking the TOC for a GV access).
; NOTE(review): the CHECK-NEXT lines sit directly after the define with no
; CHECK-LABEL visible — likely trimmed by extraction; confirm in the original.
define internal void @loadGV() #0 {
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %call = call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str.1, i64 0, i64 0))
- %0 = load %struct._IO_FILE*, %struct._IO_FILE** @stdout, align 8
- %call1 = call signext i32 @_IO_putc(i32 signext 97, %struct._IO_FILE* %0)
+ %call = call signext i32 (ptr, ...) @printf(ptr @.str.1)
+ %0 = load ptr, ptr @stdout, align 8
+ %call1 = call signext i32 @_IO_putc(i32 signext 97, ptr %0)
ret void
}
-declare signext i32 @_IO_putc(i32 signext, %struct._IO_FILE*)
+declare signext i32 @_IO_putc(i32 signext, ptr)
attributes #0 = { noinline nounwind optnone }
; doesn't crash. (It crashed formerly on ARM, and proved useful in
; discovering a bug on PowerPC as well.)
; Crash-regression test (per the comment preceding this block): load through
; a GEP with a large constant index (5000 * 4 = 20000 bytes, outside the
; 16-bit displacement range) and return the value.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span.
-define i32 @f(i32* %x) nounwind {
- %y = getelementptr inbounds i32, i32* %x, i32 5000
- %tmp103 = load i32, i32* %y, align 4
+define i32 @f(ptr %x) nounwind {
+ %y = getelementptr inbounds i32, ptr %x, i32 5000
+ %tmp103 = load i32, ptr %y, align 4
ret i32 %tmp103
}
; Max number of GPR is 8
define linkonce_odr void
- @WithoutParamArea(i8* %a, i32 signext %b) align 2 {
+ @WithoutParamArea(ptr %a, i32 signext %b) align 2 {
entry:
call fastcc void @fastccFunc(i32 signext 1)
ret void
declare fastcc void @fastccFunc(i32 signext %level) unnamed_addr
; No need for Parameter Save Area if only 8 GPRs is needed.
-define linkonce_odr void @WithoutParamArea2(i8* %a, i32 signext %b) align 2 {
+define linkonce_odr void @WithoutParamArea2(ptr %a, i32 signext %b) align 2 {
entry:
call fastcc void @eightArgs(i32 signext 1, i32 signext 2, i32 signext 3,
i32 signext 4, i32 signext 5, i32 signext 6,
i32 signext %level7, i32 signext %level8) unnamed_addr
; No need for Parameter Save Area for calls that utiliizes 8 GPR and 2 FPR.
-define linkonce_odr void @WithoutParamArea3(i8* %a, i32 signext %b) align 2 {
+define linkonce_odr void @WithoutParamArea3(ptr %a, i32 signext %b) align 2 {
entry:
call fastcc void
@mixedArgs(i32 signext 1, float 1.0, i32 signext 2, float 2.0,
; Pass by value usage requiring less GPR then available
%"myClass::Mem" = type { i8, i8, i16, i32, i32, i32, i64 }
; Parameter Save Area test: fastcc call passing %"myClass::Mem" byval; the
; aggregate fits in the available GPRs, so (per the surrounding comments) no
; save area should be required.
; NOTE(review): typed-pointer -> opaque-pointer migration diff span; the
; byval(<type>) attribute retains the pointee type under opaque pointers.
-define internal fastcc void @CallPassByValue(%"myClass::Mem"* %E) align 2 {
+define internal fastcc void @CallPassByValue(ptr %E) align 2 {
entry:
- call fastcc void @PassByValue(%"myClass::Mem"* byval(%"myClass::Mem") nonnull align 8 undef);
+ call fastcc void @PassByValue(ptr byval(%"myClass::Mem") nonnull align 8 undef);
ret void
; CHECK-LABEL: PassByValue
}
declare dso_local fastcc void
- @PassByValue(%"myClass::Mem"* byval(%"myClass::Mem") nocapture readonly align 8) align 2
+ @PassByValue(ptr byval(%"myClass::Mem") nocapture readonly align 8) align 2
; Verify Paramater Save Area is allocated if parameter exceed the number that
; can be passed via registers
; ------------------------------------------------------------------------------
; Max number of GPR is 8
-define linkonce_odr void @WithParamArea(i8 * %a, i32 signext %b) align 2 {
+define linkonce_odr void @WithParamArea(ptr %a, i32 signext %b) align 2 {
entry:
call fastcc void @nineArgs(i32 signext 1, i32 signext 2, i32 signext 3,
i32 signext 4, i32 signext 5, i32 signext 6,
i32 signext %level9) unnamed_addr
; Max number of FPR for parameter passing is 13
-define linkonce_odr void @WithParamArea2(i8* %a, i32 signext %b) align 2 {
+define linkonce_odr void @WithParamArea2(ptr %a, i32 signext %b) align 2 {
entry:
call fastcc void @funcW14FloatArgs(float 1.0, float 2.0, float 3.0,
float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 1.0,
; Pass-by-value usage requires more GPRs than available
%"myClass::MemA" = type { i8, i8, i16, i32, i32, i32, i64 }
-%"myClass::MemB" = type { i32*, i32, i32, %"myClass::MemB"** }
-%"myClass::MemC" = type { %"myClass::MemD"*, %"myClass::MemC"*, i64 }
-%"myClass::MemD" = type { %"myClass::MemB"*, %"myClass::MemC"*, i8, i8, i16,
+%"myClass::MemB" = type { ptr, i32, i32, ptr }
+%"myClass::MemC" = type { ptr, ptr, i64 }
+%"myClass::MemD" = type { ptr, ptr, i8, i8, i16,
i32 }
-%"myStruct::MemF" = type { i32, %"myClass::MemA"*, %"myClass::MemA"*, i64, i64 }
-%"myClass::MemK" = type { i32, %"myClass::MemD"*, %"myClass::MemD"*, i64, i32,
+%"myStruct::MemF" = type { i32, ptr, ptr, i64, i64 }
+%"myClass::MemK" = type { i32, ptr, ptr, i64, i32,
i64, i8, i32, %"myStruct::MemF",
- i8, %"myClass::MemA"* }
+ i8, ptr }
-define internal fastcc void @AggMemExprEmitter(%"myClass::MemK"* %E) align 2 {
+define internal fastcc void @AggMemExprEmitter(ptr %E) align 2 {
entry:
- call fastcc void @MemExprEmitterInitialization(%"myClass::MemK"*
+ call fastcc void @MemExprEmitterInitialization(ptr
byval(%"myClass::MemK") nonnull align 8 undef);
ret void
}
declare dso_local fastcc void
- @MemExprEmitterInitialization(%"myClass::MemK"*
+ @MemExprEmitterInitialization(ptr
byval(%"myClass::MemK") nocapture readonly align 8) align 2
; sext(a) + sext(b) != sext(a + b)
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-freebsd10.0 %s -O0 -o - | FileCheck %s
-define zeroext i8 @gep_promotion(i8* %ptr) nounwind {
+define zeroext i8 @gep_promotion(ptr %ptr) nounwind {
entry:
- %ptr.addr = alloca i8*, align 8
+ %ptr.addr = alloca ptr, align 8
%add = add i8 64, 64 ; 0x40 + 0x40
- %0 = load i8*, i8** %ptr.addr, align 8
+ %0 = load ptr, ptr %ptr.addr, align 8
; CHECK-LABEL: gep_promotion:
; CHECK: lbz {{[0-9]+}}, 0({{.*}})
- %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, ptr %0, i8 %add
- %1 = load i8, i8* %arrayidx, align 1
+ %1 = load i8, ptr %arrayidx, align 1
ret i8 %1
}
; CHECK-NEXT: ld 0, 16(1)
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
-%1 = load double, double* @a1, align 8
-%2 = load double, double* @a2, align 8
-%3 = load double, double* @a3, align 8
-%4 = load double, double* @a4, align 8
-%5 = load double, double* @a5, align 8
-%6 = load double, double* @a6, align 8
-%7 = load double, double* @a7, align 8
-%8 = load double, double* @a8, align 8
-%9 = load double, double* @a9, align 8
-%10 = load double, double* @a10, align 8
-%11 = load double, double* @a11, align 8
-%12 = load double, double* @a12, align 8
-%13 = load double, double* @a13, align 8
-%14 = load double, double* @a14, align 8
-%15 = load double, double* @a15, align 8
-%16 = load ppc_fp128, ppc_fp128* @a16, align 16
-%17 = load fp128, fp128* @a17, align 16
+%1 = load double, ptr @a1, align 8
+%2 = load double, ptr @a2, align 8
+%3 = load double, ptr @a3, align 8
+%4 = load double, ptr @a4, align 8
+%5 = load double, ptr @a5, align 8
+%6 = load double, ptr @a6, align 8
+%7 = load double, ptr @a7, align 8
+%8 = load double, ptr @a8, align 8
+%9 = load double, ptr @a9, align 8
+%10 = load double, ptr @a10, align 8
+%11 = load double, ptr @a11, align 8
+%12 = load double, ptr @a12, align 8
+%13 = load double, ptr @a13, align 8
+%14 = load double, ptr @a14, align 8
+%15 = load double, ptr @a15, align 8
+%16 = load ppc_fp128, ptr @a16, align 16
+%17 = load fp128, ptr @a17, align 16
tail call void @_Z3fooddddddddddddddd(double %1, double %2, double %3, double %4, double %5, double %6, double %7, double %8, double %9, double %10, double %11, double %12, double %13, double %14, double %15, ppc_fp128 %16, fp128 %17)
ret i32 0
}
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -ppc-vsr-nums-as-vr \
; RUN: -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix-xcoff < %s \
; RUN: | FileCheck %s -check-prefix=CHECK-BE-AIX-32
-define dso_local <4 x float> @vector_gatherf(float* nocapture readonly %a,
-float* nocapture readonly %b, float* nocapture readonly %c,
-float* nocapture readonly %d) {
+define dso_local <4 x float> @vector_gatherf(ptr nocapture readonly %a,
+ptr nocapture readonly %b, ptr nocapture readonly %c,
+ptr nocapture readonly %d) {
; C code from which this IR test case was generated:
-; vector float test(float *a, float *b, float *c, float *d) {
+; vector float test(float *a, float *b, float *c, float *d) {
; return (vector float) { *a, *b, *c, *d };
; }
; CHECK-LE-LABEL: vector_gatherf:
; CHECK-BE-AIX-32-NEXT: xxmrghd v[[REG0]], v[[REG3]], v[[REG0]]
; CHECK-BE-AIX-32-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%vecinit = insertelement <4 x float> undef, float %0, i32 0
- %1 = load float, float* %b, align 4
+ %1 = load float, ptr %b, align 4
%vecinit1 = insertelement <4 x float> %vecinit, float %1, i32 1
- %2 = load float, float* %c, align 4
+ %2 = load float, ptr %c, align 4
%vecinit2 = insertelement <4 x float> %vecinit1, float %2, i32 2
- %3 = load float, float* %d, align 4
+ %3 = load float, ptr %d, align 4
%vecinit3 = insertelement <4 x float> %vecinit2, float %3, i32 3
ret <4 x float> %vecinit3
}
%l.addr = alloca float, align 4
%m.addr = alloca float, align 4
%n.addr = alloca float, align 4
- store float %a, float* %a.addr, align 4
- store float %b, float* %b.addr, align 4
- store float %c, float* %c.addr, align 4
- store float %d, float* %d.addr, align 4
- store float %e, float* %e.addr, align 4
- store float %f, float* %f.addr, align 4
- store float %g, float* %g.addr, align 4
- store float %h, float* %h.addr, align 4
- store float %i, float* %i.addr, align 4
- store float %j, float* %j.addr, align 4
- store float %k, float* %k.addr, align 4
- store float %l, float* %l.addr, align 4
- store float %m, float* %m.addr, align 4
- store float %n, float* %n.addr, align 4
- %0 = load float, float* %n.addr, align 4
+ store float %a, ptr %a.addr, align 4
+ store float %b, ptr %b.addr, align 4
+ store float %c, ptr %c.addr, align 4
+ store float %d, ptr %d.addr, align 4
+ store float %e, ptr %e.addr, align 4
+ store float %f, ptr %f.addr, align 4
+ store float %g, ptr %g.addr, align 4
+ store float %h, ptr %h.addr, align 4
+ store float %i, ptr %i.addr, align 4
+ store float %j, ptr %j.addr, align 4
+ store float %k, ptr %k.addr, align 4
+ store float %l, ptr %l.addr, align 4
+ store float %m, ptr %m.addr, align 4
+ store float %n, ptr %n.addr, align 4
+ %0 = load float, ptr %n.addr, align 4
ret float %0
}
%l = alloca float, align 4
%m = alloca float, align 4
%n = alloca float, align 4
- store float 1.000000e+00, float* %a, align 4
- store float 2.000000e+00, float* %b, align 4
- store float 3.000000e+00, float* %c, align 4
- store float 4.000000e+00, float* %d, align 4
- store float 5.000000e+00, float* %e, align 4
- store float 6.000000e+00, float* %f, align 4
- store float 7.000000e+00, float* %g, align 4
- store float 8.000000e+00, float* %h, align 4
- store float 9.000000e+00, float* %i, align 4
- store float 1.000000e+01, float* %j, align 4
- store float 1.100000e+01, float* %k, align 4
- store float 1.200000e+01, float* %l, align 4
- store float 1.300000e+01, float* %m, align 4
- store float 1.400000e+01, float* %n, align 4
- %0 = load float, float* %a, align 4
- %1 = load float, float* %b, align 4
- %2 = load float, float* %c, align 4
- %3 = load float, float* %d, align 4
- %4 = load float, float* %e, align 4
- %5 = load float, float* %f, align 4
- %6 = load float, float* %g, align 4
- %7 = load float, float* %h, align 4
- %8 = load float, float* %i, align 4
- %9 = load float, float* %j, align 4
- %10 = load float, float* %k, align 4
- %11 = load float, float* %l, align 4
- %12 = load float, float* %m, align 4
- %13 = load float, float* %n, align 4
+ store float 1.000000e+00, ptr %a, align 4
+ store float 2.000000e+00, ptr %b, align 4
+ store float 3.000000e+00, ptr %c, align 4
+ store float 4.000000e+00, ptr %d, align 4
+ store float 5.000000e+00, ptr %e, align 4
+ store float 6.000000e+00, ptr %f, align 4
+ store float 7.000000e+00, ptr %g, align 4
+ store float 8.000000e+00, ptr %h, align 4
+ store float 9.000000e+00, ptr %i, align 4
+ store float 1.000000e+01, ptr %j, align 4
+ store float 1.100000e+01, ptr %k, align 4
+ store float 1.200000e+01, ptr %l, align 4
+ store float 1.300000e+01, ptr %m, align 4
+ store float 1.400000e+01, ptr %n, align 4
+ %0 = load float, ptr %a, align 4
+ %1 = load float, ptr %b, align 4
+ %2 = load float, ptr %c, align 4
+ %3 = load float, ptr %d, align 4
+ %4 = load float, ptr %e, align 4
+ %5 = load float, ptr %f, align 4
+ %6 = load float, ptr %g, align 4
+ %7 = load float, ptr %h, align 4
+ %8 = load float, ptr %i, align 4
+ %9 = load float, ptr %j, align 4
+ %10 = load float, ptr %k, align 4
+ %11 = load float, ptr %l, align 4
+ %12 = load float, ptr %m, align 4
+ %13 = load float, ptr %n, align 4
%call = call float @bar(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13)
ret float %call
}
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind readonly
-define float @tf(float* nocapture readonly %i, i32 signext %o) #0 {
+define float @tf(ptr nocapture readonly %i, i32 signext %o) #0 {
entry:
%idx.ext = sext i32 %o to i64
- %add.ptr = getelementptr inbounds float, float* %i, i64 %idx.ext
- %0 = load float, float* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds float, ptr %i, i64 %idx.ext
+ %0 = load float, ptr %add.ptr, align 4
%add.ptr.sum = add nsw i64 %idx.ext, 1
- %add.ptr3 = getelementptr inbounds float, float* %i, i64 %add.ptr.sum
- %1 = load float, float* %add.ptr3, align 4
+ %add.ptr3 = getelementptr inbounds float, ptr %i, i64 %add.ptr.sum
+ %1 = load float, ptr %add.ptr3, align 4
%add = fadd float %0, %1
ret float %add
}
; Function Attrs: nounwind readonly
-define double @td(double* nocapture readonly %i, i32 signext %o) #0 {
+define double @td(ptr nocapture readonly %i, i32 signext %o) #0 {
entry:
%idx.ext = sext i32 %o to i64
- %add.ptr = getelementptr inbounds double, double* %i, i64 %idx.ext
- %0 = load double, double* %add.ptr, align 8
+ %add.ptr = getelementptr inbounds double, ptr %i, i64 %idx.ext
+ %0 = load double, ptr %add.ptr, align 8
%add.ptr.sum = add nsw i64 %idx.ext, 1
- %add.ptr3 = getelementptr inbounds double, double* %i, i64 %add.ptr.sum
- %1 = load double, double* %add.ptr3, align 8
+ %add.ptr3 = getelementptr inbounds double, ptr %i, i64 %add.ptr.sum
+ %1 = load double, ptr %add.ptr3, align 8
%add = fadd double %0, %1
ret double %add
; CHECK-NEXT: blr
entry:
%fneg = fneg double %a
- store double %fneg, double* @v, align 8
+ store double %fneg, ptr @v, align 8
%fneg1 = fneg double %c
- store double %fneg1, double* @z, align 8
+ store double %fneg1, ptr @z, align 8
%mul = fmul double %fneg1, %b
%add = fsub double %mul, %a
ret double %add
; CHECK-NEXT: blr
entry:
%fneg = fneg double %a
- store double %fneg, double* @v, align 8
+ store double %fneg, ptr @v, align 8
%fneg1 = fneg double %c
%mul = fmul double %fneg1, %b
%add = fsub double %mul, %a
; CHECK-NEXT: xsmaddasp 1, 2, 3
; CHECK-NEXT: xsmaddasp 1, 4, 2
; CHECK-NEXT: blr
- %tmp = load float, float* undef, align 4
- %tmp2 = load float, float* undef, align 4
+ %tmp = load float, ptr undef, align 4
+ %tmp2 = load float, ptr undef, align 4
%tmp3 = fmul contract reassoc float %tmp, 0x3FE372D780000000
%tmp4 = fadd contract reassoc float %tmp3, 1.000000e+00
%tmp5 = fmul contract reassoc float %tmp2, %tmp4
- %tmp6 = load float, float* undef, align 4
- %tmp7 = load float, float* undef, align 4
+ %tmp6 = load float, ptr undef, align 4
+ %tmp7 = load float, ptr undef, align 4
%tmp8 = fmul contract reassoc float %tmp7, 0x3FE372D780000000
%tmp9 = fsub contract reassoc nsz float -1.000000e+00, %tmp8
%tmp10 = fmul contract reassoc float %tmp9, %tmp6
%2 = shufflevector <8 x float> zeroinitializer, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%3 = shufflevector <16 x float> %2, <16 x float> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%4 = fmul <32 x float> %1, %3
- %5 = load <4 x float>, <4 x float>* undef, align 128
- %6 = load <4 x float>, <4 x float>* undef, align 128
+ %5 = load <4 x float>, ptr undef, align 128
+ %6 = load <4 x float>, ptr undef, align 128
%7 = shufflevector <4 x float> undef, <4 x float> %5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%8 = shufflevector <4 x float> undef, <4 x float> %6, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%9 = shufflevector <8 x float> %7, <8 x float> %8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%10 = shufflevector <16 x float> undef, <16 x float> %9, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
- %11 = load <4 x float>, <4 x float>* null, align 128
- %12 = load <4 x float>, <4 x float>* undef, align 128
+ %11 = load <4 x float>, ptr null, align 128
+ %12 = load <4 x float>, ptr undef, align 128
%13 = shufflevector <4 x float> undef, <4 x float> %11, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%14 = shufflevector <4 x float> undef, <4 x float> %12, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%15 = shufflevector <8 x float> %13, <8 x float> %14, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%24 = shufflevector <256 x float> undef, <256 x float> %23, <512 x i32> <i32 0, i32 256, i32 1, i32 257, i32 2, i32 258, i32 3, i32 259, i32 4, i32 260, i32 5, i32 261, i32 6, i32 262, i32 7, i32 263, i32 8, i32 264, i32 9, i32 265, i32 10, i32 266, i32 11, i32 267, i32 12, i32 268, i32 13, i32 269, i32 14, i32 270, i32 15, i32 271, i32 16, i32 272, i32 17, i32 273, i32 18, i32 274, i32 19, i32 275, i32 20, i32 276, i32 21, i32 277, i32 22, i32 278, i32 23, i32 279, i32 24, i32 280, i32 25, i32 281, i32 26, i32 282, i32 27, i32 283, i32 28, i32 284, i32 29, i32 285, i32 30, i32 286, i32 31, i32 287, i32 32, i32 288, i32 33, i32 289, i32 34, i32 290, i32 35, i32 291, i32 36, i32 292, i32 37, i32 293, i32 38, i32 294, i32 39, i32 295, i32 40, i32 296, i32 41, i32 297, i32 42, i32 298, i32 43, i32 299, i32 44, i32 300, i32 45, i32 301, i32 46, i32 302, i32 47, i32 303, i32 48, i32 304, i32 49, i32 305, i32 50, i32 306, i32 51, i32 307, i32 52, i32 308, i32 53, i32 309, i32 54, i32 310, i32 55, i32 311, i32 56, i32 312, i32 57, i32 313, i32 58, i32 314, i32 59, i32 315, i32 60, i32 316, i32 61, i32 317, i32 62, i32 318, i32 63, i32 319, i32 64, i32 320, i32 65, i32 321, i32 66, i32 322, i32 67, i32 323, i32 68, i32 324, i32 69, i32 325, i32 70, i32 326, i32 71, i32 327, i32 72, i32 328, i32 73, i32 329, i32 74, i32 330, i32 75, i32 331, i32 76, i32 332, i32 77, i32 333, i32 78, i32 334, i32 79, i32 335, i32 80, i32 336, i32 81, i32 337, i32 82, i32 338, i32 83, i32 339, i32 84, i32 340, i32 85, i32 341, i32 86, i32 342, i32 87, i32 343, i32 88, i32 344, i32 89, i32 345, i32 90, i32 346, i32 91, i32 347, i32 92, i32 348, i32 93, i32 349, i32 94, i32 350, i32 95, i32 351, i32 96, i32 352, i32 97, i32 353, i32 98, i32 354, i32 99, i32 355, i32 100, i32 356, i32 101, i32 357, i32 102, i32 358, i32 103, i32 359, i32 104, i32 360, i32 105, i32 361, i32 106, i32 362, i32 107, i32 363, i32 108, i32 364, i32 109, i32 365, i32 110, i32 366, i32 111, i32 367, i32 112, i32 368, 
i32 113, i32 369, i32 114, i32 370, i32 115, i32 371, i32 116, i32 372, i32 117, i32 373, i32 118, i32 374, i32 119, i32 375, i32 120, i32 376, i32 121, i32 377, i32 122, i32 378, i32 123, i32 379, i32 124, i32 380, i32 125, i32 381, i32 126, i32 382, i32 127, i32 383, i32 128, i32 384, i32 129, i32 385, i32 130, i32 386, i32 131, i32 387, i32 132, i32 388, i32 133, i32 389, i32 134, i32 390, i32 135, i32 391, i32 136, i32 392, i32 137, i32 393, i32 138, i32 394, i32 139, i32 395, i32 140, i32 396, i32 141, i32 397, i32 142, i32 398, i32 143, i32 399, i32 144, i32 400, i32 145, i32 401, i32 146, i32 402, i32 147, i32 403, i32 148, i32 404, i32 149, i32 405, i32 150, i32 406, i32 151, i32 407, i32 152, i32 408, i32 153, i32 409, i32 154, i32 410, i32 155, i32 411, i32 156, i32 412, i32 157, i32 413, i32 158, i32 414, i32 159, i32 415, i32 160, i32 416, i32 161, i32 417, i32 162, i32 418, i32 163, i32 419, i32 164, i32 420, i32 165, i32 421, i32 166, i32 422, i32 167, i32 423, i32 168, i32 424, i32 169, i32 425, i32 170, i32 426, i32 171, i32 427, i32 172, i32 428, i32 173, i32 429, i32 174, i32 430, i32 175, i32 431, i32 176, i32 432, i32 177, i32 433, i32 178, i32 434, i32 179, i32 435, i32 180, i32 436, i32 181, i32 437, i32 182, i32 438, i32 183, i32 439, i32 184, i32 440, i32 185, i32 441, i32 186, i32 442, i32 187, i32 443, i32 188, i32 444, i32 189, i32 445, i32 190, i32 446, i32 191, i32 447, i32 192, i32 448, i32 193, i32 449, i32 194, i32 450, i32 195, i32 451, i32 196, i32 452, i32 197, i32 453, i32 198, i32 454, i32 199, i32 455, i32 200, i32 456, i32 201, i32 457, i32 202, i32 458, i32 203, i32 459, i32 204, i32 460, i32 205, i32 461, i32 206, i32 462, i32 207, i32 463, i32 208, i32 464, i32 209, i32 465, i32 210, i32 466, i32 211, i32 467, i32 212, i32 468, i32 213, i32 469, i32 214, i32 470, i32 215, i32 471, i32 216, i32 472, i32 217, i32 473, i32 218, i32 474, i32 219, i32 475, i32 220, i32 476, i32 221, i32 477, i32 222, i32 478, i32 223, i32 479, 
i32 224, i32 480, i32 225, i32 481, i32 226, i32 482, i32 227, i32 483, i32 228, i32 484, i32 229, i32 485, i32 230, i32 486, i32 231, i32 487, i32 232, i32 488, i32 233, i32 489, i32 234, i32 490, i32 235, i32 491, i32 236, i32 492, i32 237, i32 493, i32 238, i32 494, i32 239, i32 495, i32 240, i32 496, i32 241, i32 497, i32 242, i32 498, i32 243, i32 499, i32 244, i32 500, i32 245, i32 501, i32 246, i32 502, i32 247, i32 503, i32 248, i32 504, i32 249, i32 505, i32 250, i32 506, i32 251, i32 507, i32 252, i32 508, i32 253, i32 509, i32 254, i32 510, i32 255, i32 511>
%25 = shufflevector <512 x float> %24, <512 x float> undef, <1024 x i32> <i32 0, i32 512, i32 1, i32 513, i32 2, i32 514, i32 3, i32 515, i32 4, i32 516, i32 5, i32 517, i32 6, i32 518, i32 7, i32 519, i32 8, i32 520, i32 9, i32 521, i32 10, i32 522, i32 11, i32 523, i32 12, i32 524, i32 13, i32 525, i32 14, i32 526, i32 15, i32 527, i32 16, i32 528, i32 17, i32 529, i32 18, i32 530, i32 19, i32 531, i32 20, i32 532, i32 21, i32 533, i32 22, i32 534, i32 23, i32 535, i32 24, i32 536, i32 25, i32 537, i32 26, i32 538, i32 27, i32 539, i32 28, i32 540, i32 29, i32 541, i32 30, i32 542, i32 31, i32 543, i32 32, i32 544, i32 33, i32 545, i32 34, i32 546, i32 35, i32 547, i32 36, i32 548, i32 37, i32 549, i32 38, i32 550, i32 39, i32 551, i32 40, i32 552, i32 41, i32 553, i32 42, i32 554, i32 43, i32 555, i32 44, i32 556, i32 45, i32 557, i32 46, i32 558, i32 47, i32 559, i32 48, i32 560, i32 49, i32 561, i32 50, i32 562, i32 51, i32 563, i32 52, i32 564, i32 53, i32 565, i32 54, i32 566, i32 55, i32 567, i32 56, i32 568, i32 57, i32 569, i32 58, i32 570, i32 59, i32 571, i32 60, i32 572, i32 61, i32 573, i32 62, i32 574, i32 63, i32 575, i32 64, i32 576, i32 65, i32 577, i32 66, i32 578, i32 67, i32 579, i32 68, i32 580, i32 69, i32 581, i32 70, i32 582, i32 71, i32 583, i32 72, i32 584, i32 73, i32 585, i32 74, i32 586, i32 75, i32 587, i32 76, i32 588, i32 77, i32 589, i32 78, i32 590, i32 79, i32 591, i32 80, i32 592, i32 81, i32 593, i32 82, i32 594, i32 83, i32 595, i32 84, i32 596, i32 85, i32 597, i32 86, i32 598, i32 87, i32 599, i32 88, i32 600, i32 89, i32 601, i32 90, i32 602, i32 91, i32 603, i32 92, i32 604, i32 93, i32 605, i32 94, i32 606, i32 95, i32 607, i32 96, i32 608, i32 97, i32 609, i32 98, i32 610, i32 99, i32 611, i32 100, i32 612, i32 101, i32 613, i32 102, i32 614, i32 103, i32 615, i32 104, i32 616, i32 105, i32 617, i32 106, i32 618, i32 107, i32 619, i32 108, i32 620, i32 109, i32 621, i32 110, i32 622, i32 111, i32 623, i32 112, i32 624, 
i32 113, i32 625, i32 114, i32 626, i32 115, i32 627, i32 116, i32 628, i32 117, i32 629, i32 118, i32 630, i32 119, i32 631, i32 120, i32 632, i32 121, i32 633, i32 122, i32 634, i32 123, i32 635, i32 124, i32 636, i32 125, i32 637, i32 126, i32 638, i32 127, i32 639, i32 128, i32 640, i32 129, i32 641, i32 130, i32 642, i32 131, i32 643, i32 132, i32 644, i32 133, i32 645, i32 134, i32 646, i32 135, i32 647, i32 136, i32 648, i32 137, i32 649, i32 138, i32 650, i32 139, i32 651, i32 140, i32 652, i32 141, i32 653, i32 142, i32 654, i32 143, i32 655, i32 144, i32 656, i32 145, i32 657, i32 146, i32 658, i32 147, i32 659, i32 148, i32 660, i32 149, i32 661, i32 150, i32 662, i32 151, i32 663, i32 152, i32 664, i32 153, i32 665, i32 154, i32 666, i32 155, i32 667, i32 156, i32 668, i32 157, i32 669, i32 158, i32 670, i32 159, i32 671, i32 160, i32 672, i32 161, i32 673, i32 162, i32 674, i32 163, i32 675, i32 164, i32 676, i32 165, i32 677, i32 166, i32 678, i32 167, i32 679, i32 168, i32 680, i32 169, i32 681, i32 170, i32 682, i32 171, i32 683, i32 172, i32 684, i32 173, i32 685, i32 174, i32 686, i32 175, i32 687, i32 176, i32 688, i32 177, i32 689, i32 178, i32 690, i32 179, i32 691, i32 180, i32 692, i32 181, i32 693, i32 182, i32 694, i32 183, i32 695, i32 184, i32 696, i32 185, i32 697, i32 186, i32 698, i32 187, i32 699, i32 188, i32 700, i32 189, i32 701, i32 190, i32 702, i32 191, i32 703, i32 192, i32 704, i32 193, i32 705, i32 194, i32 706, i32 195, i32 707, i32 196, i32 708, i32 197, i32 709, i32 198, i32 710, i32 199, i32 711, i32 200, i32 712, i32 201, i32 713, i32 202, i32 714, i32 203, i32 715, i32 204, i32 716, i32 205, i32 717, i32 206, i32 718, i32 207, i32 719, i32 208, i32 720, i32 209, i32 721, i32 210, i32 722, i32 211, i32 723, i32 212, i32 724, i32 213, i32 725, i32 214, i32 726, i32 215, i32 727, i32 216, i32 728, i32 217, i32 729, i32 218, i32 730, i32 219, i32 731, i32 220, i32 732, i32 221, i32 733, i32 222, i32 734, i32 223, i32 735, 
i32 224, i32 736, i32 225, i32 737, i32 226, i32 738, i32 227, i32 739, i32 228, i32 740, i32 229, i32 741, i32 230, i32 742, i32 231, i32 743, i32 232, i32 744, i32 233, i32 745, i32 234, i32 746, i32 235, i32 747, i32 236, i32 748, i32 237, i32 749, i32 238, i32 750, i32 239, i32 751, i32 240, i32 752, i32 241, i32 753, i32 242, i32 754, i32 243, i32 755, i32 244, i32 756, i32 245, i32 757, i32 246, i32 758, i32 247, i32 759, i32 248, i32 760, i32 249, i32 761, i32 250, i32 762, i32 251, i32 763, i32 252, i32 764, i32 253, i32 765, i32 254, i32 766, i32 255, i32 767, i32 256, i32 768, i32 257, i32 769, i32 258, i32 770, i32 259, i32 771, i32 260, i32 772, i32 261, i32 773, i32 262, i32 774, i32 263, i32 775, i32 264, i32 776, i32 265, i32 777, i32 266, i32 778, i32 267, i32 779, i32 268, i32 780, i32 269, i32 781, i32 270, i32 782, i32 271, i32 783, i32 272, i32 784, i32 273, i32 785, i32 274, i32 786, i32 275, i32 787, i32 276, i32 788, i32 277, i32 789, i32 278, i32 790, i32 279, i32 791, i32 280, i32 792, i32 281, i32 793, i32 282, i32 794, i32 283, i32 795, i32 284, i32 796, i32 285, i32 797, i32 286, i32 798, i32 287, i32 799, i32 288, i32 800, i32 289, i32 801, i32 290, i32 802, i32 291, i32 803, i32 292, i32 804, i32 293, i32 805, i32 294, i32 806, i32 295, i32 807, i32 296, i32 808, i32 297, i32 809, i32 298, i32 810, i32 299, i32 811, i32 300, i32 812, i32 301, i32 813, i32 302, i32 814, i32 303, i32 815, i32 304, i32 816, i32 305, i32 817, i32 306, i32 818, i32 307, i32 819, i32 308, i32 820, i32 309, i32 821, i32 310, i32 822, i32 311, i32 823, i32 312, i32 824, i32 313, i32 825, i32 314, i32 826, i32 315, i32 827, i32 316, i32 828, i32 317, i32 829, i32 318, i32 830, i32 319, i32 831, i32 320, i32 832, i32 321, i32 833, i32 322, i32 834, i32 323, i32 835, i32 324, i32 836, i32 325, i32 837, i32 326, i32 838, i32 327, i32 839, i32 328, i32 840, i32 329, i32 841, i32 330, i32 842, i32 331, i32 843, i32 332, i32 844, i32 333, i32 845, i32 334, i32 846, 
i32 335, i32 847, i32 336, i32 848, i32 337, i32 849, i32 338, i32 850, i32 339, i32 851, i32 340, i32 852, i32 341, i32 853, i32 342, i32 854, i32 343, i32 855, i32 344, i32 856, i32 345, i32 857, i32 346, i32 858, i32 347, i32 859, i32 348, i32 860, i32 349, i32 861, i32 350, i32 862, i32 351, i32 863, i32 352, i32 864, i32 353, i32 865, i32 354, i32 866, i32 355, i32 867, i32 356, i32 868, i32 357, i32 869, i32 358, i32 870, i32 359, i32 871, i32 360, i32 872, i32 361, i32 873, i32 362, i32 874, i32 363, i32 875, i32 364, i32 876, i32 365, i32 877, i32 366, i32 878, i32 367, i32 879, i32 368, i32 880, i32 369, i32 881, i32 370, i32 882, i32 371, i32 883, i32 372, i32 884, i32 373, i32 885, i32 374, i32 886, i32 375, i32 887, i32 376, i32 888, i32 377, i32 889, i32 378, i32 890, i32 379, i32 891, i32 380, i32 892, i32 381, i32 893, i32 382, i32 894, i32 383, i32 895, i32 384, i32 896, i32 385, i32 897, i32 386, i32 898, i32 387, i32 899, i32 388, i32 900, i32 389, i32 901, i32 390, i32 902, i32 391, i32 903, i32 392, i32 904, i32 393, i32 905, i32 394, i32 906, i32 395, i32 907, i32 396, i32 908, i32 397, i32 909, i32 398, i32 910, i32 399, i32 911, i32 400, i32 912, i32 401, i32 913, i32 402, i32 914, i32 403, i32 915, i32 404, i32 916, i32 405, i32 917, i32 406, i32 918, i32 407, i32 919, i32 408, i32 920, i32 409, i32 921, i32 410, i32 922, i32 411, i32 923, i32 412, i32 924, i32 413, i32 925, i32 414, i32 926, i32 415, i32 927, i32 416, i32 928, i32 417, i32 929, i32 418, i32 930, i32 419, i32 931, i32 420, i32 932, i32 421, i32 933, i32 422, i32 934, i32 423, i32 935, i32 424, i32 936, i32 425, i32 937, i32 426, i32 938, i32 427, i32 939, i32 428, i32 940, i32 429, i32 941, i32 430, i32 942, i32 431, i32 943, i32 432, i32 944, i32 433, i32 945, i32 434, i32 946, i32 435, i32 947, i32 436, i32 948, i32 437, i32 949, i32 438, i32 950, i32 439, i32 951, i32 440, i32 952, i32 441, i32 953, i32 442, i32 954, i32 443, i32 955, i32 444, i32 956, i32 445, i32 957, 
i32 446, i32 958, i32 447, i32 959, i32 448, i32 960, i32 449, i32 961, i32 450, i32 962, i32 451, i32 963, i32 452, i32 964, i32 453, i32 965, i32 454, i32 966, i32 455, i32 967, i32 456, i32 968, i32 457, i32 969, i32 458, i32 970, i32 459, i32 971, i32 460, i32 972, i32 461, i32 973, i32 462, i32 974, i32 463, i32 975, i32 464, i32 976, i32 465, i32 977, i32 466, i32 978, i32 467, i32 979, i32 468, i32 980, i32 469, i32 981, i32 470, i32 982, i32 471, i32 983, i32 472, i32 984, i32 473, i32 985, i32 474, i32 986, i32 475, i32 987, i32 476, i32 988, i32 477, i32 989, i32 478, i32 990, i32 479, i32 991, i32 480, i32 992, i32 481, i32 993, i32 482, i32 994, i32 483, i32 995, i32 484, i32 996, i32 485, i32 997, i32 486, i32 998, i32 487, i32 999, i32 488, i32 1000, i32 489, i32 1001, i32 490, i32 1002, i32 491, i32 1003, i32 492, i32 1004, i32 493, i32 1005, i32 494, i32 1006, i32 495, i32 1007, i32 496, i32 1008, i32 497, i32 1009, i32 498, i32 1010, i32 499, i32 1011, i32 500, i32 1012, i32 501, i32 1013, i32 502, i32 1014, i32 503, i32 1015, i32 504, i32 1016, i32 505, i32 1017, i32 506, i32 1018, i32 507, i32 1019, i32 508, i32 1020, i32 509, i32 1021, i32 510, i32 1022, i32 511, i32 1023>
%26 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- store <4 x float> %26, <4 x float>* undef, align 128
+ store <4 x float> %26, ptr undef, align 128
%27 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 56, i32 57, i32 58, i32 59>
- store <4 x float> %27, <4 x float>* undef, align 128
+ store <4 x float> %27, ptr undef, align 128
%28 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 164, i32 165, i32 166, i32 167>
- store <4 x float> %28, <4 x float>* undef, align 128
+ store <4 x float> %28, ptr undef, align 128
%29 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 168, i32 169, i32 170, i32 171>
- store <4 x float> %29, <4 x float>* undef, align 128
+ store <4 x float> %29, ptr undef, align 128
%30 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 172, i32 173, i32 174, i32 175>
- store <4 x float> %30, <4 x float>* undef, align 128
+ store <4 x float> %30, ptr undef, align 128
%31 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 176, i32 177, i32 178, i32 179>
- store <4 x float> %31, <4 x float>* undef, align 128
+ store <4 x float> %31, ptr undef, align 128
%32 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 284, i32 285, i32 286, i32 287>
- store <4 x float> %32, <4 x float>* undef, align 128
+ store <4 x float> %32, ptr undef, align 128
%33 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 328, i32 329, i32 330, i32 331>
- store <4 x float> %33, <4 x float>* undef, align 128
+ store <4 x float> %33, ptr undef, align 128
%34 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 332, i32 333, i32 334, i32 335>
- store <4 x float> %34, <4 x float>* undef, align 128
+ store <4 x float> %34, ptr undef, align 128
%35 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 524, i32 525, i32 526, i32 527>
- store <4 x float> %35, <4 x float>* undef, align 128
+ store <4 x float> %35, ptr undef, align 128
%36 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 528, i32 529, i32 530, i32 531>
- store <4 x float> %36, <4 x float>* undef, align 128
+ store <4 x float> %36, ptr undef, align 128
%37 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 648, i32 649, i32 650, i32 651>
- store <4 x float> %37, <4 x float>* undef, align 128
+ store <4 x float> %37, ptr undef, align 128
%38 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 652, i32 653, i32 654, i32 655>
- store <4 x float> %38, <4 x float>* undef, align 128
+ store <4 x float> %38, ptr undef, align 128
%39 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 656, i32 657, i32 658, i32 659>
- store <4 x float> %39, <4 x float>* undef, align 128
+ store <4 x float> %39, ptr undef, align 128
%40 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 732, i32 733, i32 734, i32 735>
- store <4 x float> %40, <4 x float>* undef, align 128
+ store <4 x float> %40, ptr undef, align 128
%41 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 736, i32 737, i32 738, i32 739>
- store <4 x float> %41, <4 x float>* undef, align 128
+ store <4 x float> %41, ptr undef, align 128
%42 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 740, i32 741, i32 742, i32 743>
- store <4 x float> %42, <4 x float>* undef, align 128
+ store <4 x float> %42, ptr undef, align 128
%43 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 872, i32 873, i32 874, i32 875>
- store <4 x float> %43, <4 x float>* undef, align 128
+ store <4 x float> %43, ptr undef, align 128
%44 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 968, i32 969, i32 970, i32 971>
- store <4 x float> %44, <4 x float>* undef, align 128
+ store <4 x float> %44, ptr undef, align 128
%45 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 1016, i32 1017, i32 1018, i32 1019>
- store <4 x float> %45, <4 x float>* undef, align 128
+ store <4 x float> %45, ptr undef, align 128
%46 = shufflevector <1024 x float> %25, <1024 x float> undef, <4 x i32> <i32 1020, i32 1021, i32 1022, i32 1023>
- store <4 x float> %46, <4 x float>* undef, align 128
+ store <4 x float> %46, ptr undef, align 128
%47 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
- store <4 x float> %47, <4 x float>* undef, align 128
+ store <4 x float> %47, ptr undef, align 128
%48 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
- store <4 x float> %48, <4 x float>* undef, align 128
+ store <4 x float> %48, ptr undef, align 128
%49 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 16, i32 17, i32 18, i32 19>
- store <4 x float> %49, <4 x float>* undef, align 128
+ store <4 x float> %49, ptr undef, align 128
%50 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 20, i32 21, i32 22, i32 23>
- store <4 x float> %50, <4 x float>* undef, align 128
+ store <4 x float> %50, ptr undef, align 128
%51 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 148, i32 149, i32 150, i32 151>
- store <4 x float> %51, <4 x float>* undef, align 128
+ store <4 x float> %51, ptr undef, align 128
%52 = shufflevector <1024 x float> undef, <1024 x float> undef, <4 x i32> <i32 632, i32 633, i32 634, i32 635>
- store <4 x float> %52, <4 x float>* undef, align 128
+ store <4 x float> %52, ptr undef, align 128
ret void
}
ret double %add
}
-define double @fma_multi_uses1(double %a, double %b, double %c, double %d, double* %p1, double* %p2, double* %p3) {
+define double @fma_multi_uses1(double %a, double %b, double %c, double %d, ptr %p1, ptr %p2, ptr %p3) {
; CHECK-LABEL: fma_multi_uses1:
; CHECK: # %bb.0:
; CHECK-NEXT: xsmuldp 1, 1, 2
; CHECK-NEXT: blr
%ab = fmul contract reassoc double %a, %b
%cd = fmul contract reassoc double %c, %d
- store double %ab, double* %p1 ; extra use of %ab
- store double %ab, double* %p2 ; another extra use of %ab
- store double %cd, double* %p3 ; extra use of %cd
+ store double %ab, ptr %p1 ; extra use of %ab
+ store double %ab, ptr %p2 ; another extra use of %ab
+ store double %cd, ptr %p3 ; extra use of %cd
%r = fsub contract reassoc nsz double %ab, %cd
ret double %r
}
-define double @fma_multi_uses2(double %a, double %b, double %c, double %d, double* %p1, double* %p2, double* %p3) {
+define double @fma_multi_uses2(double %a, double %b, double %c, double %d, ptr %p1, ptr %p2, ptr %p3) {
; CHECK-LABEL: fma_multi_uses2:
; CHECK: # %bb.0:
; CHECK-NEXT: xsmuldp 5, 1, 2
; CHECK-NEXT: blr
%ab = fmul contract reassoc double %a, %b
%cd = fmul contract reassoc double %c, %d
- store double %ab, double* %p1 ; extra use of %ab
- store double %cd, double* %p2 ; extra use of %cd
- store double %cd, double* %p3 ; another extra use of %cd
+ store double %ab, ptr %p1 ; extra use of %ab
+ store double %cd, ptr %p2 ; extra use of %cd
+ store double %cd, ptr %p3 ; another extra use of %cd
%r = fsub contract reassoc double %ab, %cd
ret double %r
}
-define double @fma_multi_uses3(double %a, double %b, double %c, double %d, double %f, double %g, double* %p1, double* %p2, double* %p3) {
+define double @fma_multi_uses3(double %a, double %b, double %c, double %d, double %f, double %g, ptr %p1, ptr %p2, ptr %p3) {
; CHECK-LABEL: fma_multi_uses3:
; CHECK: # %bb.0:
; CHECK-NEXT: xsmuldp 0, 1, 2
%ab = fmul contract reassoc double %a, %b
%cd = fmul contract reassoc double %c, %d
%fg = fmul contract reassoc double %f, %g
- store double %ab, double* %p1 ; extra use of %ab
- store double %ab, double* %p2 ; another extra use of %ab
- store double %fg, double* %p3 ; extra use of %fg
+ store double %ab, ptr %p1 ; extra use of %ab
+ store double %ab, ptr %p2 ; another extra use of %ab
+ store double %fg, ptr %p3 ; extra use of %fg
%q = fsub contract reassoc nsz double %fg, %cd ; The uses of %cd reduce to 1 after %r is folded. 2 uses of %fg, fold %cd, remove def of %cd
%r = fsub contract reassoc nsz double %ab, %cd ; Fold %r before %q. 3 uses of %ab, 2 uses of %cd, fold %cd
%add = fadd contract reassoc double %r, %q
%0 = type { i32, i16 }
-@val = common dso_local local_unnamed_addr global %0* null, align 8
+@val = common dso_local local_unnamed_addr global ptr null, align 8
-define dso_local signext i32 @redunLoadImm(%0* %arg) {
+define dso_local signext i32 @redunLoadImm(ptr %arg) {
; CHECK-LABEL: redunLoadImm:
; verify that the load immediate has been folded into the isel and deleted
; CHECK-NOT: li r[[REG1:[0-9]+]], 0
; CHECK: iseleq r[[REG2:[0-9]+]], 0, r[[REG3:[0-9]+]]
bb:
- %tmp = icmp eq %0* %arg, null
+ %tmp = icmp eq ptr %arg, null
br i1 %tmp, label %bb9, label %bb1
bb1: ; preds = %bb
- %tmp2 = getelementptr inbounds %0, %0* %arg, i64 0, i32 1
+ %tmp2 = getelementptr inbounds %0, ptr %arg, i64 0, i32 1
br label %bb3
bb3: ; preds = %bb3, %bb1
- %tmp4 = load i16, i16* %tmp2, align 4
+ %tmp4 = load i16, ptr %tmp2, align 4
%tmp5 = sext i16 %tmp4 to i64
- %tmp6 = getelementptr inbounds %0, %0* %arg, i64 %tmp5
+ %tmp6 = getelementptr inbounds %0, ptr %arg, i64 %tmp5
%tmp7 = icmp eq i16 %tmp4, 0
- %tmp8 = select i1 %tmp7, %0* null, %0* %tmp6
- store %0* %tmp8, %0** @val, align 8
+ %tmp8 = select i1 %tmp7, ptr null, ptr %tmp6
+ store ptr %tmp8, ptr @val, align 8
br label %bb3
bb9: ; preds = %bb
- %tmp10 = load %0*, %0** @val, align 8
- %tmp11 = getelementptr inbounds %0, %0* %tmp10, i64 0, i32 0
- %tmp12 = load i32, i32* %tmp11, align 4
+ %tmp10 = load ptr, ptr @val, align 8
+ %tmp12 = load i32, ptr %tmp10, align 4
ret i32 %tmp12
}
%cmp = icmp eq i32 %var1, 1
%conv = zext i1 %cmp to i32
%shl = shl nuw nsw i32 %conv, 19
- store i32 %shl, i32* @res, align 4
+ store i32 %shl, ptr @res, align 4
ret void
}
entry:
%cmp = icmp eq i32 %var1, 1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @res2, align 4
+ store i32 %conv, ptr @res2, align 4
%shl = shl nuw nsw i32 %conv, 19
- store i32 %shl, i32* @res, align 4
+ store i32 %shl, ptr @res, align 4
ret void
}
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
br i1 %cmp.not, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 100, i32* @val, align 4
+ store i32 100, ptr @val, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i8, align 1
- store i8 %arg, i8* %arg.addr, align 1
- %0 = load i8, i8* %arg.addr, align 1
+ store i8 %arg, ptr %arg.addr, align 1
+ %0 = load i8, ptr %arg.addr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i8, align 1
- store i8 %arg, i8* %arg.addr, align 1
- %0 = load i8, i8* %arg.addr, align 1
+ store i8 %arg, ptr %arg.addr, align 1
+ %0 = load i8, ptr %arg.addr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i8, align 1
- store i8 %arg, i8* %arg.addr, align 1
- %0 = load i8, i8* %arg.addr, align 1
+ store i8 %arg, ptr %arg.addr, align 1
+ %0 = load i8, ptr %arg.addr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i8, align 1
- store i8 %arg, i8* %arg.addr, align 1
- %0 = load i8, i8* %arg.addr, align 1
+ store i8 %arg, ptr %arg.addr, align 1
+ %0 = load i8, ptr %arg.addr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptosi float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i16, align 2
- store i16 %arg, i16* %arg.addr, align 2
- %0 = load i16, i16* %arg.addr, align 2
+ store i16 %arg, ptr %arg.addr, align 2
+ %0 = load i16, ptr %arg.addr, align 2
%conv = sitofp i16 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptosi double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i16, align 2
- store i16 %arg, i16* %arg.addr, align 2
- %0 = load i16, i16* %arg.addr, align 2
+ store i16 %arg, ptr %arg.addr, align 2
+ %0 = load i16, ptr %arg.addr, align 2
%conv = sitofp i16 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i16, align 2
- store i16 %arg, i16* %arg.addr, align 2
- %0 = load i16, i16* %arg.addr, align 2
+ store i16 %arg, ptr %arg.addr, align 2
+ %0 = load i16, ptr %arg.addr, align 2
%conv = uitofp i16 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptoui double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i16, align 2
- store i16 %arg, i16* %arg.addr, align 2
- %0 = load i16, i16* %arg.addr, align 2
+ store i16 %arg, ptr %arg.addr, align 2
+ %0 = load i16, ptr %arg.addr, align 2
%conv = uitofp i16 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptosi float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i32, align 4
- store i32 %arg, i32* %arg.addr, align 4
- %0 = load i32, i32* %arg.addr, align 4
+ store i32 %arg, ptr %arg.addr, align 4
+ %0 = load i32, ptr %arg.addr, align 4
%conv = sitofp i32 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptosi double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i32, align 4
- store i32 %arg, i32* %arg.addr, align 4
- %0 = load i32, i32* %arg.addr, align 4
+ store i32 %arg, ptr %arg.addr, align 4
+ %0 = load i32, ptr %arg.addr, align 4
%conv = sitofp i32 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i32, align 4
- store i32 %arg, i32* %arg.addr, align 4
- %0 = load i32, i32* %arg.addr, align 4
+ store i32 %arg, ptr %arg.addr, align 4
+ %0 = load i32, ptr %arg.addr, align 4
%conv = uitofp i32 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptoui double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i32, align 4
- store i32 %arg, i32* %arg.addr, align 4
- %0 = load i32, i32* %arg.addr, align 4
+ store i32 %arg, ptr %arg.addr, align 4
+ %0 = load i32, ptr %arg.addr, align 4
%conv = uitofp i32 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptosi float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i64, align 8
- store i64 %arg, i64* %arg.addr, align 8
- %0 = load i64, i64* %arg.addr, align 8
+ store i64 %arg, ptr %arg.addr, align 8
+ %0 = load i64, ptr %arg.addr, align 8
%conv = sitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptosi double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i64, align 8
- store i64 %arg, i64* %arg.addr, align 8
- %0 = load i64, i64* %arg.addr, align 8
+ store i64 %arg, ptr %arg.addr, align 8
+ %0 = load i64, ptr %arg.addr, align 8
%conv = sitofp i64 %0 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca float, align 4
- store float %arg, float* %arg.addr, align 4
- %0 = load float, float* %arg.addr, align 4
+ store float %arg, ptr %arg.addr, align 4
+ %0 = load float, ptr %arg.addr, align 4
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i64, align 8
- store i64 %arg, i64* %arg.addr, align 8
- %0 = load i64, i64* %arg.addr, align 8
+ store i64 %arg, ptr %arg.addr, align 8
+ %0 = load i64, ptr %arg.addr, align 8
%conv = uitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca double, align 8
- store double %arg, double* %arg.addr, align 8
- %0 = load double, double* %arg.addr, align 8
+ store double %arg, ptr %arg.addr, align 8
+ %0 = load double, ptr %arg.addr, align 8
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%arg.addr = alloca i64, align 8
- store i64 %arg, i64* %arg.addr, align 8
- %0 = load i64, i64* %arg.addr, align 8
+ store i64 %arg, ptr %arg.addr, align 8
+ %0 = load i64, ptr %arg.addr, align 8
%conv = uitofp i64 %0 to double
ret double %conv
}
ret float %conv
}
-define void @fptoint_nofpexcept_f64(double %m, i32* %addr1, i64* %addr2) {
+define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) {
; MIR-LABEL: name: fptoint_nofpexcept_f64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
%conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore") #0
%conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore") #0
%conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv1, i32* %addr1, align 4
- store volatile i32 %conv2, i32* %addr1, align 4
- store volatile i64 %conv3, i64* %addr2, align 8
- store volatile i64 %conv4, i64* %addr2, align 8
+ store volatile i32 %conv1, ptr %addr1, align 4
+ store volatile i32 %conv2, ptr %addr1, align 4
+ store volatile i64 %conv3, ptr %addr2, align 8
+ store volatile i64 %conv4, ptr %addr2, align 8
ret void
}
-define void @fptoint_nofpexcept_f32(float %m, i32* %addr1, i64* %addr2) {
+define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) {
; MIR-LABEL: name: fptoint_nofpexcept_f32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
%conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore") #0
%conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore") #0
%conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv1, i32* %addr1, align 4
- store volatile i32 %conv2, i32* %addr1, align 4
- store volatile i64 %conv3, i64* %addr2, align 8
- store volatile i64 %conv4, i64* %addr2, align 8
+ store volatile i32 %conv1, ptr %addr1, align 4
+ store volatile i32 %conv2, ptr %addr1, align 4
+ store volatile i64 %conv3, ptr %addr2, align 8
+ store volatile i64 %conv4, ptr %addr2, align 8
ret void
}
-define void @inttofp_nofpexcept_i32(i32 %m, float* %addr1, double* %addr2) {
+define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) {
; MIR-LABEL: name: inttofp_nofpexcept_i32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
%conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
- store volatile float %conv1, float* %addr1, align 4
- store volatile float %conv2, float* %addr1, align 4
- store volatile double %conv3, double* %addr2, align 8
- store volatile double %conv4, double* %addr2, align 8
+ store volatile float %conv1, ptr %addr1, align 4
+ store volatile float %conv2, ptr %addr1, align 4
+ store volatile double %conv3, ptr %addr2, align 8
+ store volatile double %conv4, ptr %addr2, align 8
ret void
}
-define void @inttofp_nofpexcept_i64(i64 %m, float* %addr1, double* %addr2) {
+define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) {
; MIR-LABEL: name: inttofp_nofpexcept_i64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
%conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
- store volatile float %conv1, float* %addr1, align 4
- store volatile float %conv2, float* %addr1, align 4
- store volatile double %conv3, double* %addr2, align 8
- store volatile double %conv4, double* %addr2, align 8
+ store volatile float %conv1, ptr %addr1, align 4
+ store volatile float %conv2, ptr %addr1, align 4
+ store volatile double %conv3, ptr %addr2, align 8
+ store volatile double %conv4, ptr %addr2, align 8
ret void
}
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define double @foo1(i32* %x) #0 {
+define double @foo1(ptr %x) #0 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%conv = sext i32 %0 to i64
%conv1 = sitofp i64 %conv to double
ret double %conv1
; CHECK: blr
}
-define double @foo2(i32* %x) #0 {
+define double @foo2(ptr %x) #0 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%conv = zext i32 %0 to i64
%conv1 = sitofp i64 %conv to double
ret double %conv1
; CHECK: blr
}
-define double @foo3(i32* %x) #0 {
+define double @foo3(ptr %x) #0 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%1 = add i32 %0, 8
%conv = zext i32 %1 to i64
%conv1 = sitofp i64 %conv to double
; CHECK: blr
}
-define double @foo4(i32* %x) #0 {
+define double @foo4(ptr %x) #0 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%1 = add i32 %0, 8
%conv = sext i32 %1 to i64
%conv1 = sitofp i64 %conv to double
entry:
%x = alloca [32568 x i8]
%"alloca point" = bitcast i32 0 to i32
- %x1 = bitcast [32568 x i8]* %x to i8*
; Check that the RS spill slot has been allocated (because the estimate
; will fail the small-frame-size check and the function has spills).
; CHECK: @foo
; CHECK: stdu 1, -32768(1)
- %s1 = call i64 @bar(i8* %x1) nounwind
- %s2 = call i64 @bar(i8* %x1) nounwind
- %s3 = call i64 @bar(i8* %x1) nounwind
- %s4 = call i64 @bar(i8* %x1) nounwind
- %s5 = call i64 @bar(i8* %x1) nounwind
- %s6 = call i64 @bar(i8* %x1) nounwind
- %s7 = call i64 @bar(i8* %x1) nounwind
- %s8 = call i64 @bar(i8* %x1) nounwind
+ %s1 = call i64 @bar(ptr %x) nounwind
+ %s2 = call i64 @bar(ptr %x) nounwind
+ %s3 = call i64 @bar(ptr %x) nounwind
+ %s4 = call i64 @bar(ptr %x) nounwind
+ %s5 = call i64 @bar(ptr %x) nounwind
+ %s6 = call i64 @bar(ptr %x) nounwind
+ %s7 = call i64 @bar(ptr %x) nounwind
+ %s8 = call i64 @bar(ptr %x) nounwind
%r = call i64 @can(i64 %s1, i64 %s2, i64 %s3, i64 %s4, i64 %s5, i64 %s6, i64 %s7, i64 %s8) nounwind
br label %return
ret i64 %r
}
-declare i64 @bar(i8*)
+declare i64 @bar(ptr)
declare i64 @can(i64, i64, i64, i64, i64, i64, i64, i64)
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-declare void @llvm.eh.sjlj.longjmp(i8*) #1
+declare void @llvm.eh.sjlj.longjmp(ptr) #1
-define i8* @main() #0 {
+define ptr @main() #0 {
entry:
- %0 = call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ ret ptr %0
; CHECK: @main
; CHECK: mr 3, 1
}
-define i8* @foo() #3 { ; naked
+define ptr @foo() #3 { ; naked
entry:
- %0 = call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ ret ptr %0
; CHECK: @foo
; CHECK: mr 3, 1
}
-define i8* @bar() #0 {
+define ptr @bar() #0 {
entry:
- %x = alloca [100000 x i8] ; <[100000 x i8]*> [#uses=1]
- %x1 = bitcast [100000 x i8]* %x to i8* ; <i8*> [#uses=1]
- call void @use(i8* %x1) nounwind
- %0 = call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
+ %x = alloca [100000 x i8] ; <ptr> [#uses=1]
+ call void @use(ptr %x) nounwind
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ ret ptr %0
; Note that if we start eliminating non-leaf frame pointers by default, this
; will need to be updated.
; CHECK: mr 3, 31
}
-declare void @use(i8*)
+declare void @use(ptr)
-declare i8* @llvm.frameaddress(i32) #2
+declare ptr @llvm.frameaddress(i32) #2
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { noreturn nounwind }
; DM-NEXT: stw 3, -4(1)
; DM-NEXT: blr
entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %tmp = alloca i32 ; <i32*> [#uses=2]
+ %retval = alloca i32 ; <ptr> [#uses=2]
+ %tmp = alloca i32 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp1 = call i32 @llvm.flt.rounds( ) ; <i32> [#uses=1]
- store i32 %tmp1, i32* %tmp, align 4
- %tmp2 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
- store i32 %tmp2, i32* %retval, align 4
+ store i32 %tmp1, ptr %tmp, align 4
+ %tmp2 = load i32, ptr %tmp, align 4 ; <i32> [#uses=1]
+ store i32 %tmp2, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval3 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval3 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval3
}
%struct.teststruct = type { [12 x i32], i32 }
-define void @copy(%struct.teststruct* noalias nocapture sret(%struct.teststruct) %agg.result, %struct.teststruct* nocapture %in) nounwind {
+define void @copy(ptr noalias nocapture sret(%struct.teststruct) %agg.result, ptr nocapture %in) nounwind {
entry:
; CHECK: @copy
; CHECK-NOT: bl memcpy
- %0 = bitcast %struct.teststruct* %agg.result to i8*
- %1 = bitcast %struct.teststruct* %in to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 52, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %agg.result, ptr align 4 %in, i32 52, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
%struct.teststruct = type { [24 x i32], i32 }
-define void @copy(%struct.teststruct* noalias nocapture sret(%struct.teststruct) %agg.result, %struct.teststruct* nocapture %in) nounwind {
+define void @copy(ptr noalias nocapture sret(%struct.teststruct) %agg.result, ptr nocapture %in) nounwind {
entry:
; CHECK: @copy
; CHECK-NOT: bl memcpy
- %0 = bitcast %struct.teststruct* %agg.result to i8*
- %1 = bitcast %struct.teststruct* %in to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 100, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.result, ptr align 4 %in, i64 100, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64--linux"
-@g = internal constant i8* bitcast (void ()* @f to i8*), section "gsection", align 8
-@h = constant i8* bitcast (void ()* @f to i8*), section "hsection", align 8
-@llvm.used = appending global [2 x i8*] [i8* bitcast (i8** @g to i8*), i8* bitcast (i8** @h to i8*)], section "llvm.metadata"
+@g = internal constant ptr @f, section "gsection", align 8
+@h = constant ptr @f, section "hsection", align 8
+@llvm.used = appending global [2 x ptr] [ptr @g, ptr @h], section "llvm.metadata"
; Function Attrs: nounwind uwtable
define internal void @f() {
define dso_local void @foo() {
ret void
}
-declare i32 @bar(i8*)
+declare i32 @bar(ptr)
; CHECK-LABEL: {{^}}zed:
; CHECK: addis 3, 2, foo@toc@ha
; CHECK-NEXT: bl bar
define void @zed() {
- call i32 @bar(i8* bitcast (void ()* @foo to i8*))
+ call i32 @bar(ptr @foo)
ret void
}
; RUN: -mattr=-paired-vector-memops,-pcrelative-memops -verify-misched \
; RUN: -debug-only=machine-scheduler 2>&1 | FileCheck %s
-define i64 @store_i64(i64* nocapture %P, i64 %v) {
+define i64 @store_i64(ptr nocapture %P, i64 %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_i64:%bb.0
; CHECK: SU([[SU1]]): STD renamable $x[[REG]], 8
; CHECK: SU([[SU2]]): STD renamable $x[[REG]], 24
; CHECK: SU([[SU3]]): STD renamable $x[[REG]], 32
- %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
- store i64 %v, i64* %arrayidx
- %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
- store i64 %v, i64* %arrayidx1
- %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1
- store i64 %v, i64* %arrayidx2
- %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4
- store i64 %v, i64* %arrayidx3
+ %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+ store i64 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+ store i64 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1
+ store i64 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4
+ store i64 %v, ptr %arrayidx3
ret i64 %v
}
-define i32 @store_i32(i32* nocapture %P, i32 %v) {
+define i32 @store_i32(ptr nocapture %P, i32 %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_i32:%bb.0
; CHECK: SU([[SU1]]): STW renamable $r[[REG]], 44
; CHECK: SU([[SU2]]): STW renamable $r[[REG]], 52
; CHECK: SU([[SU3]]): STW renamable $r[[REG]], 56
- %arrayidx = getelementptr inbounds i32, i32* %P, i32 13
- store i32 %v, i32* %arrayidx
- %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 12
- store i32 %v, i32* %arrayidx1
- %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 11
- store i32 %v, i32* %arrayidx2
- %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 14
- store i32 %v, i32* %arrayidx3
+ %arrayidx = getelementptr inbounds i32, ptr %P, i32 13
+ store i32 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 12
+ store i32 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 11
+ store i32 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 14
+ store i32 %v, ptr %arrayidx3
ret i32 %v
}
-define void @store_i64_neg(i64* nocapture %P, i64 %v) #0 {
+define void @store_i64_neg(ptr nocapture %P, i64 %v) #0 {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_i64_neg:%bb.0
; CHECK: SU([[SU1]]): STD renamable $x[[REG]], -16
; CHECK: SU([[SU2]]): STD renamable $x[[REG]], -24
; CHECK: SU([[SU3]]): STD renamable $x[[REG]], -32
- %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
- store i64 %v, i64* %arrayidx
- %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 -1
- store i64 %v, i64* %arrayidx1
- %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 -2
- store i64 %v, i64* %arrayidx2
- %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 -4
- store i64 %v, i64* %arrayidx3
+ %arrayidx = getelementptr inbounds i64, ptr %P, i64 -3
+ store i64 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 -1
+ store i64 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 -2
+ store i64 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 -4
+ store i64 %v, ptr %arrayidx3
ret void
}
-define void @store_i32_neg(i32* nocapture %P, i32 %v) #0 {
+define void @store_i32_neg(ptr nocapture %P, i32 %v) #0 {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_i32_neg:%bb.0
; CHECK:SU([[SU1]]): STW renamable $r[[REG]], -8
; CHECK:SU([[SU2]]): STW renamable $r[[REG]], -12
; CHECK:SU([[SU3]]): STW renamable $r[[REG]], -16
- %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
- store i32 %v, i32* %arrayidx
- %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 -1
- store i32 %v, i32* %arrayidx1
- %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 -2
- store i32 %v, i32* %arrayidx2
- %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 -4
- store i32 %v, i32* %arrayidx3
+ %arrayidx = getelementptr inbounds i32, ptr %P, i32 -3
+ store i32 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 -1
+ store i32 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 -2
+ store i32 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 -4
+ store i32 %v, ptr %arrayidx3
ret void
}
-define void @store_double(double* nocapture %P, double %v) {
+define void @store_double(ptr nocapture %P, double %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_double:%bb.0
; CHECK: SU([[SU1]]): STFD renamable $f[[REG]], 16
; CHECK: SU([[SU2]]): STFD renamable $f[[REG]], 24
; CHECK: SU([[SU3]]): STFD renamable $f[[REG]], 32
- %arrayidx = getelementptr inbounds double, double* %P, i64 3
- store double %v, double* %arrayidx
- %arrayidx1 = getelementptr inbounds double, double* %P, i64 1
- store double %v, double* %arrayidx1
- %arrayidx2 = getelementptr inbounds double, double* %P, i64 2
- store double %v, double* %arrayidx2
- %arrayidx3 = getelementptr inbounds double, double* %P, i64 4
- store double %v, double* %arrayidx3
+ %arrayidx = getelementptr inbounds double, ptr %P, i64 3
+ store double %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds double, ptr %P, i64 1
+ store double %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds double, ptr %P, i64 2
+ store double %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds double, ptr %P, i64 4
+ store double %v, ptr %arrayidx3
ret void
}
-define void @store_float(float* nocapture %P, float %v) {
+define void @store_float(ptr nocapture %P, float %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_float:%bb.0
; CHECK: SU([[SU1]]): STFS renamable $f[[REG]], 4
; CHECK: SU([[SU2]]): STFS renamable $f[[REG]], 8
; CHECK: SU([[SU3]]): STFS renamable $f[[REG]], 16
- %arrayidx = getelementptr inbounds float, float* %P, i64 3
- store float %v, float* %arrayidx
- %arrayidx1 = getelementptr inbounds float, float* %P, i64 1
- store float %v, float* %arrayidx1
- %arrayidx2 = getelementptr inbounds float, float* %P, i64 2
- store float %v, float* %arrayidx2
- %arrayidx3 = getelementptr inbounds float, float* %P, i64 4
- store float %v, float* %arrayidx3
+ %arrayidx = getelementptr inbounds float, ptr %P, i64 3
+ store float %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds float, ptr %P, i64 1
+ store float %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds float, ptr %P, i64 2
+ store float %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds float, ptr %P, i64 4
+ store float %v, ptr %arrayidx3
ret void
}
; Cannot fuse the store/load if there is volatile in between
-define i64 @store_volatile(i64* nocapture %P, i64 %v) {
+define i64 @store_volatile(ptr nocapture %P, i64 %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_volatile:%bb.0
; CHECK: SU([[SU1]]): STD renamable $x[[REG]], 16
; CHECK: SU([[SU2]]): STD renamable $x[[REG]], 8
; CHECK: SU([[SU3]]): STD renamable $x[[REG]], 32
- %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
- store volatile i64 %v, i64* %arrayidx
- %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
- store volatile i64 %v, i64* %arrayidx1
- %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1
- store volatile i64 %v, i64* %arrayidx2
- %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4
- store volatile i64 %v, i64* %arrayidx3
+ %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+ store volatile i64 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+ store volatile i64 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1
+ store volatile i64 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4
+ store volatile i64 %v, ptr %arrayidx3
ret i64 %v
}
; CHECK: Cluster ld/st SU([[SU5:[0-9]+]]) - SU([[SU6:[0-9]+]])
; CHECK: SU([[SU5]]): STW8 renamable $x{{[0-9]+}}, 24
; CHECK: SU([[SU6]]): STW renamable $r{{[0-9]+}}, 20
- store i32 9, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @p, i64 0, i64 6), align 4
- store i32 %n, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @p, i64 0, i64 7), align 4
+ store i32 9, ptr getelementptr inbounds ([100 x i32], ptr @p, i64 0, i64 6), align 4
+ store i32 %n, ptr getelementptr inbounds ([100 x i32], ptr @p, i64 0, i64 7), align 4
%add = add nsw i32 %n, %m
- store i32 %add, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @p, i64 0, i64 5), align 4
+ store i32 %add, ptr getelementptr inbounds ([100 x i32], ptr @p, i64 0, i64 5), align 4
ret void
}
; CHECK: Cluster ld/st SU([[SU3:[0-9]+]]) - SU([[SU4:[0-9]+]])
; CHECK: SU([[SU3]]): STW8 renamable $x{{[0-9]+}}, 24
; CHECK: SU([[SU4]]): STW8 renamable $x{{[0-9]+}}, 28
- store i32 9, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @p, i64 0, i64 6), align 4
- store i32 %n, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @p, i64 0, i64 7), align 4
+ store i32 9, ptr getelementptr inbounds ([100 x i32], ptr @p, i64 0, i64 6), align 4
+ store i32 %n, ptr getelementptr inbounds ([100 x i32], ptr @p, i64 0, i64 7), align 4
ret void
}
-declare void @bar(i64*)
+declare void @bar(ptr)
define void @store_frame_index(i32 %a, i32 %b) {
entry:
; CHECK: SU([[SU2]]): STD %{{[0-9]+}}:g8rc, 0, %stack.0.buf
; CHECK: SU([[SU3]]): STD %{{[0-9]+}}:g8rc, 8, %stack.0.buf
%buf = alloca [8 x i64], align 8
- %0 = bitcast [8 x i64]* %buf to i8*
%conv = zext i32 %a to i64
- %arrayidx = getelementptr inbounds [8 x i64], [8 x i64]* %buf, i64 0, i64 0
- store i64 %conv, i64* %arrayidx, align 8
+ store i64 %conv, ptr %buf, align 8
%conv1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [8 x i64], [8 x i64]* %buf, i64 0, i64 1
- store i64 %conv1, i64* %arrayidx2, align 8
- call void @bar(i64* nonnull %arrayidx)
+ %arrayidx2 = getelementptr inbounds [8 x i64], ptr %buf, i64 0, i64 1
+ store i64 %conv1, ptr %arrayidx2, align 8
+ call void @bar(ptr nonnull %buf)
ret void
}
target triple = "powerpc64le-unknown-linux"
%"class.std::__1::__assoc_sub_state" = type { %"class.std::__1::__shared_count", %"class.std::__exception_ptr::exception_ptr", %"class.std::__1::mutex", %"class.std::__1::condition_variable", i32 }
-%"class.std::__1::__shared_count" = type { i32 (...)**, i64 }
-%"class.std::__exception_ptr::exception_ptr" = type { i8* }
+%"class.std::__1::__shared_count" = type { ptr, i64 }
+%"class.std::__exception_ptr::exception_ptr" = type { ptr }
%"class.std::__1::mutex" = type { %union.pthread_mutex_t }
%union.pthread_mutex_t = type { %"struct.<anonymous union>::__pthread_mutex_s" }
%"struct.<anonymous union>::__pthread_mutex_s" = type { i32, i32, i32, i32, i32, i32, %struct.__pthread_internal_list }
-%struct.__pthread_internal_list = type { %struct.__pthread_internal_list*, %struct.__pthread_internal_list* }
+%struct.__pthread_internal_list = type { ptr, ptr }
%"class.std::__1::condition_variable" = type { %union.pthread_cond_t }
%union.pthread_cond_t = type { %struct.anon }
-%struct.anon = type { i32, i32, i64, i64, i64, i8*, i32, i32 }
-%"class.std::__1::unique_lock" = type { %"class.std::__1::mutex"*, i8 }
+%struct.anon = type { i32, i32, i64, i64, i64, ptr, i32, i32 }
+%"class.std::__1::unique_lock" = type { ptr, i8 }
declare i32 @__gxx_personality_v0(...)
; Function Attrs: optsize
-define void @_ZNSt3__117__assoc_sub_state4copyEv(%"class.std::__1::__assoc_sub_state"* %this) #0 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_ZNSt3__117__assoc_sub_state4copyEv(ptr %this) #0 align 2 personality ptr @__gxx_personality_v0 {
entry:
%__lk = alloca %"class.std::__1::unique_lock", align 8
%ref.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
%tmp = alloca { i64, i64 }, align 8
%agg.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
- %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 2
- %__m_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 0
- store %"class.std::__1::mutex"* %__mut_, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
- %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 1
- store i8 1, i8* %__owns_.i.i, align 8, !tbaa !6
- call void @_ZNSt3__15mutex4lockEv(%"class.std::__1::mutex"* %__mut_) #4
- invoke void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(%"class.std::__1::__assoc_sub_state"* %this, %"class.std::__1::unique_lock"* %__lk) #4
+ %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", ptr %this, i64 0, i32 2
+ store ptr %__mut_, ptr %__lk, align 8, !tbaa !5
+ %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", ptr %__lk, i64 0, i32 1
+ store i8 1, ptr %__owns_.i.i, align 8, !tbaa !6
+ call void @_ZNSt3__15mutex4lockEv(ptr %__mut_) #4
+ invoke void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(ptr %this, ptr %__lk) #4
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
- %0 = bitcast { i64, i64 }* %tmp to i8*
- call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 16, i1 false)
- call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval({ i64, i64 }) %tmp) #5
- %call = call zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"* %__exception_, %"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5
- call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5
+ %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", ptr %this, i64 0, i32 1
+ call void @llvm.memset.p0.i64(ptr align 8 %tmp, i8 0, i64 16, i1 false)
+ call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(ptr %ref.tmp, ptr byval({ i64, i64 }) %tmp) #5
+ %call = call zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(ptr %__exception_, ptr %ref.tmp) #5
+ call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(ptr %ref.tmp) #5
br i1 %call, label %if.then, label %if.end
if.then: ; preds = %invoke.cont
- call void @_ZNSt15__exception_ptr13exception_ptrC1ERKS0_(%"class.std::__exception_ptr::exception_ptr"* %agg.tmp, %"class.std::__exception_ptr::exception_ptr"* %__exception_) #5
- invoke void @_ZSt17rethrow_exceptionNSt15__exception_ptr13exception_ptrE(%"class.std::__exception_ptr::exception_ptr"* %agg.tmp) #6
+ call void @_ZNSt15__exception_ptr13exception_ptrC1ERKS0_(ptr %agg.tmp, ptr %__exception_) #5
+ invoke void @_ZSt17rethrow_exceptionNSt15__exception_ptr13exception_ptrE(ptr %agg.tmp) #6
to label %invoke.cont4 unwind label %lpad3
invoke.cont4: ; preds = %if.then
unreachable
lpad: ; preds = %entry
- %1 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
- %2 = extractvalue { i8*, i32 } %1, 0
- %3 = extractvalue { i8*, i32 } %1, 1
+ %1 = extractvalue { ptr, i32 } %0, 0
+ %2 = extractvalue { ptr, i32 } %0, 1
br label %ehcleanup
lpad3: ; preds = %if.then
- %4 = landingpad { i8*, i32 }
+ %3 = landingpad { ptr, i32 }
cleanup
- %5 = extractvalue { i8*, i32 } %4, 0
- %6 = extractvalue { i8*, i32 } %4, 1
- call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"* %agg.tmp) #5
+ %4 = extractvalue { ptr, i32 } %3, 0
+ %5 = extractvalue { ptr, i32 } %3, 1
+ call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(ptr %agg.tmp) #5
br label %ehcleanup
if.end: ; preds = %invoke.cont
- %7 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
- %tobool.i.i = icmp eq i8 %7, 0
+ %6 = load i8, ptr %__owns_.i.i, align 8, !tbaa !6, !range !4
+ %tobool.i.i = icmp eq i8 %6, 0
br i1 %tobool.i.i, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit, label %if.then.i.i
if.then.i.i: ; preds = %if.end
- %8 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
- call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %8) #5
+ %7 = load ptr, ptr %__lk, align 8, !tbaa !5
+ call void @_ZNSt3__15mutex6unlockEv(ptr %7) #5
br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit
_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit: ; preds = %if.then.i.i, %if.end
ret void
ehcleanup: ; preds = %lpad3, %lpad
- %exn.slot.0 = phi i8* [ %5, %lpad3 ], [ %2, %lpad ]
- %ehselector.slot.0 = phi i32 [ %6, %lpad3 ], [ %3, %lpad ]
- %9 = load i8, i8* %__owns_.i.i, align 8, !tbaa !6, !range !4
- %tobool.i.i9 = icmp eq i8 %9, 0
+ %exn.slot.0 = phi ptr [ %4, %lpad3 ], [ %1, %lpad ]
+ %ehselector.slot.0 = phi i32 [ %5, %lpad3 ], [ %2, %lpad ]
+ %8 = load i8, ptr %__owns_.i.i, align 8, !tbaa !6, !range !4
+ %tobool.i.i9 = icmp eq i8 %8, 0
br i1 %tobool.i.i9, label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12, label %if.then.i.i11
if.then.i.i11: ; preds = %ehcleanup
- %10 = load %"class.std::__1::mutex"*, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
- call void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"* %10) #5
+ %9 = load ptr, ptr %__lk, align 8, !tbaa !5
+ call void @_ZNSt3__15mutex6unlockEv(ptr %9) #5
br label %_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12
_ZNSt3__111unique_lockINS_5mutexEED1Ev.exit12: ; preds = %if.then.i.i11, %ehcleanup
- %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn.slot.0, 0
- %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %ehselector.slot.0, 1
- resume { i8*, i32 } %lpad.val5
+ %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn.slot.0, 0
+ %lpad.val5 = insertvalue { ptr, i32 } %lpad.val, i32 %ehselector.slot.0, 1
+ resume { ptr, i32 } %lpad.val5
}
; Function Attrs: optsize
-declare void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(%"class.std::__1::__assoc_sub_state"*, %"class.std::__1::unique_lock"*) #0 align 2
+declare void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(ptr, ptr) #0 align 2
; Function Attrs: nounwind optsize
-declare zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"*, %"class.std::__exception_ptr::exception_ptr"*) #1
+declare zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(ptr, ptr) #1
; Function Attrs: nounwind optsize
-declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"*, { i64, i64 }* byval({ i64, i64 })) #1
+declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(ptr, ptr byval({ i64, i64 })) #1
; Function Attrs: nounwind optsize
-declare void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"*) #1
+declare void @_ZNSt15__exception_ptr13exception_ptrD1Ev(ptr) #1
; Function Attrs: noreturn optsize
-declare void @_ZSt17rethrow_exceptionNSt15__exception_ptr13exception_ptrE(%"class.std::__exception_ptr::exception_ptr"*) #2
+declare void @_ZSt17rethrow_exceptionNSt15__exception_ptr13exception_ptrE(ptr) #2
; Function Attrs: nounwind optsize
-declare void @_ZNSt15__exception_ptr13exception_ptrC1ERKS0_(%"class.std::__exception_ptr::exception_ptr"*, %"class.std::__exception_ptr::exception_ptr"*) #1
+declare void @_ZNSt15__exception_ptr13exception_ptrC1ERKS0_(ptr, ptr) #1
; Function Attrs: nounwind optsize
-declare void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"*) #1
+declare void @_ZNSt3__15mutex6unlockEv(ptr) #1
; Function Attrs: optsize
-declare void @_ZNSt3__15mutex4lockEv(%"class.std::__1::mutex"*) #0
+declare void @_ZNSt3__15mutex4lockEv(ptr) #0
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #3
attributes #0 = { optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
; CHECK-NEXT: plbz r3, _ZL13StaticBoolVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @_ZL13StaticBoolVar, align 1, !range !0
+ %0 = load i8, ptr @_ZL13StaticBoolVar, align 1, !range !0
%tobool = icmp ne i8 %0, 0
ret i1 %tobool
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @_ZL19StaticSignedCharVar, align 1
+ %0 = load i8, ptr @_ZL19StaticSignedCharVar, align 1
ret i8 %0
}
; CHECK-NEXT: plbz r3, _ZL21StaticUnsignedCharVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @_ZL21StaticUnsignedCharVar, align 1
+ %0 = load i8, ptr @_ZL21StaticUnsignedCharVar, align 1
ret i8 %0
}
; CHECK-NEXT: plha r3, _ZL20StaticSignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* @_ZL20StaticSignedShortVar, align 2
+ %0 = load i16, ptr @_ZL20StaticSignedShortVar, align 2
ret i16 %0
}
; CHECK-NEXT: plhz r3, _ZL22StaticUnsignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* @_ZL22StaticUnsignedShortVar, align 2
+ %0 = load i16, ptr @_ZL22StaticUnsignedShortVar, align 2
ret i16 %0
}
; CHECK-NEXT: plwa r3, _ZL18StaticSignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @_ZL18StaticSignedIntVar, align 4
+ %0 = load i32, ptr @_ZL18StaticSignedIntVar, align 4
ret i32 %0
}
; CHECK-NEXT: plwz r3, _ZL20StaticUnsignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @_ZL20StaticUnsignedIntVar, align 4
+ %0 = load i32, ptr @_ZL20StaticUnsignedIntVar, align 4
ret i32 %0
}
; CHECK-NEXT: pld r3, _ZL19StaticSignedLongVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* @_ZL19StaticSignedLongVar, align 8
+ %0 = load i64, ptr @_ZL19StaticSignedLongVar, align 8
ret i64 %0
}
; CHECK-NEXT: plfs f1, _ZL14StaticFloatVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load float, float* @_ZL14StaticFloatVar, align 4
+ %0 = load float, ptr @_ZL14StaticFloatVar, align 4
ret float %0
}
; CHECK-NEXT: plfd f1, _ZL15StaticDoubleVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load double, double* @_ZL15StaticDoubleVar, align 8
+ %0 = load double, ptr @_ZL15StaticDoubleVar, align 8
ret double %0
}
; CHECK-NEXT: plfd f2, _ZL19StaticLongDoubleVar@PCREL+8(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load ppc_fp128, ppc_fp128* @_ZL19StaticLongDoubleVar, align 16
+ %0 = load ppc_fp128, ptr @_ZL19StaticLongDoubleVar, align 16
ret ppc_fp128 %0
}
; CHECK-NEXT: pld r4, _ZL23StaticSigned__Int128Var@PCREL+8(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i128, i128* @_ZL23StaticSigned__Int128Var, align 16
+ %0 = load i128, ptr @_ZL23StaticSigned__Int128Var, align 16
ret i128 %0
}
; CHECK-NEXT: plxv v2, _ZL19Static__Float128Var@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load fp128, fp128* @_ZL19Static__Float128Var, align 16
+ %0 = load fp128, ptr @_ZL19Static__Float128Var, align 16
ret fp128 %0
}
; CHECK-NEXT: plxv v2, _ZL25StaticVectorSignedCharVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* @_ZL25StaticVectorSignedCharVar, align 16
+ %0 = load <16 x i8>, ptr @_ZL25StaticVectorSignedCharVar, align 16
ret <16 x i8> %0
}
; CHECK-NEXT: plxv v2, _ZL26StaticVectorSignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* @_ZL26StaticVectorSignedShortVar, align 16
+ %0 = load <8 x i16>, ptr @_ZL26StaticVectorSignedShortVar, align 16
ret <8 x i16> %0
}
; CHECK-NEXT: plxv v2, _ZL24StaticVectorSignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* @_ZL24StaticVectorSignedIntVar, align 16
+ %0 = load <4 x i32>, ptr @_ZL24StaticVectorSignedIntVar, align 16
ret <4 x i32> %0
}
; CHECK-NEXT: plxv v2, _ZL29StaticVectorSignedLongLongVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* @_ZL29StaticVectorSignedLongLongVar, align 16
+ %0 = load <2 x i64>, ptr @_ZL29StaticVectorSignedLongLongVar, align 16
ret <2 x i64> %0
}
; CHECK-NEXT: plxv v2, _ZL29StaticVectorSigned__Int128Var@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <1 x i128>, <1 x i128>* @_ZL29StaticVectorSigned__Int128Var, align 16
+ %0 = load <1 x i128>, ptr @_ZL29StaticVectorSigned__Int128Var, align 16
ret <1 x i128> %0
}
; CHECK-NEXT: plxv v2, _ZL20StaticVectorFloatVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <4 x float>, <4 x float>* @_ZL20StaticVectorFloatVar, align 16
+ %0 = load <4 x float>, ptr @_ZL20StaticVectorFloatVar, align 16
ret <4 x float> %0
}
; CHECK-NEXT: plxv v2, _ZL21StaticVectorDoubleVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load <2 x double>, <2 x double>* @_ZL21StaticVectorDoubleVar, align 16
+ %0 = load <2 x double>, ptr @_ZL21StaticVectorDoubleVar, align 16
ret <2 x double> %0
}
; CHECK-NEXT: blr
entry:
%frombool = zext i1 %val to i8
- store i8 %frombool, i8* @_ZL13StaticBoolVar, align 1
+ store i8 %frombool, ptr @_ZL13StaticBoolVar, align 1
ret void
}
; CHECK-NEXT: pstb r3, _ZL19StaticSignedCharVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i8 %val, i8* @_ZL19StaticSignedCharVar, align 1
+ store i8 %val, ptr @_ZL19StaticSignedCharVar, align 1
ret void
}
; CHECK-NEXT: pstb r3, _ZL21StaticUnsignedCharVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i8 %val, i8* @_ZL21StaticUnsignedCharVar, align 1
+ store i8 %val, ptr @_ZL21StaticUnsignedCharVar, align 1
ret void
}
; CHECK-NEXT: psth r3, _ZL20StaticSignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i16 %val, i16* @_ZL20StaticSignedShortVar, align 2
+ store i16 %val, ptr @_ZL20StaticSignedShortVar, align 2
ret void
}
; CHECK-NEXT: psth r3, _ZL22StaticUnsignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i16 %val, i16* @_ZL22StaticUnsignedShortVar, align 2
+ store i16 %val, ptr @_ZL22StaticUnsignedShortVar, align 2
ret void
}
; CHECK-NEXT: pstw r3, _ZL18StaticSignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i32 %val, i32* @_ZL18StaticSignedIntVar, align 4
+ store i32 %val, ptr @_ZL18StaticSignedIntVar, align 4
ret void
}
; CHECK-NEXT: pstw r3, _ZL20StaticUnsignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i32 %val, i32* @_ZL20StaticUnsignedIntVar, align 4
+ store i32 %val, ptr @_ZL20StaticUnsignedIntVar, align 4
ret void
}
; CHECK-NEXT: pstd r3, _ZL19StaticSignedLongVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i64 %val, i64* @_ZL19StaticSignedLongVar, align 8
+ store i64 %val, ptr @_ZL19StaticSignedLongVar, align 8
ret void
}
; CHECK-NEXT: pstfs f1, _ZL14StaticFloatVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store float %val, float* @_ZL14StaticFloatVar, align 4
+ store float %val, ptr @_ZL14StaticFloatVar, align 4
ret void
}
; CHECK-NEXT: pstfd f1, _ZL15StaticDoubleVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store double %val, double* @_ZL15StaticDoubleVar, align 8
+ store double %val, ptr @_ZL15StaticDoubleVar, align 8
ret void
}
; CHECK-NEXT: pstfd f1, _ZL19StaticLongDoubleVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store ppc_fp128 %val, ppc_fp128* @_ZL19StaticLongDoubleVar, align 16
+ store ppc_fp128 %val, ptr @_ZL19StaticLongDoubleVar, align 16
ret void
}
; CHECK-NEXT: pstd r3, _ZL23StaticSigned__Int128Var@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i128 %val, i128* @_ZL23StaticSigned__Int128Var, align 16
+ store i128 %val, ptr @_ZL23StaticSigned__Int128Var, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL19Static__Float128Var@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store fp128 %val, fp128* @_ZL19Static__Float128Var, align 16
+ store fp128 %val, ptr @_ZL19Static__Float128Var, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL25StaticVectorSignedCharVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <16 x i8> %val, <16 x i8>* @_ZL25StaticVectorSignedCharVar, align 16
+ store <16 x i8> %val, ptr @_ZL25StaticVectorSignedCharVar, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL26StaticVectorSignedShortVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <8 x i16> %val, <8 x i16>* @_ZL26StaticVectorSignedShortVar, align 16
+ store <8 x i16> %val, ptr @_ZL26StaticVectorSignedShortVar, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL24StaticVectorSignedIntVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <4 x i32> %val, <4 x i32>* @_ZL24StaticVectorSignedIntVar, align 16
+ store <4 x i32> %val, ptr @_ZL24StaticVectorSignedIntVar, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL29StaticVectorSignedLongLongVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <2 x i64> %val, <2 x i64>* @_ZL29StaticVectorSignedLongLongVar, align 16
+ store <2 x i64> %val, ptr @_ZL29StaticVectorSignedLongLongVar, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL29StaticVectorSigned__Int128Var@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <1 x i128> %val, <1 x i128>* @_ZL29StaticVectorSigned__Int128Var, align 16
+ store <1 x i128> %val, ptr @_ZL29StaticVectorSigned__Int128Var, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL20StaticVectorFloatVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <4 x float> %val, <4 x float>* @_ZL20StaticVectorFloatVar, align 16
+ store <4 x float> %val, ptr @_ZL20StaticVectorFloatVar, align 16
ret void
}
; CHECK-NEXT: pstxv v2, _ZL21StaticVectorDoubleVar@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store <2 x double> %val, <2 x double>* @_ZL21StaticVectorDoubleVar, align 16
+ store <2 x double> %val, ptr @_ZL21StaticVectorDoubleVar, align 16
ret void
}
- @_ZL3ptr = internal unnamed_addr global i32* null, align 8
+ @_ZL3ptr = internal unnamed_addr global ptr null, align 8
define void @_Z14WriteStaticPtrv() {
; CHECK-LABEL: _Z14WriteStaticPtrv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32*, i32** @_ZL3ptr, align 8
- store i32 3, i32* %0, align 4
+ %0 = load ptr, ptr @_ZL3ptr, align 8
+ store i32 3, ptr %0, align 4
ret void
}
@.str = private unnamed_addr constant [13 x i8] c"Hello World\0A\00", align 1
-@str = dso_local local_unnamed_addr global i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i64 0, i64 0), align 8
+@str = dso_local local_unnamed_addr global ptr @.str, align 8
define zeroext i8 @_Z17Char0InStrLiteralv() {
; CHECK-LABEL: _Z17Char0InStrLiteralv:
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8*, i8** @str, align 8
- %1 = load i8, i8* %0, align 1
+ %0 = load ptr, ptr @str, align 8
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
; CHECK-NEXT: lbz r3, 3(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8*, i8** @str, align 8
- %arrayidx = getelementptr inbounds i8, i8* %0, i64 3
- %1 = load i8, i8* %arrayidx, align 1
+ %0 = load ptr, ptr @str, align 8
+ %arrayidx = getelementptr inbounds i8, ptr %0, i64 3
+ %1 = load i8, ptr %arrayidx, align 1
ret i8 %1
}
; CHECK-NEXT: plwa r3, _ZL5array@PCREL+12(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @_ZL5array, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @_ZL5array, i64 0, i64 3), align 4
ret i32 %0
}
; CHECK-NEXT: pstw r3, _ZL5array@PCREL+12(0), 1
; CHECK-NEXT: blr
entry:
- store i32 5, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @_ZL5array, i64 0, i64 3), align 4
+ store i32 5, ptr getelementptr inbounds ([10 x i32], ptr @_ZL5array, i64 0, i64 3), align 4
ret void
}
; CHECK-NEXT: plwa r3, _ZL9structure@PCREL+4(0), 1
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @_ZL9structure, i64 0, i32 2), align 4
+ %0 = load i32, ptr getelementptr inbounds (%struct.Struct, ptr @_ZL9structure, i64 0, i32 2), align 4
ret i32 %0
}
; CHECK-NEXT: pstw r3, _ZL9structure@PCREL+4(0), 1
; CHECK-NEXT: blr
entry:
- store i32 3, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @_ZL9structure, i64 0, i32 2), align 4
+ store i32 3, ptr getelementptr inbounds (%struct.Struct, ptr @_ZL9structure, i64 0, i32 2), align 4
ret void
}
; Tests for various operations on half precison float. Much of the test is
; copied from test/CodeGen/X86/half.ll.
-define dso_local double @loadd(i16* nocapture readonly %a) local_unnamed_addr #0 {
+define dso_local double @loadd(ptr nocapture readonly %a) local_unnamed_addr #0 {
; P8-LABEL: loadd:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i16, i16* %a, i64 1
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %a, i64 1
+ %0 = load i16, ptr %arrayidx, align 2
%1 = tail call double @llvm.convert.from.fp16.f64(i16 %0)
ret double %1
}
declare double @llvm.convert.from.fp16.f64(i16)
-define dso_local float @loadf(i16* nocapture readonly %a) local_unnamed_addr #0 {
+define dso_local float @loadf(ptr nocapture readonly %a) local_unnamed_addr #0 {
; P8-LABEL: loadf:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i16, i16* %a, i64 1
- %0 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %a, i64 1
+ %0 = load i16, ptr %arrayidx, align 2
%1 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
ret float %1
}
declare float @llvm.convert.from.fp16.f32(i16)
-define dso_local void @stored(i16* nocapture %a, double %b) local_unnamed_addr #0 {
+define dso_local void @stored(ptr nocapture %a, double %b) local_unnamed_addr #0 {
; P8-LABEL: stored:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; SOFT-NEXT: blr
entry:
%0 = tail call i16 @llvm.convert.to.fp16.f64(double %b)
- store i16 %0, i16* %a, align 2
+ store i16 %0, ptr %a, align 2
ret void
}
declare i16 @llvm.convert.to.fp16.f64(double)
-define dso_local void @storef(i16* nocapture %a, float %b) local_unnamed_addr #0 {
+define dso_local void @storef(ptr nocapture %a, float %b) local_unnamed_addr #0 {
; P8-LABEL: storef:
; P8: # %bb.0: # %entry
; P8-NEXT: mflr r0
; SOFT-NEXT: blr
entry:
%0 = tail call i16 @llvm.convert.to.fp16.f32(float %b)
- store i16 %0, i16* %a, align 2
+ store i16 %0, ptr %a, align 2
ret void
}
declare i16 @llvm.convert.to.fp16.f32(float)
-define void @test_load_store(half* %in, half* %out) #0 {
+define void @test_load_store(ptr %in, ptr %out) #0 {
; P8-LABEL: test_load_store:
; P8: # %bb.0:
; P8-NEXT: lhz r3, 0(r3)
; SOFT-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %val = load half, half* %in
- store half %val, half* %out
+ %val = load half, ptr %in
+ store half %val, ptr %out
ret void
}
-define i16 @test_bitcast_from_half(half* %addr) #0 {
+define i16 @test_bitcast_from_half(ptr %addr) #0 {
; P8-LABEL: test_bitcast_from_half:
; P8: # %bb.0:
; P8-NEXT: lhz r3, 0(r3)
; SOFT: # %bb.0:
; SOFT-NEXT: lhz r3, 0(r3)
; SOFT-NEXT: blr
- %val = load half, half* %addr
+ %val = load half, ptr %addr
%val_int = bitcast half %val to i16
ret i16 %val_int
}
-define void @test_bitcast_to_half(half* %addr, i16 %in) #0 {
+define void @test_bitcast_to_half(ptr %addr, i16 %in) #0 {
; P8-LABEL: test_bitcast_to_half:
; P8: # %bb.0:
; P8-NEXT: sth r4, 0(r3)
; SOFT-NEXT: sth r4, 0(r3)
; SOFT-NEXT: blr
%val_fp = bitcast i16 %in to half
- store half %val_fp, half* %addr
+ store half %val_fp, ptr %addr
ret void
}
-define float @test_extend32(half* %addr) #0 {
+define float @test_extend32(ptr %addr) #0 {
; P8-LABEL: test_extend32:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r0, 16(r1)
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %val16 = load half, half* %addr
+ %val16 = load half, ptr %addr
%val32 = fpext half %val16 to float
ret float %val32
}
-define double @test_extend64(half* %addr) #0 {
+define double @test_extend64(ptr %addr) #0 {
; P8-LABEL: test_extend64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r0, 16(r1)
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %val16 = load half, half* %addr
+ %val16 = load half, ptr %addr
%val32 = fpext half %val16 to double
ret double %val32
}
-define void @test_trunc32(float %in, half* %addr) #0 {
+define void @test_trunc32(float %in, ptr %addr) #0 {
; P8-LABEL: test_trunc32:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
%val16 = fptrunc float %in to half
- store half %val16, half* %addr
+ store half %val16, ptr %addr
ret void
}
-define void @test_trunc64(double %in, half* %addr) #0 {
+define void @test_trunc64(double %in, ptr %addr) #0 {
; P8-LABEL: test_trunc64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
%val16 = fptrunc double %in to half
- store half %val16, half* %addr
+ store half %val16, ptr %addr
ret void
}
-define i64 @test_fptosi_i64(half* %p) #0 {
+define i64 @test_fptosi_i64(ptr %p) #0 {
; P8-LABEL: test_fptosi_i64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r0, 16(r1)
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %a = load half, half* %p, align 2
+ %a = load half, ptr %p, align 2
%r = fptosi half %a to i64
ret i64 %r
}
-define void @test_sitofp_i64(i64 %a, half* %p) #0 {
+define void @test_sitofp_i64(i64 %a, ptr %p) #0 {
; P8-LABEL: test_sitofp_i64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
%r = sitofp i64 %a to half
- store half %r, half* %p
+ store half %r, ptr %p
ret void
}
-define i64 @test_fptoui_i64(half* %p) #0 {
+define i64 @test_fptoui_i64(ptr %p) #0 {
; P8-LABEL: test_fptoui_i64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r0, 16(r1)
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %a = load half, half* %p, align 2
+ %a = load half, ptr %p, align 2
%r = fptoui half %a to i64
ret i64 %r
}
-define void @test_uitofp_i64(i64 %a, half* %p) #0 {
+define void @test_uitofp_i64(i64 %a, ptr %p) #0 {
; P8-LABEL: test_uitofp_i64:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
%r = uitofp i64 %a to half
- store half %r, half* %p
+ store half %r, ptr %p
ret void
}
-define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
+define <4 x float> @test_extend32_vec4(ptr %p) #0 {
; P8-LABEL: test_extend32_vec4:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
; SOFT-NEXT: blr
- %a = load <4 x half>, <4 x half>* %p, align 8
+ %a = load <4 x half>, ptr %p, align 8
%b = fpext <4 x half> %a to <4 x float>
ret <4 x float> %b
}
-define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
+define <4 x double> @test_extend64_vec4(ptr %p) #0 {
; P8-LABEL: test_extend64_vec4:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
; SOFT-NEXT: blr
- %a = load <4 x half>, <4 x half>* %p, align 8
+ %a = load <4 x half>, ptr %p, align 8
%b = fpext <4 x half> %a to <4 x double>
ret <4 x double> %b
}
-define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
+define void @test_trunc32_vec4(<4 x float> %a, ptr %p) #0 {
; P8-LABEL: test_trunc32_vec4:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
; SOFT-NEXT: blr
%v = fptrunc <4 x float> %a to <4 x half>
- store <4 x half> %v, <4 x half>* %p
+ store <4 x half> %v, ptr %p
ret void
}
-define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
+define void @test_trunc64_vec4(<4 x double> %a, ptr %p) #0 {
; P8-LABEL: test_trunc64_vec4:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
; SOFT-NEXT: blr
%v = fptrunc <4 x double> %a to <4 x half>
- store <4 x half> %v, <4 x half>* %p
+ store <4 x half> %v, ptr %p
ret void
}
-define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
+define float @test_sitofp_fadd_i32(i32 %a, ptr %b) #0 {
; P8-LABEL: test_sitofp_fadd_i32:
; P8: # %bb.0:
; P8-NEXT: mflr r0
; SOFT-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; SOFT-NEXT: mtlr r0
; SOFT-NEXT: blr
- %tmp0 = load half, half* %b
+ %tmp0 = load half, ptr %b
%tmp1 = sitofp i32 %a to half
%tmp2 = fadd half %tmp0, %tmp1
%tmp3 = fpext half %tmp2 to float
@.str = internal constant [13 x i8] c"Hello World!\00"
define i32 @main() {
- %tmp2 = tail call i32 @puts( i8* getelementptr ([13 x i8], [13 x i8]* @.str, i32 0, i64 0) )
+ %tmp2 = tail call i32 @puts( ptr @.str )
ret i32 0
}
-declare i32 @puts(i8*)
+declare i32 @puts(ptr)
define i32 @t() nounwind readonly {
entry:
- %0 = load i32, i32* @x, align 4
- %1 = load i32, i32* @y, align 4
+ %0 = load i32, ptr @x, align 4
+ %1 = load i32, ptr @y, align 4
%2 = add i32 %1, %0
ret i32 %2
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | not grep non_lazy_ptr
-@x = weak hidden global i32 0 ; <i32*> [#uses=1]
+@x = weak hidden global i32 0 ; <ptr> [#uses=1]
define i32 @t() nounwind readonly {
entry:
- %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, ptr @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
; This is good - eliminate an op by hoisting logic.
-define i32 @lshr_or(i32 %x, i32 %y, i32 %z, i32* %p1, i32* %p2) {
+define i32 @lshr_or(i32 %x, i32 %y, i32 %z, ptr %p1, ptr %p2) {
; CHECK-LABEL: lshr_or:
; CHECK: # %bb.0:
; CHECK-NEXT: or 3, 3, 4
; This is questionable - hoisting doesn't eliminate anything.
; It might result in an extra register move.
-define i32 @lshr_or_multiuse1(i32 %x, i32 %y, i32 %z, i32* %p1, i32* %p2) {
+define i32 @lshr_or_multiuse1(i32 %x, i32 %y, i32 %z, ptr %p1, ptr %p2) {
; CHECK-LABEL: lshr_or_multiuse1:
; CHECK: # %bb.0:
; CHECK-NEXT: srw 7, 3, 5
; CHECK-NEXT: blr
%xt = lshr i32 %x, %z
%yt = lshr i32 %y, %z
- store i32 %xt, i32* %p1
+ store i32 %xt, ptr %p1
%r = or i32 %xt, %yt
ret i32 %r
}
; This is questionable - hoisting doesn't eliminate anything.
-define i32 @lshr_multiuse2(i32 %x, i32 %y, i32 %z, i32* %p1, i32* %p2) {
+define i32 @lshr_multiuse2(i32 %x, i32 %y, i32 %z, ptr %p1, ptr %p2) {
; CHECK-LABEL: lshr_multiuse2:
; CHECK: # %bb.0:
; CHECK-NEXT: srw 3, 3, 5
; CHECK-NEXT: blr
%xt = lshr i32 %x, %z
%yt = lshr i32 %y, %z
- store i32 %yt, i32* %p2
+ store i32 %yt, ptr %p2
%r = or i32 %xt, %yt
ret i32 %r
}
; This is not profitable to hoist. We need an extra shift instruction.
-define i32 @lshr_multiuse3(i32 %x, i32 %y, i32 %z, i32* %p1, i32* %p2) {
+define i32 @lshr_multiuse3(i32 %x, i32 %y, i32 %z, ptr %p1, ptr %p2) {
; CHECK-LABEL: lshr_multiuse3:
; CHECK: # %bb.0:
; CHECK-NEXT: srw 3, 3, 5
; CHECK-NEXT: blr
%xt = lshr i32 %x, %z
%yt = lshr i32 %y, %z
- store i32 %xt, i32* %p1
- store i32 %yt, i32* %p2
+ store i32 %xt, ptr %p1
+ store i32 %yt, ptr %p2
%r = or i32 %xt, %yt
ret i32 %r
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff < %s \
; RUN: 2>&1 | FileCheck --check-prefix=CHECK-BE %s
-%0 = type <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [8 x i8] }>
-@global.1 = internal global %0 <{ i32 129, i32 2, i32 118, i32 0, i32 5, i32 0, i32 0, i32 0, i32 120, i32 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @global.2, i32 0, i32 0), [8 x i8] c"\00\00\00\00\00\00\00\03" }>, align 4
+%0 = type <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, [8 x i8] }>
+@global.1 = internal global %0 <{ i32 129, i32 2, i32 118, i32 0, i32 5, i32 0, i32 0, i32 0, i32 120, i32 0, ptr @global.2, [8 x i8] c"\00\00\00\00\00\00\00\03" }>, align 4
@global.2 = internal constant [3 x i8] c"x.c"
-@alias = dso_local alias i32 (), i32 ()* @main
+@alias = dso_local alias i32 (), ptr @main
define dso_local signext i32 @main() nounwind {
; CHECK-LE-LABEL: main:
bb:
%tmp = alloca [2147484000 x i8], align 8
tail call void @pluto()
- %tmp6 = tail call i64 @snork(i64 6, i32 257, %0* nonnull @global.1, i64 32768, i8* null, i64 0, i8* null)
- %tmp7 = getelementptr inbounds [2147484000 x i8], [2147484000 x i8]* %tmp, i64 0, i64 2147483992
- %tmp8 = bitcast i8* %tmp7 to double*
- %tmp9 = call i64 @zot(i64 %tmp6, double* nonnull %tmp8, i64 8, i64 8)
+ %tmp6 = tail call i64 @snork(i64 6, i32 257, ptr nonnull @global.1, i64 32768, ptr null, i64 0, ptr null)
+ %tmp7 = getelementptr inbounds [2147484000 x i8], ptr %tmp, i64 0, i64 2147483992
+ %tmp9 = call i64 @zot(i64 %tmp6, ptr nonnull %tmp7, i64 8, i64 8)
%tmp10 = call i64 @wibble(i64 %tmp6)
call void @snork.3(i64 0)
unreachable
declare void @pluto()
-declare signext i64 @snork(i64, i32, %0*, i64, i8*, i64, i8*)
+declare signext i64 @snork(i64, i32, ptr, i64, ptr, i64, ptr)
-declare signext i64 @zot(i64, double*, i64, i64)
+declare signext i64 @zot(i64, ptr, i64, i64)
declare signext i64 @wibble(i64)
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff < %s \
; RUN: 2>&1 | FileCheck --check-prefix=CHECK-BE %s
-declare void @bar(i8*)
+declare void @bar(ptr)
define void @foo(i8 %x) {
; CHECK-LE-LABEL: foo:
; CHECK-BE-NEXT: blr
entry:
%a = alloca i8, i64 4294967296, align 16
- %b = getelementptr i8, i8* %a, i64 0
- %c = getelementptr i8, i8* %a, i64 2147483648
- %d = getelementptr i8, i8* %a, i64 4294967295
- store volatile i8 %x, i8* %b
- store volatile i8 %x, i8* %c
- store volatile i8 %x, i8* %d
+ %c = getelementptr i8, ptr %a, i64 2147483648
+ %d = getelementptr i8, ptr %a, i64 4294967295
+ store volatile i8 %x, ptr %a
+ store volatile i8 %x, ptr %c
+ store volatile i8 %x, ptr %d
ret void
}
; RUN: not --crash llc -verify-machineinstrs -mtriple=powerpc-unknown-unknown < %s \
; RUN: 2>&1 | FileCheck %s
-declare void @bar(i8*)
+declare void @bar(ptr)
define void @foo(i8 %x) {
; CHECK: Unhandled stack size
entry:
%a = alloca i8, i64 4294967296, align 16
- %b = getelementptr i8, i8* %a, i64 0
- store volatile i8 %x, i8* %b
+ store volatile i8 %x, ptr %a
ret void
}
; CHECK-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
- store volatile double %d, double* @foo, align 8
+ store volatile double %d, ptr @foo, align 8
ret double %conv
}
; CHECK-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
- store volatile double %d, double* @foo, align 8
+ store volatile double %d, ptr @foo, align 8
ret double %conv
}
; CHECK-NOT: std {{[0-9]+}}, 0(0)
; CHECK: blr
-define void @test1({ i8*, void (i8*, i8*)* } %fn_arg) {
- %fn = alloca { i8*, void (i8*, i8*)* }
- %sp = alloca i8*, align 8
+define void @test1({ ptr, ptr } %fn_arg) {
+ %fn = alloca { ptr, ptr }
+ %sp = alloca ptr, align 8
%regs = alloca [18 x i64], align 8
- store { i8*, void (i8*, i8*)* } %fn_arg, { i8*, void (i8*, i8*)* }* %fn
- %1 = bitcast [18 x i64]* %regs to i64*
- call void asm sideeffect "std 14, $0", "=*m"(i64* elementtype(i64) %1)
- %2 = bitcast [18 x i64]* %regs to i8*
- %3 = getelementptr i8, i8* %2, i32 8
- %4 = bitcast i8* %3 to i64*
- call void asm sideeffect "std 15, $0", "=*m"(i64* elementtype(i64) %4)
- %5 = bitcast [18 x i64]* %regs to i8*
- %6 = getelementptr i8, i8* %5, i32 16
- %7 = bitcast i8* %6 to i64*
- call void asm sideeffect "std 16, $0", "=*m"(i64* elementtype(i64) %7)
- %8 = bitcast [18 x i64]* %regs to i8*
- %9 = getelementptr i8, i8* %8, i32 24
- %10 = bitcast i8* %9 to i64*
- call void asm sideeffect "std 17, $0", "=*m"(i64* elementtype(i64) %10)
- %11 = bitcast [18 x i64]* %regs to i8*
- %12 = getelementptr i8, i8* %11, i32 32
- %13 = bitcast i8* %12 to i64*
- call void asm sideeffect "std 18, $0", "=*m"(i64* elementtype(i64) %13)
- %14 = bitcast [18 x i64]* %regs to i8*
- %15 = getelementptr i8, i8* %14, i32 40
- %16 = bitcast i8* %15 to i64*
- call void asm sideeffect "std 19, $0", "=*m"(i64* elementtype(i64) %16)
- %17 = bitcast [18 x i64]* %regs to i8*
- %18 = getelementptr i8, i8* %17, i32 48
- %19 = bitcast i8* %18 to i64*
- call void asm sideeffect "std 20, $0", "=*m"(i64* elementtype(i64) %19)
- %20 = bitcast [18 x i64]* %regs to i8*
- %21 = getelementptr i8, i8* %20, i32 56
- %22 = bitcast i8* %21 to i64*
- call void asm sideeffect "std 21, $0", "=*m"(i64* elementtype(i64) %22)
- %23 = bitcast [18 x i64]* %regs to i8*
- %24 = getelementptr i8, i8* %23, i32 64
- %25 = bitcast i8* %24 to i64*
- call void asm sideeffect "std 22, $0", "=*m"(i64* elementtype(i64) %25)
- %26 = bitcast [18 x i64]* %regs to i8*
- %27 = getelementptr i8, i8* %26, i32 72
- %28 = bitcast i8* %27 to i64*
- call void asm sideeffect "std 23, $0", "=*m"(i64* elementtype(i64) %28)
- %29 = bitcast [18 x i64]* %regs to i8*
- %30 = getelementptr i8, i8* %29, i32 80
- %31 = bitcast i8* %30 to i64*
- call void asm sideeffect "std 24, $0", "=*m"(i64* elementtype(i64) %31)
- %32 = bitcast [18 x i64]* %regs to i8*
- %33 = getelementptr i8, i8* %32, i32 88
- %34 = bitcast i8* %33 to i64*
- call void asm sideeffect "std 25, $0", "=*m"(i64* elementtype(i64) %34)
- %35 = bitcast [18 x i64]* %regs to i8*
- %36 = getelementptr i8, i8* %35, i32 96
- %37 = bitcast i8* %36 to i64*
- call void asm sideeffect "std 26, $0", "=*m"(i64* elementtype(i64) %37)
- %38 = bitcast [18 x i64]* %regs to i8*
- %39 = getelementptr i8, i8* %38, i32 104
- %40 = bitcast i8* %39 to i64*
- call void asm sideeffect "std 27, $0", "=*m"(i64* elementtype(i64) %40)
- %41 = bitcast [18 x i64]* %regs to i8*
- %42 = getelementptr i8, i8* %41, i32 112
- %43 = bitcast i8* %42 to i64*
- call void asm sideeffect "std 28, $0", "=*m"(i64* elementtype(i64) %43)
- %44 = bitcast [18 x i64]* %regs to i8*
- %45 = getelementptr i8, i8* %44, i32 120
- %46 = bitcast i8* %45 to i64*
- call void asm sideeffect "std 29, $0", "=*m"(i64* elementtype(i64) %46)
- %47 = bitcast [18 x i64]* %regs to i8*
- %48 = getelementptr i8, i8* %47, i32 128
- %49 = bitcast i8* %48 to i64*
- call void asm sideeffect "std 30, $0", "=*m"(i64* elementtype(i64) %49)
- %50 = bitcast [18 x i64]* %regs to i8*
- %51 = getelementptr i8, i8* %50, i32 136
- %52 = bitcast i8* %51 to i64*
- call void asm sideeffect "std 31, $0", "=*m"(i64* elementtype(i64) %52)
- %53 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
- %.funcptr = load void (i8*, i8*)*, void (i8*, i8*)** %53
- %54 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
- %.ptr = load i8*, i8** %54
- %55 = load i8*, i8** %sp
- call void %.funcptr(i8* %.ptr, i8* %55)
+ store { ptr, ptr } %fn_arg, ptr %fn
+ call void asm sideeffect "std 14, $0", "=*m"(ptr elementtype(i64) %regs)
+ %1 = getelementptr i8, ptr %regs, i32 8
+ call void asm sideeffect "std 15, $0", "=*m"(ptr elementtype(i64) %1)
+ %2 = getelementptr i8, ptr %regs, i32 16
+ call void asm sideeffect "std 16, $0", "=*m"(ptr elementtype(i64) %2)
+ %3 = getelementptr i8, ptr %regs, i32 24
+ call void asm sideeffect "std 17, $0", "=*m"(ptr elementtype(i64) %3)
+ %4 = getelementptr i8, ptr %regs, i32 32
+ call void asm sideeffect "std 18, $0", "=*m"(ptr elementtype(i64) %4)
+ %5 = getelementptr i8, ptr %regs, i32 40
+ call void asm sideeffect "std 19, $0", "=*m"(ptr elementtype(i64) %5)
+ %6 = getelementptr i8, ptr %regs, i32 48
+ call void asm sideeffect "std 20, $0", "=*m"(ptr elementtype(i64) %6)
+ %7 = getelementptr i8, ptr %regs, i32 56
+ call void asm sideeffect "std 21, $0", "=*m"(ptr elementtype(i64) %7)
+ %8 = getelementptr i8, ptr %regs, i32 64
+ call void asm sideeffect "std 22, $0", "=*m"(ptr elementtype(i64) %8)
+ %9 = getelementptr i8, ptr %regs, i32 72
+ call void asm sideeffect "std 23, $0", "=*m"(ptr elementtype(i64) %9)
+ %10 = getelementptr i8, ptr %regs, i32 80
+ call void asm sideeffect "std 24, $0", "=*m"(ptr elementtype(i64) %10)
+ %11 = getelementptr i8, ptr %regs, i32 88
+ call void asm sideeffect "std 25, $0", "=*m"(ptr elementtype(i64) %11)
+ %12 = getelementptr i8, ptr %regs, i32 96
+ call void asm sideeffect "std 26, $0", "=*m"(ptr elementtype(i64) %12)
+ %13 = getelementptr i8, ptr %regs, i32 104
+ call void asm sideeffect "std 27, $0", "=*m"(ptr elementtype(i64) %13)
+ %14 = getelementptr i8, ptr %regs, i32 112
+ call void asm sideeffect "std 28, $0", "=*m"(ptr elementtype(i64) %14)
+ %15 = getelementptr i8, ptr %regs, i32 120
+ call void asm sideeffect "std 29, $0", "=*m"(ptr elementtype(i64) %15)
+ %16 = getelementptr i8, ptr %regs, i32 128
+ call void asm sideeffect "std 30, $0", "=*m"(ptr elementtype(i64) %16)
+ %17 = getelementptr i8, ptr %regs, i32 136
+ call void asm sideeffect "std 31, $0", "=*m"(ptr elementtype(i64) %17)
+ %18 = getelementptr { ptr, ptr }, ptr %fn, i32 0, i32 1
+ %.funcptr = load ptr, ptr %18
+ %19 = getelementptr { ptr, ptr }, ptr %fn, i32 0, i32 0
+ %.ptr = load ptr, ptr %19
+ %20 = load ptr, ptr %sp
+ call void %.funcptr(ptr %.ptr, ptr %20)
ret void
}
define i64 @main() #0 {
entry:
%x = alloca i64, align 8
- store i64 0, i64* %x, align 8
- %0 = call i64 asm sideeffect "ld $0,$1\0A\09add${2:I} $0,$0,$2", "=&r,*m,Ir"(i64* elementtype(i64) %x, i64 -1) #0
+ store i64 0, ptr %x, align 8
+ %0 = call i64 asm sideeffect "ld $0,$1\0A\09add${2:I} $0,$0,$2", "=&r,*m,Ir"(ptr elementtype(i64) %x, i64 -1) #0
ret i64 %0
}
; CHECK: blr
; Function Attrs: nounwind
-declare signext i32 @printf(i8* nocapture readonly, ...) #0
+declare signext i32 @printf(ptr nocapture readonly, ...) #0
attributes #0 = { nounwind }
br i1 undef, label %return, label %if.end4
if.end: ; preds = %entry
- br i1 icmp ne (i32 (i8*, i8*, i8* (i8*)*, i8*)* @_ZN11__sanitizer19real_pthread_createEPvS0_PFS0_S0_ES0_, i32 (i8*, i8*, i8* (i8*)*, i8*)* null), label %if.end4, label %return
+ br i1 icmp ne (ptr @_ZN11__sanitizer19real_pthread_createEPvS0_PFS0_S0_ES0_, ptr null), label %if.end4, label %return
if.end4: ; preds = %if.end, %land.lhs.true
- %call5 = tail call i8* @_ZN11__sanitizer21internal_start_threadEPFvPvES0_(void (i8*)* nonnull @_ZN11__sanitizer16BackgroundThreadEPv, i8* null) #7
+ %call5 = tail call ptr @_ZN11__sanitizer21internal_start_threadEPFvPvES0_(ptr nonnull @_ZN11__sanitizer16BackgroundThreadEPv, ptr null) #7
unreachable
return: ; preds = %if.end, %land.lhs.true
ret void
}
-declare extern_weak signext i32 @_ZN11__sanitizer19real_pthread_createEPvS0_PFS0_S0_ES0_(i8*, i8*, i8* (i8*)*, i8*) #2
+declare extern_weak signext i32 @_ZN11__sanitizer19real_pthread_createEPvS0_PFS0_S0_ES0_(ptr, ptr, ptr, ptr) #2
-declare i8* @_ZN11__sanitizer21internal_start_threadEPFvPvES0_(void (i8*)*, i8*) local_unnamed_addr #2
+declare ptr @_ZN11__sanitizer21internal_start_threadEPFvPvES0_(ptr, ptr) local_unnamed_addr #2
-declare hidden void @_ZN11__sanitizer16BackgroundThreadEPv(i8* nocapture readnone) #5
+declare hidden void @_ZN11__sanitizer16BackgroundThreadEPv(ptr nocapture readnone) #5
attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #7 = { nobuiltin nounwind }
; RUN: llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr10 \
; RUN: -verify-machineinstrs | FileCheck --check-prefix=LEP10 %s
-@ifunc1 = dso_local ifunc void(), void()* ()* @resolver
-@ifunc2 = ifunc void(), void()* ()* @resolver
+@ifunc1 = dso_local ifunc void(), ptr @resolver
+@ifunc2 = ifunc void(), ptr @resolver
-define void()* @resolver() { ret void()* null }
+define ptr @resolver() { ret ptr null }
define void @foo() #0 {
; REL-LABEL: foo
; CHECK: @f
entry:
- %0 = tail call double* asm sideeffect "qvstfdux $2,$0,$1", "=b,{r7},{f11},0,~{memory}"(i32 64, double undef, double* undef)
+ %0 = tail call ptr asm sideeffect "qvstfdux $2,$0,$1", "=b,{r7},{f11},0,~{memory}"(i32 64, double undef, ptr undef)
ret void
; CHECK: qvstfdux 11,{{[0-9]+}},7
; CHECK-LABEL: f:
; CHECK-NOT: stwx {{[0-9]+}}, {{[0-9]+}}, 64
-define void @f(%class.test* %this) {
+define void @f(ptr %this) {
entry:
- %Subminor.i.i = getelementptr inbounds %class.test, %class.test* %this, i64 0, i32 1
- %0 = bitcast [5 x i8]* %Subminor.i.i to i40*
- %bf.load2.i.i = load i40, i40* %0, align 4
+ %Subminor.i.i = getelementptr inbounds %class.test, ptr %this, i64 0, i32 1
+ %bf.load2.i.i = load i40, ptr %Subminor.i.i, align 4
%bf.clear7.i.i = and i40 %bf.load2.i.i, -8589934592
- store i40 %bf.clear7.i.i, i40* %0, align 4
+ store i40 %bf.clear7.i.i, ptr %Subminor.i.i, align 4
ret void
}
@a = external hidden global i32
@b = external global i32
-define i32* @get_a() {
- ret i32* @a
+define ptr @get_a() {
+ ret ptr @a
}
-define i32* @get_b() {
- ret i32* @b
+define ptr @get_b() {
+ ret ptr @b
}
; CHECK: .globl get_a
; RUN: llc < %s -relocation-model=static -mtriple=powerpc-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s -check-prefix=STATIC
; RUN: llc < %s -relocation-model=pic -mtriple=powerpc64-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s -check-prefix=PPC64
-@nextaddr = global i8* null ; <i8**> [#uses=2]
-@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
+@nextaddr = global ptr null ; <ptr> [#uses=2]
+@C.0.2070 = private constant [5 x ptr] [ptr blockaddress(@foo, %L1), ptr blockaddress(@foo, %L2), ptr blockaddress(@foo, %L3), ptr blockaddress(@foo, %L4), ptr blockaddress(@foo, %L5)] ; <ptr> [#uses=1]
define internal i32 @foo(i32 %i) nounwind {
; PIC-LABEL: foo:
; STATIC-LABEL: foo:
; PPC64-LABEL: foo:
entry:
- %0 = load i8*, i8** @nextaddr, align 4 ; <i8*> [#uses=2]
- %1 = icmp eq i8* %0, null ; <i1> [#uses=1]
+ %0 = load ptr, ptr @nextaddr, align 4 ; <ptr> [#uses=2]
+ %1 = icmp eq ptr %0, null ; <i1> [#uses=1]
br i1 %1, label %bb3, label %bb2
bb2: ; preds = %entry, %bb3
- %gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
+ %gotovar.4.0 = phi ptr [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <ptr> [#uses=1]
; PIC: mtctr
; PIC-NEXT: bctr
; PIC: li
; PPC64: b .LBB
; PPC64: li
; PPC64: b .LBB
- indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
+ indirectbr ptr %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
- %gotovar.4.0.pre = load i8*, i8** %2, align 4 ; <i8*> [#uses=1]
+ %2 = getelementptr inbounds [5 x ptr], ptr @C.0.2070, i32 0, i32 %i ; <ptr> [#uses=1]
+ %gotovar.4.0.pre = load ptr, ptr %2, align 4 ; <ptr> [#uses=1]
br label %bb2
L5: ; preds = %bb2
; STATIC-NEXT: lis r[[R1:[0-9]+]], nextaddr@ha
; STATIC-NEXT: addis r[[R0]], r[[R0]], .Ltmp0@ha
; STATIC-NEXT: stw r[[R0]], nextaddr@l(r[[R1]]
- store i8* blockaddress(@foo, %L5), i8** @nextaddr, align 4
+ store ptr blockaddress(@foo, %L5), ptr @nextaddr, align 4
ret i32 %res.3
}
%tbu = alloca i32, align 4
%tbl = alloca i32, align 4
%temp = alloca i32, align 4
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
%0 = call { i32, i32, i32 } asm sideeffect "mftbu $2\0Amftb $0\0Amftbu $1\0Acmpw $2,$1\0Abne $$-0x10\0A", "=r,=r,=r,~{cc}"()
%asmresult = extractvalue { i32, i32, i32 } %0, 0
%asmresult1 = extractvalue { i32, i32, i32 } %0, 1
%asmresult2 = extractvalue { i32, i32, i32 } %0, 2
- store i32 %asmresult, i32* %tbl, align 4
- store i32 %asmresult1, i32* %tbu, align 4
- store i32 %asmresult2, i32* %temp, align 4
- %1 = load i32, i32* %tbu, align 4
+ store i32 %asmresult, ptr %tbl, align 4
+ store i32 %asmresult1, ptr %tbu, align 4
+ store i32 %asmresult2, ptr %temp, align 4
+ %1 = load i32, ptr %tbu, align 4
%conv = zext i32 %1 to i64
%shl = shl i64 %conv, 32
- %2 = load i32, i32* %tbl, align 4
+ %2 = load i32, ptr %tbl, align 4
%conv3 = zext i32 %2 to i64
%or = or i64 %shl, %conv3
%conv4 = trunc i64 %or to i32
; Function Attrs: noinline nounwind optnone uwtable
-define dso_local signext i32 @NoBarrier_CompareAndSwap(i32* %ptr, i32 signext %old_value, i32 signext %new_value) #0 {
+define dso_local signext i32 @NoBarrier_CompareAndSwap(ptr %ptr, i32 signext %old_value, i32 signext %new_value) #0 {
; CHECK-LABEL: NoBarrier_CompareAndSwap:
; CHECK: #APP
; CHECK-NEXT: L..tmp0:
; NOIS-NEXT: 2:
entry:
- %ptr.addr = alloca i32*, align 8 %old_value.addr = alloca i32, align 4
+ %ptr.addr = alloca ptr, align 8 %old_value.addr = alloca i32, align 4
%new_value.addr = alloca i32, align 4
%result = alloca i32, align 4
- store i32* %ptr, i32** %ptr.addr, align 8
- store i32 %old_value, i32* %old_value.addr, align 4
- store i32 %new_value, i32* %new_value.addr, align 4
- %0 = load i32*, i32** %ptr.addr, align 8
- %1 = load i32, i32* %old_value.addr, align 4
- %2 = load i32, i32* %new_value.addr, align 4
- %3 = call i32 asm sideeffect "1: lwarx $0, $4, $1 \0A\09 cmpw $2, $0 \0A\09 bne- 2f \0A\09 stwcx. $3, $4, $1 \0A\09 bne- 1b \0A\092: \0A\09", "=&b,b,b,b,i,~{cr0},~{ctr}"(i32* %0, i32 %1, i32 %2, i32 0)
- store i32 %3, i32* %result, align 4
- %4 = load i32, i32* %result, align 4
+ store ptr %ptr, ptr %ptr.addr, align 8
+ store i32 %old_value, ptr %old_value.addr, align 4
+ store i32 %new_value, ptr %new_value.addr, align 4
+ %0 = load ptr, ptr %ptr.addr, align 8
+ %1 = load i32, ptr %old_value.addr, align 4
+ %2 = load i32, ptr %new_value.addr, align 4
+ %3 = call i32 asm sideeffect "1: lwarx $0, $4, $1 \0A\09 cmpw $2, $0 \0A\09 bne- 2f \0A\09 stwcx. $3, $4, $1 \0A\09 bne- 1b \0A\092: \0A\09", "=&b,b,b,b,i,~{cr0},~{ctr}"(ptr %0, i32 %1, i32 %2, i32 0)
+ store i32 %3, ptr %result, align 4
+ %4 = load i32, ptr %result, align 4
ret i32 %4
}
-define dso_local signext i32 @NoBarrier_CompareAndSwapExtMne(i32* %ptr, i32 signext %old_value, i32 signext %new_value) #0 {
+define dso_local signext i32 @NoBarrier_CompareAndSwapExtMne(ptr %ptr, i32 signext %old_value, i32 signext %new_value) #0 {
; CHECK-LABEL: NoBarrier_CompareAndSwapExtMne:
; CHECK: #APP
; CHECK-NEXT: L..tmp2:
; NOIS-NEXT: 2:
entry:
- %ptr.addr = alloca i32*, align 8 %old_value.addr = alloca i32, align 4
+ %ptr.addr = alloca ptr, align 8 %old_value.addr = alloca i32, align 4
%new_value.addr = alloca i32, align 4
%result = alloca i32, align 4
- store i32* %ptr, i32** %ptr.addr, align 8
- store i32 %old_value, i32* %old_value.addr, align 4
- store i32 %new_value, i32* %new_value.addr, align 4
- %0 = load i32*, i32** %ptr.addr, align 8
- %1 = load i32, i32* %old_value.addr, align 4
- %2 = load i32, i32* %new_value.addr, align 4
- %3 = call i32 asm sideeffect "1: lwarx $0, $4, $1, 0 \0A\09 cmpw $2, $0 \0A\09 bne- 2f \0A\09 stwcx. $3, $4, $1 \0A\09 bne- 1b \0A\092: \0A\09", "=&b,b,b,b,i,~{cr0},~{ctr}"(i32* %0, i32 %1, i32 %2, i32 0)
- store i32 %3, i32* %result, align 4
- %4 = load i32, i32* %result, align 4
+ store ptr %ptr, ptr %ptr.addr, align 8
+ store i32 %old_value, ptr %old_value.addr, align 4
+ store i32 %new_value, ptr %new_value.addr, align 4
+ %0 = load ptr, ptr %ptr.addr, align 8
+ %1 = load i32, ptr %old_value.addr, align 4
+ %2 = load i32, ptr %new_value.addr, align 4
+ %3 = call i32 asm sideeffect "1: lwarx $0, $4, $1, 0 \0A\09 cmpw $2, $0 \0A\09 bne- 2f \0A\09 stwcx. $3, $4, $1 \0A\09 bne- 1b \0A\092: \0A\09", "=&b,b,b,b,i,~{cr0},~{ctr}"(ptr %0, i32 %1, i32 %2, i32 0)
+ store i32 %3, ptr %result, align 4
+ %4 = load i32, ptr %result, align 4
ret i32 %4
}
define void @bar() {
; access foo[1][1]
; CHECK: # foo+12
- tail call void asm sideeffect "# ${0:c}", "i"(i32* getelementptr inbounds ([2 x [2 x i32]], [2 x [2 x i32]]* @foo, i64 0, i64 1, i64 1))
+ tail call void asm sideeffect "# ${0:c}", "i"(ptr getelementptr inbounds ([2 x [2 x i32]], ptr @foo, i64 0, i64 1, i64 1))
ret void
}
%struct.BG_CoordinateMapping_t = type { [4 x i8] }
; Function Attrs: alwaysinline inlinehint nounwind
-define zeroext i32 @Kernel_RanksToCoords(i64 %mapsize, %struct.BG_CoordinateMapping_t* %map, i64* %numentries) #0 {
+define zeroext i32 @Kernel_RanksToCoords(i64 %mapsize, ptr %map, ptr %numentries) #0 {
entry:
%mapsize.addr = alloca i64, align 8
- %map.addr = alloca %struct.BG_CoordinateMapping_t*, align 8
- %numentries.addr = alloca i64*, align 8
+ %map.addr = alloca ptr, align 8
+ %numentries.addr = alloca ptr, align 8
%r0 = alloca i64, align 8
%r3 = alloca i64, align 8
%r4 = alloca i64, align 8
%r5 = alloca i64, align 8
%tmp = alloca i64, align 8
- store i64 %mapsize, i64* %mapsize.addr, align 8
- store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
- store i64* %numentries, i64** %numentries.addr, align 8
- store i64 1055, i64* %r0, align 8
- %0 = load i64, i64* %mapsize.addr, align 8
- store i64 %0, i64* %r3, align 8
- %1 = load %struct.BG_CoordinateMapping_t*, %struct.BG_CoordinateMapping_t** %map.addr, align 8
- %2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
- store i64 %2, i64* %r4, align 8
- %3 = load i64*, i64** %numentries.addr, align 8
- %4 = ptrtoint i64* %3 to i64
- store i64 %4, i64* %r5, align 8
- %5 = load i64, i64* %r0, align 8
- %6 = load i64, i64* %r3, align 8
- %7 = load i64, i64* %r4, align 8
- %8 = load i64, i64* %r5, align 8
+ store i64 %mapsize, ptr %mapsize.addr, align 8
+ store ptr %map, ptr %map.addr, align 8
+ store ptr %numentries, ptr %numentries.addr, align 8
+ store i64 1055, ptr %r0, align 8
+ %0 = load i64, ptr %mapsize.addr, align 8
+ store i64 %0, ptr %r3, align 8
+ %1 = load ptr, ptr %map.addr, align 8
+ %2 = ptrtoint ptr %1 to i64
+ store i64 %2, ptr %r4, align 8
+ %3 = load ptr, ptr %numentries.addr, align 8
+ %4 = ptrtoint ptr %3 to i64
+ store i64 %4, ptr %r5, align 8
+ %5 = load i64, ptr %r0, align 8
+ %6 = load i64, ptr %r3, align 8
+ %7 = load i64, ptr %r4, align 8
+ %8 = load i64, ptr %r5, align 8
%9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0
; CHECK-LABEL: @Kernel_RanksToCoords
%asmresult1 = extractvalue { i64, i64, i64, i64 } %9, 1
%asmresult2 = extractvalue { i64, i64, i64, i64 } %9, 2
%asmresult3 = extractvalue { i64, i64, i64, i64 } %9, 3
- store i64 %asmresult, i64* %r0, align 8
- store i64 %asmresult1, i64* %r3, align 8
- store i64 %asmresult2, i64* %r4, align 8
- store i64 %asmresult3, i64* %r5, align 8
- %10 = load i64, i64* %r3, align 8
- store i64 %10, i64* %tmp
- %11 = load i64, i64* %tmp
+ store i64 %asmresult, ptr %r0, align 8
+ store i64 %asmresult1, ptr %r3, align 8
+ store i64 %asmresult2, ptr %r4, align 8
+ store i64 %asmresult3, ptr %r5, align 8
+ %10 = load i64, ptr %r3, align 8
+ store i64 %10, ptr %tmp
+ %11 = load i64, ptr %tmp
%conv = trunc i64 %11 to i32
ret i32 %conv
}
declare void @mtrace()
-define signext i32 @main(i32 signext %argc, i8** %argv) {
+define signext i32 @main(i32 signext %argc, ptr %argv) {
entry:
%argc.addr = alloca i32, align 4
- store i32 %argc, i32* %argc.addr, align 4
+ store i32 %argc, ptr %argc.addr, align 4
%0 = call { i64, i64 } asm sideeffect "sc", "={r0},={r3},{r0},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1076)
%asmresult1.i = extractvalue { i64, i64 } %0, 1
%conv.i = trunc i64 %asmresult1.i to i32
if.then: ; preds = %entry
call void @mtrace()
- %.pre = load i32, i32* %argc.addr, align 4
+ %.pre = load i32, ptr %argc.addr, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
; CHECK: #TEST baz
@baz = internal global i32 0, align 4
; Inline-asm operand-modifier test: ${0:c} must print the bare symbol name of
; the immediate pointer constant @baz (no immediate-marker decoration).
; Migration: the "i" constraint operand changes from i32* to opaque ptr.
define dso_local i32 @test_inlineasm_c_output_template1() {
- tail call void asm sideeffect "#TEST ${0:c}", "i"(i32* nonnull @baz)
+ tail call void asm sideeffect "#TEST ${0:c}", "i"(ptr nonnull @baz)
ret i32 43
}
; CHECK: # 4(5)
; PPC64-LABEL: test_inlineasm_L_output_template
; PPC64: # 8(4)
; Inline-asm ${0:L} modifier on an indirect ("*m") memory operand.
; Migration: elementtype(i64) keeps the pointee size information that the
; typed i64* operand used to carry, so the printed displacement is unchanged.
-define dso_local void @test_inlineasm_L_output_template(i64 %0, i64* %1) {
- tail call void asm sideeffect "# ${0:L}", "*m"(i64* elementtype(i64) %1)
+define dso_local void @test_inlineasm_L_output_template(i64 %0, ptr %1) {
+ tail call void asm sideeffect "# ${0:L}", "*m"(ptr elementtype(i64) %1)
ret void
}
; CHECK: - String: "\n"
; Indirect tail call through the unnamed function-pointer parameter %0, with
; signext/zeroext attributes on each constant argument.
; Migration: the typed function-pointer parameter collapses to plain ptr; the
; call's own signature now supplies the argument types.
-define void @callThroughPtrWithArgs(void (i32, i16, i64)* nocapture) {
+define void @callThroughPtrWithArgs(ptr nocapture) {
 tail call void %0(i32 signext 1, i16 zeroext 2, i64 3)
 ret void
}
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 1 from %ptr (offset not 16-aligned, hence
; "unalign16"); P10 is expected to use a prefixed load (pld) per the checks.
; Migration: the i8*->i128* bitcast is dropped; the load reads the GEP result
; directly, and the result renumbers from %1 to %0.
-define dso_local i128 @ld_unalign16___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_unalign16___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16___int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld 5, 1(3), 0
; CHECK-P8-NEXT: mr 3, 5
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 8 — a displacement that fits a plain DS-form load
; (ld 5, 8(3) per the checks).
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_align16___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_align16___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16___int128___int128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld 5, 8(3)
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 99999 — too large for a 16-bit displacement, so
; P10 materializes it with pli per the checks.
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_unalign32___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_unalign32___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32___int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 4, 99999
; CHECK-PREP10-NEXT: mr 3, 5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 99999000 (32-bit-range displacement, materialized
; with pli on P10 per the checks).
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_align32___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_align32___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32___int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 4, 99999000
; CHECK-PREP10-NEXT: mr 3, 5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 1000000000001 — needs a full 64-bit offset
; materialization (pli of the high chunk per the checks).
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_unalign64___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_unalign64___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64___int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 4, 232
; CHECK-PREP10-NEXT: mr 3, 5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at byte offset 1000000000000 (64-bit-range, representable via
; shifted immediate — pli 244140625 = 10^12 >> 12 per the checks).
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_align64___int128___int128(i8* nocapture readonly %ptr) {
+define dso_local i128 @ld_align64___int128___int128(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64___int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 4, 244140625
; CHECK-PREP10-NEXT: mr 3, 5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i128 load at a variable byte offset %off — expects the indexed-with-update
; form (ldux) per the checks.
; Migration: pointee bitcast removed; load goes straight through the i8 GEP.
-define dso_local i128 @ld_reg___int128___int128(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i128 @ld_reg___int128___int128(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg___int128___int128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldux 5, 3, 4
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i128*
- %1 = load i128, i128* %0, align 16
- ret i128 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i128, ptr %add.ptr, align 16
+ ret i128 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 16
ret i128 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i128*
- %1 = load i128, i128* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i128, ptr %0, align 4096
ret i128 %1
}
; CHECK-NEXT: ld 4, 0(4)
; CHECK-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 255 to i128*), align 16
+ %0 = load i128, ptr inttoptr (i64 255 to ptr), align 16
ret i128 %0
}
; CHECK-NEXT: ld 4, 4088(0)
; CHECK-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 4080 to i128*), align 16
+ %0 = load i128, ptr inttoptr (i64 4080 to ptr), align 16
ret i128 %0
}
; CHECK-P8-NEXT: ld 4, 0(5)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 99999 to i128*), align 16
+ %0 = load i128, ptr inttoptr (i64 99999 to ptr), align 16
ret i128 %0
}
; CHECK-NEXT: ld 4, -27100(4)
; CHECK-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 9999900 to i128*), align 16
+ %0 = load i128, ptr inttoptr (i64 9999900 to ptr), align 16
ret i128 %0
}
; CHECK-P8-NEXT: ld 4, 0(5)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 1000000000001 to i128*), align 16
+ %0 = load i128, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret i128 %0
}
; CHECK-P8-NEXT: ld 4, 0(5)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i128, i128* inttoptr (i64 1000000000000 to i128*), align 4096
+ %0 = load i128, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i128 %0
}
; CHECK-NEXT: std 4, 0(3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; Store-side twin of ld_unalign16: i128 store at byte offset 1; P10 uses a
; prefixed store (pstd) per the checks.
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_unalign16__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_unalign16__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-P10-LABEL: st_unalign16__int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd 5, 9(3), 0
; CHECK-P8-NEXT: stdx 4, 3, 7
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at byte offset 8 — splits into two DS-form std instructions at
; offsets 8 and 16 per the checks.
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_align16__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_align16__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-LABEL: st_align16__int128___int128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 5, 16(3)
; CHECK-NEXT: std 4, 8(3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at byte offset 99999 — offset exceeds the 16-bit displacement, so
; it is materialized with pli on P10 per the checks.
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_unalign32__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_unalign32__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-P10-LABEL: st_unalign32__int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 6, 99999
; CHECK-PREP10-NEXT: std 5, 8(3)
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at byte offset 99999000 (32-bit-range displacement via pli on P10
; per the checks).
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_align32__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_align32__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-P10-LABEL: st_align32__int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 6, 99999000
; CHECK-PREP10-NEXT: std 5, 8(3)
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at byte offset 1000000000001 — needs 64-bit offset
; materialization (pli of the high chunk per the checks).
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_unalign64__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_unalign64__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-P10-LABEL: st_unalign64__int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 6, 232
; CHECK-PREP10-NEXT: std 5, 8(3)
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at byte offset 1000000000000 (pli 244140625 = 10^12 >> 12 per the
; checks).
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_align64__int128___int128(i8* nocapture %ptr, i128 %str) {
+define dso_local void @st_align64__int128___int128(ptr nocapture %ptr, i128 %str) {
; CHECK-P10-LABEL: st_align64__int128___int128:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli 6, 244140625
; CHECK-PREP10-NEXT: std 5, 8(3)
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; i128 store at a variable byte offset %off — indexed-with-update store
; (stdux) plus a fixed-offset std for the high half, per the checks.
; Migration: pointee bitcast removed; store writes through the i8 GEP.
-define dso_local void @st_reg__int128___int128(i8* nocapture %ptr, i64 %off, i128 %str) {
+define dso_local void @st_reg__int128___int128(ptr nocapture %ptr, i64 %off, i128 %str) {
; CHECK-LABEL: st_reg__int128___int128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdux 5, 3, 4
; CHECK-NEXT: std 6, 8(3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i128*
- store i128 %str, i128* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i128 %str, ptr %add.ptr, align 16
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i128*
- store i128 %str, i128* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i128 %str, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: std 3, 0(6)
; CHECK-P8-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 255 to i128*), align 16
+ store i128 %str, ptr inttoptr (i64 255 to ptr), align 16
ret void
}
; CHECK-NEXT: std 3, 4080(0)
; CHECK-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 4080 to i128*), align 16
+ store i128 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: std 3, 0(5)
; CHECK-P8-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 99999 to i128*), align 16
+ store i128 %str, ptr inttoptr (i64 99999 to ptr), align 16
ret void
}
; CHECK-NEXT: std 3, -27108(5)
; CHECK-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 9999900 to i128*), align 16
+ store i128 %str, ptr inttoptr (i64 9999900 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: std 3, 0(5)
; CHECK-P8-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 1000000000001 to i128*), align 16
+ store i128 %str, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: std 4, 0(5)
; CHECK-P8-NEXT: blr
entry:
- store i128 %str, i128* inttoptr (i64 1000000000000 to i128*), align 4096
+ store i128 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep xori
-define i32 @test(i1 %B, i32* %P) {
+define i32 @test(i1 %B, ptr %P) {
br i1 %B, label %T, label %F
T: ; preds = %0
- store i32 123, i32* %P
+ store i32 123, ptr %P
ret i32 0
F: ; preds = %0
crc32_gentab.exit: ; preds = %for.cond1.preheader.i
%tobool.i19.i.i = icmp eq i32 undef, 0
- %retval.0.i.i.i = select i1 %tobool.i19.i.i, i32* getelementptr inbounds ([1 x [9 x i32]], [1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 6), i32* getelementptr inbounds ([1 x [9 x i32]], [1 x [9 x i32]]* @g_62, i64 0, i64 0, i64 8)
+ %retval.0.i.i.i = select i1 %tobool.i19.i.i, ptr getelementptr inbounds ([1 x [9 x i32]], ptr @g_62, i64 0, i64 0, i64 6), ptr getelementptr inbounds ([1 x [9 x i32]], ptr @g_62, i64 0, i64 0, i64 8)
br label %for.cond1.preheader.i2961.i
for.cond1.preheader.i2961.i: ; preds = %for.inc44.i2977.i, %crc32_gentab.exit
- call void @llvm.memset.p0i8.i64(i8* align 4 bitcast ([1 x [9 x i32]]* @g_62 to i8*), i8 -1, i64 36, i1 false) #1
- %0 = load i32, i32* %retval.0.i.i.i, align 4
+ call void @llvm.memset.p0.i64(ptr align 4 @g_62, i8 -1, i64 36, i1 false) #1
+ %0 = load i32, ptr %retval.0.i.i.i, align 4
%tobool.i2967.i = icmp eq i32 %0, 0
br label %for.body21.i2968.i
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "ssp-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
ret ptr %0
}
; Acquire atomic load of an addrspace(10) pointer through an addrspace(11)
; pointer; the checks expect the ld followed by the cmp/bne-/isync sequence
; used to implement acquire ordering on PPC.
; Migration: the nested typed pointer-to-pointer collapses to
; "ptr addrspace(11)", with the loaded value typed "ptr addrspace(10)".
-define void @foobar({} addrspace(10)* addrspace(11)* %p) {
+define void @foobar(ptr addrspace(11) %p) {
; CHECK-LE-LABEL: foobar:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: ld 3, 0(3)
; CHECK-NEXT: bne- 7, .+4
; CHECK-NEXT: isync
entry:
- %0 = load atomic {} addrspace(10)*, {} addrspace(10)* addrspace(11)* %p acquire, align 8
+ %0 = load atomic ptr addrspace(10), ptr addrspace(11) %p acquire, align 8
unreachable
}
%struct.S6 = type { [6 x i8] }
%struct.S7 = type { [7 x i8] }
; Forwards four odd-sized byval struct arguments straight to @check (S6/S7 are
; 6- and 7-byte arrays above; S3/S5 are defined earlier in the file).
; Migration: typed struct pointers become plain ptr — byval(%struct.X) already
; carries the pointee type, so ABI lowering is unchanged.
-define void @test(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7) nounwind {
+define void @test(ptr byval(%struct.S3) %s3, ptr byval(%struct.S5) %s5, ptr byval(%struct.S6) %s6, ptr byval(%struct.S7) %s7) nounwind {
entry:
- call void @check(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7)
+ call void @check(ptr byval(%struct.S3) %s3, ptr byval(%struct.S5) %s5, ptr byval(%struct.S6) %s6, ptr byval(%struct.S7) %s7)
 ret void
}
; CHECK-DAG: ld 4, 56(1)
; CHECK-DAG: ld 3, 48(1)
-declare void @check(%struct.S3* byval(%struct.S3), %struct.S5* byval(%struct.S5), %struct.S6* byval(%struct.S6), %struct.S7* byval(%struct.S7))
+declare void @check(ptr byval(%struct.S3), ptr byval(%struct.S5), ptr byval(%struct.S6), ptr byval(%struct.S7))
%struct.anon = type { i32 }
-@b = common dso_local global %struct.anon* null, align 8
+@b = common dso_local global ptr null, align 8
@a = common dso_local global i64 0, align 8
; Function Attrs: nounwind
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %redo_first_pass
- %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 ()*)() #2
+ %call = tail call signext i32 @fn2() #2
%tobool1 = icmp ne i32 %call, 0
br label %if.end
br i1 %tobool2, label %if.end4, label %if.then3
if.then3: ; preds = %if.end
- %0 = load %struct.anon*, %struct.anon** @b, align 8
- %contains_i = getelementptr inbounds %struct.anon, %struct.anon* %0, i64 0, i32 0
- store i32 1, i32* %contains_i, align 4
+ %0 = load ptr, ptr @b, align 8
+ store i32 1, ptr %0, align 4
br label %if.end4
if.end4: ; preds = %if.end, %if.then3
br i1 %c.1.off0, label %if.then6, label %if.end13
if.then6: ; preds = %if.end4
- %1 = load i64, i64* @a, align 8
+ %1 = load i64, ptr @a, align 8
%cmp21 = icmp eq i64 %1, 0
br i1 %cmp21, label %if.end13, label %for.body
for.body: ; preds = %if.then6, %for.body
%s.122 = phi i64 [ %inc, %for.body ], [ 0, %if.then6 ]
- %call7 = tail call signext i32 bitcast (i32 (...)* @fn3 to i32 ()*)()
+ %call7 = tail call signext i32 @fn3()
%inc = add nuw i64 %s.122, 1
%exitcond = icmp eq i64 %inc, %1
br i1 %exitcond, label %for.end, label %for.body
%struct.p5rx = type { i32 }
; Function Attrs: nounwind
-define dso_local signext i32 @spillCRUNSET(%struct.p5rx* readonly %p1, i32 signext %p2, i32 signext %p3) {
+define dso_local signext i32 @spillCRUNSET(ptr readonly %p1, i32 signext %p2, i32 signext %p3) {
; CHECK-LABEL: spillCRUNSET:
; CHECK: # %bb.0: # %entry
; CHECK-DAG: li [[REG1:.*]], 0
entry:
%and = and i32 %p3, 128
%tobool = icmp eq i32 %and, 0
- %tobool2 = icmp eq %struct.p5rx* %p1, null
- %sv_any = getelementptr inbounds %struct.p5rx, %struct.p5rx* %p1, i64 0, i32 0
+ %tobool2 = icmp eq ptr %p1, null
%tobool12 = icmp eq i32 %p2, 0
br label %redo_first_pass
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %redo_first_pass
- %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 ()*)()
+ %call = tail call signext i32 @fn2()
%tobool1 = icmp ne i32 %call, 0
br label %if.end
br i1 %tobool2, label %if.end11, label %land.lhs.true
land.lhs.true: ; preds = %if.end
- %call3 = tail call signext i32 bitcast (i32 (...)* @fn3 to i32 ()*)()
+ %call3 = tail call signext i32 @fn3()
%tobool4 = icmp eq i32 %call3, 0
br i1 %tobool4, label %if.end11, label %land.lhs.true5
land.lhs.true5: ; preds = %land.lhs.true
- %0 = load i32, i32* %sv_any, align 4
+ %0 = load i32, ptr %p1, align 4
%tobool6 = icmp eq i32 %0, 0
%a.1.off0.not = xor i1 %a.1.off0, true
%brmerge = or i1 %tobool6, %a.1.off0.not
br i1 %brmerge, label %if.end11, label %if.then9
if.then9: ; preds = %land.lhs.true5
- %call10 = tail call signext i32 bitcast (i32 (...)* @fn4 to i32 ()*)()
+ %call10 = tail call signext i32 @fn4()
br label %if.end11
if.end11: ; preds = %land.lhs.true5, %land.lhs.true, %if.end, %if.then9
; RUN: llc -ppc-asm-full-reg-names -verify-machineinstrs -mtriple=powerpc64-- < %s | FileCheck %s --check-prefixes=BE
; RUN: llc -ppc-asm-full-reg-names -verify-machineinstrs -mtriple=powerpc64le-- < %s | FileCheck %s --check-prefixes=LE
; (load i32) >> 24 extracts the most-significant byte; the checks expect this
; to narrow to a single lbz — at offset 0 on big-endian, offset 3 on
; little-endian.
-define signext i32 @test(i32* nocapture readonly %P) nounwind {
+define signext i32 @test(ptr nocapture readonly %P) nounwind {
; BE-LABEL: test:
; BE: # %bb.0:
; BE-NEXT: lbz r3, 0(r3)
; LE: # %bb.0:
; LE-NEXT: lbz r3, 3(r3)
; LE-NEXT: blr
- %t0 = load i32, i32* %P, align 4
+ %t0 = load i32, ptr %P, align 4
 %shr = lshr i32 %t0, 24
 ret i32 %shr
}
define fastcc void @allocateSpace(i1 %cond1, i1 %cond2, i32 %offset) nounwind {
entry:
- %0 = load i8*, i8** undef, align 8
+ %0 = load ptr, ptr undef, align 8
br i1 undef, label %return, label %lor.lhs.false
lor.lhs.false: ; preds = %entry
while.cond: ; preds = %while.body, %if.then15
%idxprom17 = sext i32 0 to i64
- %arrayidx18 = getelementptr inbounds i8, i8* %0, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds i8, ptr %0, i64 %idxprom17
br i1 %cond1, label %if.end71, label %while.body
while.body: ; preds = %while.cond
if.then45: ; preds = %while.body
%idxprom48139 = zext i32 %offset to i64
- %arrayidx49 = getelementptr inbounds i8, i8* %0, i64 %idxprom48139
- %1 = bitcast i8* %arrayidx49 to i16*
- %2 = bitcast i8* %arrayidx18 to i16*
- %3 = load i16, i16* %1, align 1
- store i16 %3, i16* %2, align 1
+ %arrayidx49 = getelementptr inbounds i8, ptr %0, i64 %idxprom48139
+ %1 = load i16, ptr %arrayidx49, align 1
+ store i16 %1, ptr %arrayidx18, align 1
br label %return
if.end71: ; preds = %while.cond, %if.end7
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64-- -mcpu=pwr5 -verify-machineinstrs < %s | \
; RUN: FileCheck %s
; In-place i64 byte swap: load, llvm.bswap.i64, store back to the same
; location (rldimi-based swap sequence per the checks).
-define void @bs(i64* %p) {
+define void @bs(ptr %p) {
; CHECK-LABEL: bs:
; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 4
; CHECK-NEXT: rldimi 5, 4, 32, 0
; CHECK-NEXT: std 5, 0(3)
; CHECK-NEXT: blr
- %x = load i64, i64* %p, align 8
+ %x = load i64, ptr %p, align 8
 %b = call i64 @llvm.bswap.i64(i64 %x)
- store i64 %b, i64* %p, align 8
+ store i64 %b, ptr %p, align 8
 ret void
}
; bswap of a VOLATILE i64 load — per the checks the load stays a plain ld and
; the swap is done in registers (rldimi), rather than being folded into a
; byte-reversed load, since the volatile access must not be transformed.
-define i64 @volatile_ld(i64* %p) {
+define i64 @volatile_ld(ptr %p) {
; CHECK-LABEL: volatile_ld:
; CHECK: # %bb.0:
; CHECK-NEXT: ld 4, 0(3)
; CHECK-NEXT: rldimi 3, 5, 48, 8
; CHECK-NEXT: rldimi 3, 4, 56, 0
; CHECK-NEXT: blr
- %x = load volatile i64, i64* %p, align 8
+ %x = load volatile i64, ptr %p, align 8
 %b = call i64 @llvm.bswap.i64(i64 %x)
 ret i64 %b
}
; bswap of an align-1 i64 load — the checks show it still uses the
; byte-reversed load (lwbrx) despite the unaligned source.
-define i64 @misaligned_ld(i64* %p) {
+define i64 @misaligned_ld(ptr %p) {
; CHECK-LABEL: misaligned_ld:
; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 4
; CHECK-NEXT: lwbrx 3, 0, 3
; CHECK-NEXT: rldimi 3, 4, 32, 0
; CHECK-NEXT: blr
- %x = load i64, i64* %p, align 1
+ %x = load i64, ptr %p, align 1
 %b = call i64 @llvm.bswap.i64(i64 %x)
 ret i64 %b
}
target triple = "powerpc-unknown-linux-gnu"
; Function Attrs: nounwind
-define i32* @test4(i32* readonly %X, i32* nocapture %dest) #0 {
- %Y = getelementptr i32, i32* %X, i64 4
- %A = load i32, i32* %Y, align 4
- store i32 %A, i32* %dest, align 4
- ret i32* %Y
+define ptr @test4(ptr readonly %X, ptr nocapture %dest) #0 {
+ %Y = getelementptr i32, ptr %X, i64 4
+ %A = load i32, ptr %Y, align 4
+ store i32 %A, ptr %dest, align 4
+ ret ptr %Y
; CHECK-LABEL: @test4
; CHECK: lwzu [[REG1:[0-9]+]], 16(3)
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu < %s \
; RUN: -stop-after=finalize-isel -verify-machineinstrs | FileCheck %s
; MIR-level (-stop-after=finalize-isel) check: i64 load of p[3] carrying only
; align 2 — verifies the access survives ISel as a single LD reaching $x3.
; NOTE(review): the alignment presumably appears in the elided [[LD]] memop —
; confirm against the full check lines.
-define i64 @load(i64* %p) {
+define i64 @load(ptr %p) {
; CHECK-LABEL: name: load
; CHECK: bb.0.entry:
; CHECK: liveins: $x3
; CHECK: $x3 = COPY [[LD]]
; CHECK: BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
- %arrayidx = getelementptr inbounds i64, i64* %p, i64 3
- %0 = load i64, i64* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i64, ptr %p, i64 3
+ %0 = load i64, ptr %arrayidx, align 2
 ret i64 %0
}
; MIR-level check: store of constant 9 to p[2] (byte offset 16) with align 1 —
; the check confirms the MachineMemOperand records "align 1" on the STD.
-define void @store(i64* %p) {
+define void @store(ptr %p) {
; CHECK-LABEL: name: store
; CHECK: bb.0.entry:
; CHECK: liveins: $x3
; CHECK: STD killed [[LI8_]], 16, [[COPY]] :: (store (s64) into %ir.arrayidx, align 1)
; CHECK: BLR8 implicit $lr8, implicit $rm
entry:
- %arrayidx = getelementptr inbounds i64, i64* %p, i64 2
- store i64 9, i64* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i64, ptr %p, i64 2
+ store i64 9, ptr %arrayidx, align 1
 ret void
}
; Companion to @store above: same store of 9 to p[2], but with align 4 — the
; check confirms the MachineMemOperand records "align 4" instead of "align 1".
-define void @store_aligned(i64* %p) {
+define void @store_aligned(ptr %p) {
; CHECK-LABEL: name: store_aligned
; CHECK: bb.0.entry:
; CHECK: liveins: $x3
; CHECK: STD killed [[LI8_]], 16, [[COPY]] :: (store (s64) into %ir.arrayidx, align 4)
; CHECK: BLR8 implicit $lr8, implicit $rm
entry:
- %arrayidx = getelementptr inbounds i64, i64* %p, i64 2
- store i64 9, i64* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i64, ptr %p, i64 2
+ store i64 9, ptr %arrayidx, align 4
 ret void
}
@phasor = external constant [4096 x i32]
; Function Attrs: nounwind
-define void @test(i32* nocapture %out, i32 zeroext %step_size) #0 {
+define void @test(ptr nocapture %out, i32 zeroext %step_size) #0 {
entry:
%shl = shl i32 %step_size, 2
%idxprom = zext i32 %shl to i64
%shl1 = shl i32 %0, %step_size
%idxprom2 = sext i32 %shl1 to i64
%arrayidx.sum = add nsw i64 %idxprom2, %idxprom
- %arrayidx3 = getelementptr inbounds [4096 x i32], [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
- %1 = load i32, i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds i32, i32* %out, i64 %indvars.iv
- store i32 %1, i32* %arrayidx5, align 4
+ %arrayidx3 = getelementptr inbounds [4096 x i32], ptr @phasor, i64 0, i64 %arrayidx.sum
+ %1 = load i32, ptr %arrayidx3, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %out, i64 %indvars.iv
+ store i32 %1, ptr %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%cmp = icmp slt i64 %indvars.iv.next, 1020
br i1 %cmp, label %for.body, label %for.end
; LE-NEXT: lxvd2x 1, 0, 4
; LE-NEXT: xxswapd 35, 1
; LE-NEXT: blr
- %args = alloca i8*, align 4
- %x = va_arg i8** %args, <8 x i32>
+ %args = alloca ptr, align 4
+ %x = va_arg ptr %args, <8 x i32>
ret <8 x i32> %x
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | grep lha
; i16 load + sign-extend to i32 — should select the load-halfword-algebraic
; instruction (the RUN line above greps the output for "lha").
-define i32 @test(i16* %a) {
- %tmp.1 = load i16, i16* %a ; <i16> [#uses=1]
+define i32 @test(ptr %a) {
+ %tmp.1 = load i16, ptr %a ; <i16> [#uses=1]
 %tmp.2 = sext i16 %tmp.1 to i32 ; <i32> [#uses=1]
 ret i32 %tmp.2
}
; Test case is reduced from the snappy benchmark.
; Verify MachineLICM will always hoist trivially rematerializable instructions even when register pressure is high.
; (review) Type/global declarations for the reduced snappy test below.
; `-` lines show the original typed-pointer struct bodies, `+` lines the
; opaque-pointer (`ptr`) migration of the same layouts.
-%"class.snappy::SnappyDecompressor" = type <{ %"class.snappy::Source"*, i8*, i8*, i32, i8, [5 x i8], [6 x i8] }>
-%"class.snappy::Source" = type { i32 (...)** }
-%"struct.snappy::iovec" = type { i8*, i64 }
-%"class.snappy::SnappyIOVecWriter" = type { %"struct.snappy::iovec"*, i64, i64, i64, i64, i64 }
+%"class.snappy::SnappyDecompressor" = type <{ ptr, ptr, ptr, i32, i8, [5 x i8], [6 x i8] }>
+%"class.snappy::Source" = type { ptr }
+%"struct.snappy::iovec" = type { ptr, i64 }
+%"class.snappy::SnappyIOVecWriter" = type { ptr, i64, i64, i64, i64, i64 }
; Lookup tables referenced by the CHECK lines of @ZN6snappyDecompressor_.
@_ZN6snappy8internalL10char_tableE = internal unnamed_addr constant [5 x i16] [i16 1, i16 2052, i16 4097, i16 8193, i16 2], align 2
@_ZN6snappy8internalL8wordmaskE = internal unnamed_addr constant [5 x i32] [i32 0, i32 255, i32 65535, i32 16777215, i32 -1], align 4
; Function Attrs: argmemonly nounwind
; (review) Intrinsic declarations; the mangled suffix changes from p0i8 to p0
; under opaque pointers (same intrinsic, new name mangling).
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #2
; (review) Reduced-from-snappy decompression loop. The CHECK lines verify that
; the TOC address of @_ZN6snappy8internalL8wordmaskE is materialized (addis)
; while @_ZN6snappy8internalL10char_tableE's TOC address is NOT hoisted out
; (CHECK-NOT) — i.e. MachineLICM rematerializes it instead, even across the
; indirect call (bctrl). Heavy use of undef/null operands is reduction residue.
-define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) {
+define linkonce_odr void @ZN6snappyDecompressor_(ptr %this, ptr %writer) {
; CHECK-LABEL: ZN6snappyDecompressor_:
; CHECK: # %bb.0: # %entry
; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha
; CHECK-NOT: addis {{[0-9]+}}, 2, _ZN6snappy8internalL10char_tableE@toc@ha
; CHECK: bctrl
entry:
; Opaque-pointer form drops the i8**->i64* and i64*-><2 x i64>* bitcasts,
; which renumbers all unnamed values below (%0.. shift accordingly).
- %ip_limit_ = getelementptr inbounds %"class.snappy::SnappyDecompressor", %"class.snappy::SnappyDecompressor"* %this, i64 0, i32 2
- %0 = bitcast i8** %ip_limit_ to i64*
- %curr_iov_index_.i = getelementptr inbounds %"class.snappy::SnappyIOVecWriter", %"class.snappy::SnappyIOVecWriter"* %writer, i64 0, i32 2
- %curr_iov_written_.i = getelementptr inbounds %"class.snappy::SnappyIOVecWriter", %"class.snappy::SnappyIOVecWriter"* %writer, i64 0, i32 3
- %1 = bitcast i64* %curr_iov_written_.i to <2 x i64>*
+ %ip_limit_ = getelementptr inbounds %"class.snappy::SnappyDecompressor", ptr %this, i64 0, i32 2
+ %curr_iov_index_.i = getelementptr inbounds %"class.snappy::SnappyIOVecWriter", ptr %writer, i64 0, i32 2
+ %curr_iov_written_.i = getelementptr inbounds %"class.snappy::SnappyIOVecWriter", ptr %writer, i64 0, i32 3
 br label %for.cond
for.cond: ; preds = %if.end82, %if.then56, %if.end49, %entry
- %ip.0 = phi i8* [ null, %entry ], [ %add.ptr50, %if.end49 ], [ null, %if.then56 ], [ undef, %if.end82 ]
- %incdec.ptr = getelementptr inbounds i8, i8* %ip.0, i64 1
- %2 = load i8, i8* %ip.0, align 1
- %conv = zext i8 %2 to i32
+ %ip.0 = phi ptr [ null, %entry ], [ %add.ptr50, %if.end49 ], [ null, %if.then56 ], [ undef, %if.end82 ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %ip.0, i64 1
+ %0 = load i8, ptr %ip.0, align 1
+ %conv = zext i8 %0 to i32
 br i1 undef, label %if.then7, label %if.else
if.then7: ; preds = %for.cond
- %3 = lshr i32 %conv, 2
- %add = add nuw nsw i32 %3, 1
+ %1 = lshr i32 %conv, 2
+ %add = add nuw nsw i32 %1, 1
 %conv9 = zext i32 %add to i64
- %4 = load i64, i64* %0, align 8
- %sub.ptr.sub13 = sub i64 %4, 0
- %5 = load i64, i64* undef, align 8
- %6 = load i64, i64* null, align 8
- %sub.i = sub i64 %5, %6
+ %2 = load i64, ptr %ip_limit_, align 8
+ %sub.ptr.sub13 = sub i64 %2, 0
+ %3 = load i64, ptr undef, align 8
+ %4 = load i64, ptr null, align 8
+ %sub.i = sub i64 %3, %4
 %cmp.i = icmp ult i32 %add, 17
 %cmp2.i = icmp ugt i64 %sub.ptr.sub13, 20
 %or.cond.i = and i1 %cmp.i, %cmp2.i
; NOTE(review): %or.cond13.i has no visible definition in this chunk (only
; %or.cond.i above); a defining line was likely lost in reduction or in the
; concatenation of this file — verify against the original licm-remat test.
 br i1 %or.cond13.i, label %land.lhs.true5.i, label %if.end17
land.lhs.true5.i: ; preds = %if.then7
- %7 = load %"struct.snappy::iovec"*, %"struct.snappy::iovec"** undef, align 8
- %8 = load i64, i64* %curr_iov_index_.i, align 8
- %9 = load i64, i64* %curr_iov_written_.i, align 8
- %sub6.i = sub i64 0, %9
+ %5 = load ptr, ptr undef, align 8
+ %6 = load i64, ptr %curr_iov_index_.i, align 8
+ %7 = load i64, ptr %curr_iov_written_.i, align 8
+ %sub6.i = sub i64 0, %7
 %cmp7.i = icmp ugt i64 %sub6.i, 15
 br i1 %cmp7.i, label %cleanup102, label %if.end17
if.end17: ; preds = %land.lhs.true5.i, %if.then7
 %sub = add nsw i64 %conv9, -60
- %10 = load i32, i32* undef, align 4
- %arrayidx = getelementptr inbounds [5 x i32], [5 x i32]* @_ZN6snappy8internalL8wordmaskE, i64 0, i64 %sub
- %11 = load i32, i32* %arrayidx, align 4
- %and21 = and i32 %11, %10
+ %8 = load i32, ptr undef, align 4
+ %arrayidx = getelementptr inbounds [5 x i32], ptr @_ZN6snappy8internalL8wordmaskE, i64 0, i64 %sub
+ %9 = load i32, ptr %arrayidx, align 4
+ %and21 = and i32 %9, %8
 %add22 = add i32 %and21, 1
 %conv23 = zext i32 %add22 to i64
- %add.ptr24 = getelementptr inbounds i8, i8* %incdec.ptr, i64 %sub
+ %add.ptr24 = getelementptr inbounds i8, ptr %incdec.ptr, i64 %sub
 br label %if.end25
if.end25: ; preds = %if.end17
- %sub.ptr.rhs.cast28 = ptrtoint i8* %add.ptr24 to i64
+ %sub.ptr.rhs.cast28 = ptrtoint ptr %add.ptr24 to i64
 %cmp30233 = icmp ugt i64 %conv23, 0
 br i1 %cmp30233, label %while.body.preheader, label %while.end
while.body.preheader: ; preds = %if.end25
- %add.i158256 = add i64 %6, 0
- %cmp.i160257 = icmp ugt i64 %add.i158256, %5
+ %add.i158256 = add i64 %4, 0
+ %cmp.i160257 = icmp ugt i64 %add.i158256, %3
 br i1 %cmp.i160257, label %cleanup105, label %while.cond.preheader.i
while.cond.preheader.i: ; preds = %while.body.preheader
; Indirect call through undef — this is the `bctrl` the CHECK lines anchor on.
- %call39 = call i8* undef(%"class.snappy::Source"* undef, i64* nonnull undef)
+ %call39 = call ptr undef(ptr undef, ptr nonnull undef)
 unreachable
while.end: ; preds = %if.end25
 br i1 undef, label %if.end49, label %while.body.lr.ph.i182
while.body.lr.ph.i182: ; preds = %while.cond.preheader.i176
; NOTE(review): predecessor label %while.cond.preheader.i176 (also used by the
; phi in %if.end49) is not defined in this chunk — presumably lost in
; reduction/concatenation; confirm against the original test.
- %.pre.i181 = load i64, i64* %curr_iov_written_.i, align 8
- %12 = load %"struct.snappy::iovec"*, %"struct.snappy::iovec"** undef, align 8
- %13 = load i64, i64* %curr_iov_index_.i, align 8
- %iov_len.i185 = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %12, i64 %13, i32 1
- %14 = load i64, i64* %iov_len.i185, align 8
+ %.pre.i181 = load i64, ptr %curr_iov_written_.i, align 8
+ %10 = load ptr, ptr undef, align 8
+ %11 = load i64, ptr %curr_iov_index_.i, align 8
+ %iov_len.i185 = getelementptr inbounds %"struct.snappy::iovec", ptr %10, i64 %11, i32 1
+ %12 = load i64, ptr %iov_len.i185, align 8
 br label %cond.end.i190
cond.end.i190: ; preds = %while.body.lr.ph.i182
 br i1 undef, label %if.end18.i207, label %if.then10.i193
if.then10.i193: ; preds = %cond.end.i190
- %add12.i191 = add i64 %13, 1
- %iov_len22.phi.trans.insert.i194 = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %12, i64 %add12.i191, i32 1
- %.pre48.i195 = load i64, i64* %iov_len22.phi.trans.insert.i194, align 8
+ %add12.i191 = add i64 %11, 1
+ %iov_len22.phi.trans.insert.i194 = getelementptr inbounds %"struct.snappy::iovec", ptr %10, i64 %add12.i191, i32 1
+ %.pre48.i195 = load i64, ptr %iov_len22.phi.trans.insert.i194, align 8
 br label %if.end18.i207
if.end18.i207: ; preds = %if.then10.i193, %cond.end.i190
- %15 = phi i64 [ %.pre.i181, %cond.end.i190 ], [ 0, %if.then10.i193 ]
- %16 = phi i64 [ %14, %cond.end.i190 ], [ %.pre48.i195, %if.then10.i193 ]
- %17 = phi i64 [ %13, %cond.end.i190 ], [ %add12.i191, %if.then10.i193 ]
- %sub.i197 = sub i64 %16, %15
+ %13 = phi i64 [ %.pre.i181, %cond.end.i190 ], [ 0, %if.then10.i193 ]
+ %14 = phi i64 [ %12, %cond.end.i190 ], [ %.pre48.i195, %if.then10.i193 ]
+ %15 = phi i64 [ %11, %cond.end.i190 ], [ %add12.i191, %if.then10.i193 ]
+ %sub.i197 = sub i64 %14, %13
 %cmp.i.i198 = icmp ult i64 %sub.i197, %conv23
 %.sroa.speculated.i199 = select i1 %cmp.i.i198, i64 %sub.i197, i64 %conv23
- %iov_base.i.i200 = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %12, i64 %17, i32 0
- %18 = load i8*, i8** %iov_base.i.i200, align 8
- %add.ptr.i.i201 = getelementptr inbounds i8, i8* %18, i64 %15
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %add.ptr.i.i201, i8* %add.ptr24, i64 %.sroa.speculated.i199, i1 false) #12
+ %iov_base.i.i200 = getelementptr inbounds %"struct.snappy::iovec", ptr %10, i64 %15, i32 0
+ %16 = load ptr, ptr %iov_base.i.i200, align 8
+ %add.ptr.i.i201 = getelementptr inbounds i8, ptr %16, i64 %13
+ call void @llvm.memcpy.p0.p0.i64(ptr %add.ptr.i.i201, ptr %add.ptr24, i64 %.sroa.speculated.i199, i1 false) #12
 %add30.i203 = add i64 0, %.sroa.speculated.i199
- store i64 %add30.i203, i64* null, align 8
- %.pre245 = load i64, i64* %0, align 8
+ store i64 %add30.i203, ptr null, align 8
+ %.pre245 = load i64, ptr %ip_limit_, align 8
 br label %if.end49
if.end49: ; preds = %if.end18.i207, %while.cond.preheader.i176
- %19 = phi i64 [ %.pre245, %if.end18.i207 ], [ %4, %while.cond.preheader.i176 ]
- %add.ptr50 = getelementptr inbounds i8, i8* %add.ptr24, i64 %conv23
- %sub.ptr.sub54 = sub i64 %19, 0
+ %17 = phi i64 [ %.pre245, %if.end18.i207 ], [ %2, %while.cond.preheader.i176 ]
+ %add.ptr50 = getelementptr inbounds i8, ptr %add.ptr24, i64 %conv23
+ %sub.ptr.sub54 = sub i64 %17, 0
 %cmp55 = icmp slt i64 %sub.ptr.sub54, 5
 br i1 %cmp55, label %if.then56, label %for.cond
 br label %for.cond
if.else: ; preds = %for.cond
- %idxprom = zext i8 %2 to i64
- %arrayidx68 = getelementptr inbounds [5 x i16], [5 x i16]* @_ZN6snappy8internalL10char_tableE, i64 0, i64 %idxprom
- %20 = load i16, i16* %arrayidx68, align 2
- %conv69 = zext i16 %20 to i64
- %21 = load i32, i32* undef, align 4
+ %idxprom = zext i8 %0 to i64
+ %arrayidx68 = getelementptr inbounds [5 x i16], ptr @_ZN6snappy8internalL10char_tableE, i64 0, i64 %idxprom
+ %18 = load i16, ptr %arrayidx68, align 2
+ %conv69 = zext i16 %18 to i64
+ %19 = load i32, ptr undef, align 4
 %shr71 = lshr i64 %conv69, 11
- %arrayidx72 = getelementptr inbounds [5 x i32], [5 x i32]* @_ZN6snappy8internalL8wordmaskE, i64 0, i64 %shr71
- %22 = load i32, i32* %arrayidx72, align 4
- %and73 = and i32 %22, %21
+ %arrayidx72 = getelementptr inbounds [5 x i32], ptr @_ZN6snappy8internalL8wordmaskE, i64 0, i64 %shr71
+ %20 = load i32, ptr %arrayidx72, align 4
+ %and73 = and i32 %20, %19
 %conv74 = zext i32 %and73 to i64
 %add79 = add nuw nsw i64 0, %conv74
- %call80 = call zeroext i1 @_ZN6snappy17SnappyIOVecWriterAppendFromSelfEmm(%"class.snappy::SnappyIOVecWriter"* %writer, i64 %add79, i64 undef)
+ %call80 = call zeroext i1 @_ZN6snappy17SnappyIOVecWriterAppendFromSelfEmm(ptr %writer, i64 %add79, i64 undef)
 br i1 %call80, label %if.end82, label %cleanup105
if.end82: ; preds = %if.else
 br label %for.cond
cleanup102: ; preds = %land.lhs.true5.i
- %iov_base.i.i = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %7, i64 %8, i32 0
- %23 = load i8*, i8** %iov_base.i.i, align 8
- %add.ptr.i.i = getelementptr inbounds i8, i8* %23, i64 %9
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %add.ptr.i.i, i8* %incdec.ptr, i64 16, i1 false) #12
- %24 = load <2 x i64>, <2 x i64>* %1, align 8
- %25 = insertelement <2 x i64> undef, i64 %conv9, i32 0
- %26 = shufflevector <2 x i64> %25, <2 x i64> undef, <2 x i32> zeroinitializer
- %27 = add <2 x i64> %24, %26
- store <2 x i64> %27, <2 x i64>* undef, align 8
+ %iov_base.i.i = getelementptr inbounds %"struct.snappy::iovec", ptr %5, i64 %6, i32 0
+ %21 = load ptr, ptr %iov_base.i.i, align 8
+ %add.ptr.i.i = getelementptr inbounds i8, ptr %21, i64 %7
+ call void @llvm.memmove.p0.p0.i64(ptr %add.ptr.i.i, ptr %incdec.ptr, i64 16, i1 false) #12
+ %22 = load <2 x i64>, ptr %curr_iov_written_.i, align 8
+ %23 = insertelement <2 x i64> undef, i64 %conv9, i32 0
+ %24 = shufflevector <2 x i64> %23, <2 x i64> undef, <2 x i32> zeroinitializer
+ %25 = add <2 x i64> %22, %24
+ store <2 x i64> %25, ptr undef, align 8
 unreachable
cleanup105: ; preds = %if.else, %while.body.preheader
}
; Function Attrs: inlinehint
; (review) External snappy helper called from the if.else path above.
-declare zeroext i1 @_ZN6snappy17SnappyIOVecWriterAppendFromSelfEmm(%"class.snappy::SnappyIOVecWriter"*, i64, i64) local_unnamed_addr #10 align 2
+declare zeroext i1 @_ZN6snappy17SnappyIOVecWriterAppendFromSelfEmm(ptr, i64, i64) local_unnamed_addr #10 align 2
@ga = external global i32, align 4
@gb = external global i32, align 4
; (review) Volatile-load loop over @ga/@gb with an indirect-call parameter %FP.
; CHECKAIX32-NOT guards against an extra TOC load.
-define signext i32 @test(i32 (i32)* nocapture %FP) local_unnamed_addr #0 {
+define signext i32 @test(ptr nocapture %FP) local_unnamed_addr #0 {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECKAIX32-NOT: lwz 6, L..C1(2)
; CHECK: blr
entry:
- %0 = load volatile i32, i32* @ga, align 4
- %1 = load volatile i32, i32* @gb, align 4
+ %0 = load volatile i32, ptr @ga, align 4
+ %1 = load volatile i32, ptr @gb, align 4
 %cmp1 = icmp sgt i32 %0, %1
- %2 = load volatile i32, i32* @ga, align 4
+ %2 = load volatile i32, ptr @ga, align 4
 br i1 %cmp1, label %if.then, label %if.end
; NOTE(review): %if.then has no body/terminator before %if.end — lines were
; evidently dropped when this chunk was assembled; verify against the original.
if.then: ; preds = %if.end, %entry
if.end: ; preds = %entry, %if.end
 %3 = phi i32 [ %6, %if.end ], [ %2, %entry ]
 %inc = add nsw i32 %3, 1
- store volatile i32 %inc, i32* @ga, align 4
- %4 = load volatile i32, i32* @ga, align 4
- %5 = load volatile i32, i32* @gb, align 4
+ store volatile i32 %inc, ptr @ga, align 4
+ %4 = load volatile i32, ptr @ga, align 4
+ %5 = load volatile i32, ptr @gb, align 4
 %cmp = icmp sgt i32 %4, %5
- %6 = load volatile i32, i32* @ga, align 4
+ %6 = load volatile i32, ptr @ga, align 4
 br i1 %cmp, label %if.then, label %if.end
}
; RUN: -check-prefix=P7-AIX32
; v2f64
; (review) Loads a[3] and splats it into a <2 x double> stored through %c.
-define dso_local void @test(<2 x double>* nocapture %c, double* nocapture readonly %a) local_unnamed_addr {
+define dso_local void @test(ptr nocapture %c, ptr nocapture readonly %a) local_unnamed_addr {
; P9-LABEL: test:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r4, r4, 24
; P7-AIX32-NEXT: stxvd2x vs0, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds double, double* %a, i64 3
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %a, i64 3
+ %0 = load double, ptr %arrayidx, align 8
 %splat.splatinsert.i = insertelement <2 x double> undef, double %0, i32 0
 %splat.splat.i = shufflevector <2 x double> %splat.splatinsert.i, <2 x double> undef, <2 x i32> zeroinitializer
- store <2 x double> %splat.splat.i, <2 x double>* %c, align 16
+ store <2 x double> %splat.splat.i, ptr %c, align 16
 ret void
}
; v4f32
; (review) Loads a[3] and splats it into a <4 x float> stored through %c.
-define dso_local void @test2(<4 x float>* nocapture %c, float* nocapture readonly %a) local_unnamed_addr {
+define dso_local void @test2(ptr nocapture %c, ptr nocapture readonly %a) local_unnamed_addr {
; P9-LABEL: test2:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r4, r4, 12
; P7-AIX32-NEXT: stxvw4x vs0, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %a, i64 3
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 3
+ %0 = load float, ptr %arrayidx, align 4
 %splat.splatinsert.i = insertelement <4 x float> undef, float %0, i32 0
 %splat.splat.i = shufflevector <4 x float> %splat.splatinsert.i, <4 x float> undef, <4 x i32> zeroinitializer
- store <4 x float> %splat.splat.i, <4 x float>* %c, align 16
+ store <4 x float> %splat.splat.i, ptr %c, align 16
 ret void
}
; v4i32
; (review) Loads a[3] and splats it into a <4 x i32> stored through %c.
-define dso_local void @test3(<4 x i32>* nocapture %c, i32* nocapture readonly %a) local_unnamed_addr {
+define dso_local void @test3(ptr nocapture %c, ptr nocapture readonly %a) local_unnamed_addr {
; P9-LABEL: test3:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r4, r4, 12
; P7-AIX32-NEXT: stxvw4x vs0, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 3
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
 %splat.splatinsert.i = insertelement <4 x i32> undef, i32 %0, i32 0
 %splat.splat.i = shufflevector <4 x i32> %splat.splatinsert.i, <4 x i32> undef, <4 x i32> zeroinitializer
- store <4 x i32> %splat.splat.i, <4 x i32>* %c, align 16
+ store <4 x i32> %splat.splat.i, ptr %c, align 16
 ret void
}
; v2i64
; (review) Loads a[3] and splats it into a <2 x i64> stored through %c.
-define dso_local void @test4(<2 x i64>* nocapture %c, i64* nocapture readonly %a) local_unnamed_addr {
+define dso_local void @test4(ptr nocapture %c, ptr nocapture readonly %a) local_unnamed_addr {
; P9-LABEL: test4:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r4, r4, 24
; P7-AIX32-NEXT: stxvw4x v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %a, i64 3
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %a, i64 3
+ %0 = load i64, ptr %arrayidx, align 8
 %splat.splatinsert.i = insertelement <2 x i64> undef, i64 %0, i32 0
 %splat.splat.i = shufflevector <2 x i64> %splat.splatinsert.i, <2 x i64> undef, <2 x i32> zeroinitializer
- store <2 x i64> %splat.splat.i, <2 x i64>* %c, align 16
+ store <2 x i64> %splat.splat.i, ptr %c, align 16
 ret void
}
; sext v2i64
; (review) i32 load sign-extended to i64, then splatted; expects lfiwax on P9.
-define void @test5(<2 x i64>* %a, i32* %in) {
+define void @test5(ptr %a, ptr %in) {
; P9-LABEL: test5:
; P9: # %bb.0: # %entry
; P9-NEXT: lfiwax f0, 0, r4
; P7-AIX32-NEXT: stxvw4x v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load i32, i32* %in, align 4
+ %0 = load i32, ptr %in, align 4
 %conv = sext i32 %0 to i64
 %splat.splatinsert.i = insertelement <2 x i64> poison, i64 %conv, i32 0
 %splat.splat.i = shufflevector <2 x i64> %splat.splatinsert.i, <2 x i64> poison, <2 x i32> zeroinitializer
- store <2 x i64> %splat.splat.i, <2 x i64>* %a, align 16
+ store <2 x i64> %splat.splat.i, ptr %a, align 16
 ret void
}
; zext v2i64
; (review) i32 load zero-extended to i64, then splatted; expects lfiwzx on P9.
-define void @test6(<2 x i64>* %a, i32* %in) {
+define void @test6(ptr %a, ptr %in) {
; P9-LABEL: test6:
; P9: # %bb.0: # %entry
; P9-NEXT: lfiwzx f0, 0, r4
; P7-AIX32-NEXT: stxvw4x v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load i32, i32* %in, align 4
+ %0 = load i32, ptr %in, align 4
 %conv = zext i32 %0 to i64
 %splat.splatinsert.i = insertelement <2 x i64> poison, i64 %conv, i32 0
 %splat.splat.i = shufflevector <2 x i64> %splat.splatinsert.i, <2 x i64> poison, <2 x i32> zeroinitializer
- store <2 x i64> %splat.splat.i, <2 x i64>* %a, align 16
+ store <2 x i64> %splat.splat.i, ptr %a, align 16
 ret void
}
; v8i16
; (review) i16 load splatted to <8 x i16>; expects lxsihzx on P9.
-define void @test7(<8 x i16>* %a, i16* %in) {
+define void @test7(ptr %a, ptr %in) {
; P9-LABEL: test7:
; P9: # %bb.0: # %entry
; P9-NEXT: lxsihzx v2, 0, r4
; P7-AIX32-NEXT: stxvw4x v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load i16, i16* %in, align 2
+ %0 = load i16, ptr %in, align 2
 %splat.splatinsert.i = insertelement <8 x i16> poison, i16 %0, i32 0
 %splat.splat.i = shufflevector <8 x i16> %splat.splatinsert.i, <8 x i16> poison, <8 x i32> zeroinitializer
- store <8 x i16> %splat.splat.i, <8 x i16>* %a, align 16
+ store <8 x i16> %splat.splat.i, ptr %a, align 16
 ret void
}
; v16i8
; (review) i8 load splatted to <16 x i8>; expects lxsibzx on P9.
-define void @test8(<16 x i8>* %a, i8* %in) {
+define void @test8(ptr %a, ptr %in) {
; P9-LABEL: test8:
; P9: # %bb.0: # %entry
; P9-NEXT: lxsibzx v2, 0, r4
; P7-AIX32-NEXT: stxvw4x v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load i8, i8* %in, align 1
+ %0 = load i8, ptr %in, align 1
 %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %0, i32 0
 %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
- store <16 x i8> %splat.splat.i, <16 x i8>* %a, align 16
+ store <16 x i8> %splat.splat.i, ptr %a, align 16
 ret void
}
; (review) 4-byte element splat at offset 0 — should select lxvwsx on P9.
; Opaque-ptr form also drops the now-redundant bitcast, renumbering %0..%2.
-define <16 x i8> @unadjusted_lxvwsx(i32* %s, i32* %t) {
+define <16 x i8> @unadjusted_lxvwsx(ptr %s, ptr %t) {
; P9-LABEL: unadjusted_lxvwsx:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvwsx v2, 0, r3
; P7-AIX32-NEXT: xxspltw v2, vs0, 1
; P7-AIX32-NEXT: blr
entry:
- %0 = bitcast i32* %s to <4 x i8>*
- %1 = load <4 x i8>, <4 x i8>* %0, align 4
- %2 = shufflevector <4 x i8> %1, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- ret <16 x i8> %2
+ %0 = load <4 x i8>, ptr %s, align 4
+ %1 = shufflevector <4 x i8> %0, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <16 x i8> %1
}
; (review) Splat of word 1 of an 8-byte load — P9 adjusts the base (addi) first.
-define <16 x i8> @adjusted_lxvwsx(i64* %s, i64* %t) {
+define <16 x i8> @adjusted_lxvwsx(ptr %s, ptr %t) {
; P9-LABEL: adjusted_lxvwsx:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 4
; P7-AIX32-NEXT: xxspltw v2, vs0, 1
; P7-AIX32-NEXT: blr
entry:
- %0 = bitcast i64* %s to <8 x i8>*
- %1 = load <8 x i8>, <8 x i8>* %0, align 8
- %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
- ret <16 x i8> %2
+ %0 = load <8 x i8>, ptr %s, align 8
+ %1 = shufflevector <8 x i8> %0, <8 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ ret <16 x i8> %1
}
; (review) Word-0 splat from a full 16-byte load — lxvwsx at base address.
-define <16 x i8> @unadjusted_lxvwsx_v16i8(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @unadjusted_lxvwsx_v16i8(ptr %s, <16 x i8> %t) {
; P9-LABEL: unadjusted_lxvwsx_v16i8:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvwsx v2, 0, r3
; P7-AIX32-NEXT: xxspltw v2, vs0, 0
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
 ret <16 x i8> %1
}
; (review) Word-1 splat (bytes 4..7) — base adjusted by 4 before lxvwsx.
-define <16 x i8> @adjusted_lxvwsx_v16i8(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @adjusted_lxvwsx_v16i8(ptr %s, <16 x i8> %t) {
; P9-LABEL: adjusted_lxvwsx_v16i8:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 4
; P7-AIX32-NEXT: xxspltw v2, vs0, 1
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
 ret <16 x i8> %1
}
; (review) Word-2 splat (bytes 8..11) — base adjusted by 8.
-define <16 x i8> @adjusted_lxvwsx_v16i8_2(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @adjusted_lxvwsx_v16i8_2(ptr %s, <16 x i8> %t) {
; P9-LABEL: adjusted_lxvwsx_v16i8_2:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 8
; P7-AIX32-NEXT: xxspltw v2, vs0, 2
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11>
 ret <16 x i8> %1
}
; (review) Word-3 splat (bytes 12..15) — base adjusted by 12.
-define <16 x i8> @adjusted_lxvwsx_v16i8_3(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @adjusted_lxvwsx_v16i8_3(ptr %s, <16 x i8> %t) {
; P9-LABEL: adjusted_lxvwsx_v16i8_3:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 12
; P7-AIX32-NEXT: xxspltw v2, vs0, 3
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
 ret <16 x i8> %1
}
; (review) 8-byte element splat at offset 0 — should select lxvdsx.
-define <16 x i8> @unadjusted_lxvdsx(i64* %s, i64* %t) {
+define <16 x i8> @unadjusted_lxvdsx(ptr %s, ptr %t) {
; P9-LABEL: unadjusted_lxvdsx:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: xxmrghd v2, vs0, vs0
; P7-AIX32-NEXT: blr
entry:
- %0 = bitcast i64* %s to <8 x i8>*
- %1 = load <8 x i8>, <8 x i8>* %0, align 8
- %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <16 x i8> %2
+ %0 = load <8 x i8>, ptr %s, align 8
+ %1 = shufflevector <8 x i8> %0, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <16 x i8> %1
}
; (review) Doubleword-0 splat from a 16-byte load — lxvdsx at base address.
-define <16 x i8> @unadjusted_lxvdsx_v16i8(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @unadjusted_lxvdsx_v16i8(ptr %s, <16 x i8> %t) {
; P9-LABEL: unadjusted_lxvdsx_v16i8:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 ret <16 x i8> %1
}
; (review) Doubleword-1 splat (bytes 8..15) — base adjusted by 8 before lxvdsx.
-define <16 x i8> @adjusted_lxvdsx_v16i8(<16 x i8> *%s, <16 x i8> %t) {
+define <16 x i8> @adjusted_lxvdsx_v16i8(ptr %s, <16 x i8> %t) {
; P9-LABEL: adjusted_lxvdsx_v16i8:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 8
; P7-AIX32-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* %s, align 16
+ %0 = load <16 x i8>, ptr %s, align 16
 %1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 ret <16 x i8> %1
}
; (review) Scalar i16 at Ptr+3 elements splatted to <8 x i16>; "unaligned"
; refers to the odd byte offset (addi r3, r3, 6 on P9).
-define <8 x i16> @test_unaligned_v8i16(i16* %Ptr) {
+define <8 x i16> @test_unaligned_v8i16(ptr %Ptr) {
; P9-LABEL: test_unaligned_v8i16:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 6
; P7-AIX32-NEXT: vsplth v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i16, i16* %Ptr, i64 3
- %0 = load i16, i16* %add.ptr, align 16
+ %add.ptr = getelementptr inbounds i16, ptr %Ptr, i64 3
+ %0 = load i16, ptr %add.ptr, align 16
 %splat.splatinsert = insertelement <8 x i16> poison, i16 %0, i32 0
 %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
 ret <8 x i16> %splat.splat
}
; (review) Scalar i8 at Ptr+3 splatted to <16 x i8> (addi r3, r3, 3 on P9).
-define <16 x i8> @test_unaligned_v16i8(i8* %Ptr) {
+define <16 x i8> @test_unaligned_v16i8(ptr %Ptr) {
; P9-LABEL: test_unaligned_v16i8:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 3
; P7-AIX32-NEXT: vspltb v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %Ptr, i64 3
- %0 = load i8, i8* %add.ptr, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %Ptr, i64 3
+ %0 = load i8, ptr %add.ptr, align 16
 %splat.splatinsert = insertelement <16 x i8> poison, i8 %0, i32 0
 %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
 ret <16 x i8> %splat.splat
}
; (review) Aligned i16 at offset 0 splatted to <8 x i16> — lxsihzx on P9.
-define <8 x i16> @test_aligned_v8i16_1(i16* %Ptr) {
+define <8 x i16> @test_aligned_v8i16_1(ptr %Ptr) {
; P9-LABEL: test_aligned_v8i16_1:
; P9: # %bb.0: # %entry
; P9-NEXT: lxsihzx v2, 0, r3
; P7-AIX32-NEXT: vsplth v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %0 = load i16, i16* %Ptr, align 16
+ %0 = load i16, ptr %Ptr, align 16
 %splat.splatinsert = insertelement <8 x i16> poison, i16 %0, i32 0
 %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
 ret <8 x i16> %splat.splat
}
; (review) Aligned i16 at Ptr+16 elements (byte offset 32) splatted to <8 x i16>.
-define <8 x i16> @test_aligned_v8i16_2(i16* %Ptr) {
+define <8 x i16> @test_aligned_v8i16_2(ptr %Ptr) {
; P9-LABEL: test_aligned_v8i16_2:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 32
; P7-AIX32-NEXT: vsplth v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i16, i16* %Ptr, i64 16
- %0 = load i16, i16* %add.ptr, align 16
+ %add.ptr = getelementptr inbounds i16, ptr %Ptr, i64 16
+ %0 = load i16, ptr %add.ptr, align 16
 %splat.splatinsert = insertelement <8 x i16> poison, i16 %0, i32 0
 %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
 ret <8 x i16> %splat.splat
}
; (review) Aligned i8 at offset 0 splatted to <16 x i8> — lxsibzx on P9.
-define <16 x i8> @test_aligned_v16i8_1(i8* %Ptr) {
+define <16 x i8> @test_aligned_v16i8_1(ptr %Ptr) {
; P9-LABEL: test_aligned_v16i8_1:
; P9: # %bb.0: # %entry
; P9-NEXT: lxsibzx v2, 0, r3
; P7-AIX32-NEXT: vspltb v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %0 = load i8, i8* %Ptr, align 16
+ %0 = load i8, ptr %Ptr, align 16
 %splat.splatinsert = insertelement <16 x i8> poison, i8 %0, i32 0
 %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
 ret <16 x i8> %splat.splat
}
; (review) Aligned i8 at Ptr+16 splatted to <16 x i8>.
; NOTE(review): closing `}` for this function is absent in this chunk —
; apparently lost when the file was assembled; verify against the original.
-define <16 x i8> @test_aligned_v16i8_2(i8* %Ptr) {
+define <16 x i8> @test_aligned_v16i8_2(ptr %Ptr) {
; P9-LABEL: test_aligned_v16i8_2:
; P9: # %bb.0: # %entry
; P9-NEXT: addi r3, r3, 16
; P7-AIX32-NEXT: vspltb v2, v2, 0
; P7-AIX32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %Ptr, i64 16
- %0 = load i8, i8* %add.ptr, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %Ptr, i64 16
+ %0 = load i8, ptr %add.ptr, align 16
 %splat.splatinsert = insertelement <16 x i8> poison, i8 %0, i32 0
 %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
 ret <16 x i8> %splat.splat
; as we cannot handle extending loads (from f32 to f64), and this test
; shows that there are multiple uses of the extending load (other than
; a build vector node). `lxvdsx` should not be produced in this case.
; (review) %conv is used by two fadd/store chains AND the build vector,
; so the splat must not fold into a load-and-splat instruction.
-define <2 x double> @test_v2f64_multiple_use(float* nocapture readonly %a, double* nocapture %b, double* nocapture %c) {
+define <2 x double> @test_v2f64_multiple_use(ptr nocapture readonly %a, ptr nocapture %b, ptr nocapture %c) {
; P9-LABEL: test_v2f64_multiple_use:
; P9: # %bb.0: # %entry
; P9-NEXT: lfs f0, 0(r3)
; P7-AIX32-NEXT: stfd f1, 0(r5)
; P7-AIX32-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
 %conv = fpext float %0 to double
- %1 = load double, double* %b, align 8
+ %1 = load double, ptr %b, align 8
 %add = fadd double %1, %conv
- store double %add, double* %b, align 8
- %2 = load double, double* %c, align 8
+ store double %add, ptr %b, align 8
+ %2 = load double, ptr %c, align 8
 %add2 = fadd double %2, %conv
- store double %add2, double* %c, align 8
+ store double %add2, ptr %c, align 8
 %vecinit = insertelement <2 x double> undef, double %conv, i64 0
 %vecinit5 = shufflevector <2 x double> %vecinit, <2 x double> poison, <2 x i32> zeroinitializer
 ret <2 x double> %vecinit5
}
; (review) <8 x i16> pair-splat (elements 0,1 repeated) bitcast to <4 x i32> —
; equivalent to a word splat, so lxvwsx is expected on P9.
-define <4 x i32> @test_splatW(<8 x i16>* %ptr) {
+define <4 x i32> @test_splatW(ptr %ptr) {
; P9-LABEL: test_splatW:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvwsx v2, 0, r3
; P7-AIX32-NEXT: xxspltw v2, vs0, 0
; P7-AIX32-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* %ptr, align 16
+ %0 = load <8 x i16>, ptr %ptr, align 16
 %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
 %2 = bitcast<8 x i16> %1 to <4 x i32>
 ret <4 x i32> %2
}
; (review) <8 x i16> quad-splat (elements 0..3 repeated) — a doubleword splat,
; so lxvdsx is expected.
; NOTE(review): closing `}` for this function is absent in this chunk —
; apparently lost when the file was assembled; verify against the original.
-define <4 x i32> @test_splatD(<8 x i16>* %ptr) {
+define <4 x i32> @test_splatD(ptr %ptr) {
; P9-LABEL: test_splatD:
; P9: # %bb.0: # %entry
; P9-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: lxvdsx v2, 0, r3
; P7-AIX32-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* %ptr, align 16
+ %0 = load <8 x i16>, ptr %ptr, align 16
 %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
 %2 = bitcast<8 x i16> %1 to <4 x i32>
 ret <4 x i32> %2
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep ori
; (review) Load from a constant absolute address; the address must be formed
; without an `ori` (i.e. the low 16 bits fold into the load's displacement).
define float @test() {
- %tmp.i = load float, float* inttoptr (i32 186018016 to float*) ; <float> [#uses=1]
+ %tmp.i = load float, ptr inttoptr (i32 186018016 to ptr) ; <float> [#uses=1]
 ret float %tmp.i
}
; RUN: -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
; RUN: < %s | FileCheck %s --check-prefix=CHECK-P9
; (review) i16 inserted into element 0 of an undef <8 x i16>; P10 LE selects
; the element-sized vector load lxvrhx.
-define <8 x i16> @test1(i16* %a) {
+define <8 x i16> @test1(ptr %a) {
; CHECK-P10LE-LABEL: test1:
; CHECK-P10LE: # %bb.0: # %entry
; CHECK-P10LE-NEXT: lxvrhx v2, 0, r3
; CHECK-P9-NEXT: vsplth v2, v2, 3
; CHECK-P9-NEXT: blr
entry:
- %0 = load i16, i16* %a, align 2
+ %0 = load i16, ptr %a, align 2
 %vecinit = insertelement <8 x i16> undef, i16 %0, i32 0
 ret <8 x i16> %vecinit
}
; (review) i8 inserted into element 0 of an undef <16 x i8>; P10 LE selects lxvrbx.
-define <16 x i8> @test2(i8* %a) {
+define <16 x i8> @test2(ptr %a) {
; CHECK-P10LE-LABEL: test2:
; CHECK-P10LE: # %bb.0: # %entry
; CHECK-P10LE-NEXT: lxvrbx v2, 0, r3
; CHECK-P9-NEXT: vspltb v2, v2, 7
; CHECK-P9-NEXT: blr
entry:
- %0 = load i8, i8* %a, align 1
+ %0 = load i8, ptr %a, align 1
 %vecins = insertelement <16 x i8> undef, i8 %0, i32 0
 ret <16 x i8> %vecins
}
; pre-increment load. The result was a crash when attempting to process an
; add with a token-chain operand.
-%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
-%struct.S1847 = type { [12 x i8], [4 x i8], [8 x i8], [4 x i8], [8 x i8], [2 x i8], i8, [4 x i64], i8, [3 x i8], [4 x i8], i8, i16, [4 x %struct.anon.76], i16, i8, i8* }
+%struct.Info = type { i32, i32, ptr, ptr, ptr, [32 x ptr], i64, [32 x i64], i64, i64, i64, [32 x i64] }
+%struct.S1847 = type { [12 x i8], [4 x i8], [8 x i8], [4 x i8], [8 x i8], [2 x i8], i8, [4 x i64], i8, [3 x i8], [4 x i8], i8, i16, [4 x %struct.anon.76], i16, i8, ptr }
%struct.anon.76 = type { i32 }
@info = common global %struct.Info zeroinitializer, align 8
@fails = common global i32 0, align 4
; Regression test for the pre-increment-load crash described above: an
; add with a token-chain operand. Performs i96 bitfield read-modify-write
; cycles on element 2 of @a1847 (declared elsewhere in the original test),
; plus several dead loads; the values themselves are unused.
define void @test1847() nounwind {
entry:
%j = alloca i32, align 4
; Dead loads kept from the original reduction.
- %0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
- %1 = load i32, i32* @fails, align 4
- %bf.load1 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ %0 = load i64, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 8), align 8
+ %1 = load i32, ptr @fails, align 4
+ %bf.load1 = load i96, ptr getelementptr inbounds ([5 x %struct.S1847], ptr @a1847, i32 0, i64 2), align 8
; First bitfield update: clear a field then set new bits.
%bf.clear2 = and i96 %bf.load1, 302231454903657293676543
%bf.set3 = or i96 %bf.clear2, -38383394772764476296921088
- store i96 %bf.set3, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
- %2 = load i32, i32* %j, align 4
- %3 = load i32, i32* %j, align 4
+ store i96 %bf.set3, ptr getelementptr inbounds ([5 x %struct.S1847], ptr @a1847, i32 0, i64 2), align 8
+ %2 = load i32, ptr %j, align 4
+ %3 = load i32, ptr %j, align 4
; j++ on the local alloca.
%inc11 = add nsw i32 %3, 1
- store i32 %inc11, i32* %j, align 4
- %bf.load15 = load i96, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ store i32 %inc11, ptr %j, align 4
+ %bf.load15 = load i96, ptr getelementptr inbounds ([5 x %struct.S1847], ptr @a1847, i32 0, i64 2), align 8
; Second bitfield update on the same i96 slot.
%bf.clear16 = and i96 %bf.load15, -18446744069414584321
%bf.set17 = or i96 %bf.clear16, 18446743532543672320
- store i96 %bf.set17, i96* bitcast (%struct.S1847* getelementptr inbounds ([5 x %struct.S1847], [5 x %struct.S1847]* @a1847, i32 0, i64 2) to i96*), align 8
+ store i96 %bf.set17, ptr getelementptr inbounds ([5 x %struct.S1847], ptr @a1847, i32 0, i64 2), align 8
ret void
}
; RUN: llc -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr -verify-machineinstrs -mcpu=pwr9 -mattr=+vsx \
; RUN: -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-P9-BE
; Returns the vector loaded from %vp1 with its two i64 elements swapped
; (mask <1,0> selects only from %v1; the %vp2 load is unused).
-define <2 x i64> @load_swap00(<2 x i64>* %vp1, <2 x i64>* %vp2) {
+define <2 x i64> @load_swap00(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap00:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: lxvd2x v2, 0, r3
; CHECK-P9-BE-NEXT: lxv v2, 0(r3)
; CHECK-P9-BE-NEXT: xxswapd v2, v2
; CHECK-P9-BE-NEXT: blr
- %v1 = load <2 x i64>, <2 x i64>* %vp1
- %v2 = load <2 x i64>, <2 x i64>* %vp2
+ %v1 = load <2 x i64>, ptr %vp1
+ %v2 = load <2 x i64>, ptr %vp2
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <2 x i32> <i32 1, i32 0>
ret <2 x i64> %v3
}
; Returns the vector loaded from %vp2 with its two i64 elements swapped
; (mask <3,2> selects only from %v2; the %vp1 load is unused).
-define <2 x i64> @load_swap01(<2 x i64>* %vp1, <2 x i64>* %vp2) {
+define <2 x i64> @load_swap01(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap01:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: lxvd2x v2, 0, r4
; CHECK-P9-BE-NEXT: lxv v2, 0(r4)
; CHECK-P9-BE-NEXT: xxswapd v2, v2
; CHECK-P9-BE-NEXT: blr
- %v1 = load <2 x i64>, <2 x i64>* %vp1
- %v2 = load <2 x i64>, <2 x i64>* %vp2
+ %v1 = load <2 x i64>, ptr %vp1
+ %v2 = load <2 x i64>, ptr %vp2
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <2 x i32> <i32 3, i32 2>
ret <2 x i64> %v3
}
; Returns the vector loaded from %vp1 with its four i32 elements reversed
; (mask <3,2,1,0>; the %vp2 load is unused).
-define <4 x i32> @load_swap10(<4 x i32>* %vp1, <4 x i32>* %vp2) {
+define <4 x i32> @load_swap10(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap10:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r4, r2, .LCPI2_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <4 x i32>, <4 x i32>* %vp1
- %v2 = load <4 x i32>, <4 x i32>* %vp2
+ %v1 = load <4 x i32>, ptr %vp1
+ %v2 = load <4 x i32>, ptr %vp2
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x i32> %v3
}
; Returns the vector loaded from %vp2 with its four i32 elements reversed
; (mask <7,6,5,4> selects only from %v2; the %vp1 load is unused).
-define <4 x i32> @load_swap11(<4 x i32>* %vp1, <4 x i32>* %vp2) {
+define <4 x i32> @load_swap11(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap11:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI3_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <4 x i32>, <4 x i32>* %vp1
- %v2 = load <4 x i32>, <4 x i32>* %vp2
+ %v1 = load <4 x i32>, ptr %vp1
+ %v2 = load <4 x i32>, ptr %vp2
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
ret <4 x i32> %v3
}
; Returns the vector loaded from %vp1 with its eight i16 elements reversed
; (the %vp2 load is unused).
-define <8 x i16> @load_swap20(<8 x i16>* %vp1, <8 x i16>* %vp2){
+define <8 x i16> @load_swap20(ptr %vp1, ptr %vp2){
; CHECK-P8-LABEL: load_swap20:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r4, r2, .LCPI4_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <8 x i16>, <8 x i16>* %vp1
- %v2 = load <8 x i16>, <8 x i16>* %vp2
+ %v1 = load <8 x i16>, ptr %vp1
+ %v2 = load <8 x i16>, ptr %vp2
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <8 x i16> %v3
}
; Returns the vector loaded from %vp2 with its eight i16 elements reversed
; (mask <15..8> selects only from %v2; the %vp1 load is unused).
-define <8 x i16> @load_swap21(<8 x i16>* %vp1, <8 x i16>* %vp2){
+define <8 x i16> @load_swap21(ptr %vp1, ptr %vp2){
; CHECK-P8-LABEL: load_swap21:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI5_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <8 x i16>, <8 x i16>* %vp1
- %v2 = load <8 x i16>, <8 x i16>* %vp2
+ %v1 = load <8 x i16>, ptr %vp1
+ %v2 = load <8 x i16>, ptr %vp2
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <8 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
ret <8 x i16> %v3
}
; Returns the vector loaded from %vp1 with all sixteen bytes reversed;
; lowered on P9-BE to a quadword byte-reverse (xxbrq). %vp2 is unused.
-define <16 x i8> @load_swap30(<16 x i8>* %vp1, <16 x i8>* %vp2){
+define <16 x i8> @load_swap30(ptr %vp1, ptr %vp2){
; CHECK-P8-LABEL: load_swap30:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r4, r2, .LCPI6_0@toc@ha
; CHECK-P9-BE-NEXT: lxv vs0, 0(r3)
; CHECK-P9-BE-NEXT: xxbrq v2, vs0
; CHECK-P9-BE-NEXT: blr
- %v1 = load <16 x i8>, <16 x i8>* %vp1
- %v2 = load <16 x i8>, <16 x i8>* %vp2
+ %v1 = load <16 x i8>, ptr %vp1
+ %v2 = load <16 x i8>, ptr %vp2
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x i8> %v3
}
; Returns the vector loaded from %vp2 with all sixteen bytes reversed
; (mask <31..16> selects only from %v2); P9-BE lowers to xxbrq.
-define <16 x i8> @load_swap31(<16 x i8>* %vp1, <16 x i8>* %vp2){
+define <16 x i8> @load_swap31(ptr %vp1, ptr %vp2){
; CHECK-P8-LABEL: load_swap31:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI7_0@toc@ha
; CHECK-P9-BE-NEXT: lxv vs0, 0(r4)
; CHECK-P9-BE-NEXT: xxbrq v2, vs0
; CHECK-P9-BE-NEXT: blr
- %v1 = load <16 x i8>, <16 x i8>* %vp1
- %v2 = load <16 x i8>, <16 x i8>* %vp2
+ %v1 = load <16 x i8>, ptr %vp1
+ %v2 = load <16 x i8>, ptr %vp2
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16>
ret <16 x i8> %v3
}
; Returns the vector loaded from %vp2 with its two doubles swapped
; (mask <3,2> selects only from %v2; the %vp1 load is unused).
-define <2 x double> @load_swap40(<2 x double>* %vp1, <2 x double>* %vp2) {
+define <2 x double> @load_swap40(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap40:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: lxvd2x v2, 0, r4
; CHECK-P9-BE-NEXT: lxv vs0, 0(r4)
; CHECK-P9-BE-NEXT: xxswapd v2, vs0
; CHECK-P9-BE-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %vp1
- %v2 = load <2 x double>, <2 x double>* %vp2
+ %v1 = load <2 x double>, ptr %vp1
+ %v2 = load <2 x double>, ptr %vp2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> <i32 3, i32 2>
ret <2 x double> %v3
}
; Returns the vector loaded from %vp1 with its four floats reversed
; (the %vp2 load is unused).
-define <4 x float> @load_swap50(<4 x float>* %vp1, <4 x float>* %vp2) {
+define <4 x float> @load_swap50(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap50:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r4, r2, .LCPI9_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <4 x float>, <4 x float>* %vp1
- %v2 = load <4 x float>, <4 x float>* %vp2
+ %v1 = load <4 x float>, ptr %vp1
+ %v2 = load <4 x float>, ptr %vp2
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x float> %v3
}
; Returns the vector loaded from %vp2 with its four floats reversed
; (mask <7,6,5,4> selects only from %v2; the %vp1 load is unused).
-define <4 x float> @load_swap51(<4 x float>* %vp1, <4 x float>* %vp2) {
+define <4 x float> @load_swap51(ptr %vp1, ptr %vp2) {
; CHECK-P8-LABEL: load_swap51:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI10_0@toc@ha
; CHECK-P9-BE-NEXT: lxv v3, 0(r3)
; CHECK-P9-BE-NEXT: vperm v2, v2, v2, v3
; CHECK-P9-BE-NEXT: blr
- %v1 = load <4 x float>, <4 x float>* %vp1
- %v2 = load <4 x float>, <4 x float>* %vp2
+ %v1 = load <4 x float>, ptr %vp1
+ %v2 = load <4 x float>, ptr %vp2
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
ret <4 x float> %v3
}
; Stores %v1 with its two i64 elements swapped to %vp (mask <1,0>;
; %v2 is unused).
-define void @swap_store00(<2 x i64> %v1, <2 x i64> %v2, <2 x i64>* %vp) {
+define void @swap_store00(<2 x i64> %v1, <2 x i64> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store00:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: stxvd2x v2, 0, r7
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <2 x i32> <i32 1, i32 0>
- store <2 x i64> %v3, <2 x i64>* %vp
+ store <2 x i64> %v3, ptr %vp
ret void
}
; Stores %v2 with its two i64 elements swapped to %vp (mask <3,2> selects
; only from %v2; %v1 is unused).
-define void @swap_store01(<2 x i64> %v1, <2 x i64> %v2, <2 x i64>* %vp) {
+define void @swap_store01(<2 x i64> %v1, <2 x i64> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store01:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: stxvd2x v3, 0, r7
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <2 x i32> <i32 3, i32 2>
- store <2 x i64> %v3, <2 x i64>* %vp
+ store <2 x i64> %v3, ptr %vp
ret void
}
; Stores %v1 with its four i32 elements reversed to %vp (%v2 is unused).
-define void @swap_store10(<4 x i32> %v1, <4 x i32> %v2, <4 x i32>* %vp) {
+define void @swap_store10(<4 x i32> %v1, <4 x i32> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store10:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI13_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- store <4 x i32> %v3, <4 x i32>* %vp
+ store <4 x i32> %v3, ptr %vp
ret void
}
; Stores %v2 with its four i32 elements reversed to %vp (mask <7,6,5,4>
; selects only from %v2; %v1 is unused).
-define void @swap_store11(<4 x i32> %v1, <4 x i32> %v2, <4 x i32>* %vp) {
+define void @swap_store11(<4 x i32> %v1, <4 x i32> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store11:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI14_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
- store <4 x i32> %v3, <4 x i32>* %vp
+ store <4 x i32> %v3, ptr %vp
ret void
}
; Stores %v1 with its eight i16 elements reversed to %vp (%v2 is unused).
-define void @swap_store20(<8 x i16> %v1, <8 x i16> %v2, <8 x i16>* %vp) {
+define void @swap_store20(<8 x i16> %v1, <8 x i16> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store20:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI15_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- store <8 x i16> %v3, <8 x i16>* %vp
+ store <8 x i16> %v3, ptr %vp
ret void
}
; Stores %v2 with its eight i16 elements reversed to %vp (mask <15..8>
; selects only from %v2; %v1 is unused).
-define void @swap_store21(<8 x i16> %v1, <8 x i16> %v2, <8 x i16>* %vp) {
+define void @swap_store21(<8 x i16> %v1, <8 x i16> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store21:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI16_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <8 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
- store <8 x i16> %v3, <8 x i16>* %vp
+ store <8 x i16> %v3, ptr %vp
ret void
}
; Stores %v1 with all sixteen bytes reversed to %vp (%v2 is unused).
-define void @swap_store30(<16 x i8> %v1, <16 x i8> %v2, <16 x i8>* %vp) {
+define void @swap_store30(<16 x i8> %v1, <16 x i8> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store30:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI17_0@toc@ha
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- store <16 x i8> %v3, <16 x i8>* %vp
+ store <16 x i8> %v3, ptr %vp
ret void
}
; Stores %v2 with all sixteen bytes reversed to %vp (mask <31..16>
; selects only from %v2; %v1 is unused).
-define void @swap_store31(<16 x i8> %v1, <16 x i8> %v2, <16 x i8>* %vp) {
+define void @swap_store31(<16 x i8> %v1, <16 x i8> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store31:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI18_0@toc@ha
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16>
- store <16 x i8> %v3, <16 x i8>* %vp
+ store <16 x i8> %v3, ptr %vp
ret void
}
; Stores %v1 with its two doubles swapped to %vp (mask <1,0>; %v2 is
; unused).
-define void @swap_store40(<2 x double> %v1, <2 x double> %v2, <2 x double>* %vp) {
+define void @swap_store40(<2 x double> %v1, <2 x double> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store40:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: stxvd2x v2, 0, r7
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> <i32 1, i32 0>
- store <2 x double> %v3, <2 x double>* %vp
+ store <2 x double> %v3, ptr %vp
ret void
}
; Stores %v2 with its two doubles swapped to %vp (mask <3,2> selects only
; from %v2; %v1 is unused).
-define void @swap_store41(<2 x double> %v1, <2 x double> %v2, <2 x double>* %vp) {
+define void @swap_store41(<2 x double> %v1, <2 x double> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store41:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: stxvd2x v3, 0, r7
; CHECK-P9-BE-NEXT: stxv vs0, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> <i32 3, i32 2>
- store <2 x double> %v3, <2 x double>* %vp
+ store <2 x double> %v3, ptr %vp
ret void
}
; Stores %v1 with its four floats reversed to %vp (%v2 is unused).
-define void @swap_store50(<4 x float> %v1, <4 x float> %v2, <4 x float>* %vp) {
+define void @swap_store50(<4 x float> %v1, <4 x float> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store50:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI21_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- store <4 x float> %v3, <4 x float>* %vp
+ store <4 x float> %v3, ptr %vp
ret void
}
; Stores %v2 with its four floats reversed to %vp (mask <7,6,5,4> selects
; only from %v2; %v1 is unused).
-define void @swap_store51(<4 x float> %v1, <4 x float> %v2, <4 x float>* %vp) {
+define void @swap_store51(<4 x float> %v1, <4 x float> %v2, ptr %vp) {
; CHECK-P8-LABEL: swap_store51:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: addis r3, r2, .LCPI22_0@toc@ha
; CHECK-P9-BE-NEXT: stxv v2, 0(r7)
; CHECK-P9-BE-NEXT: blr
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
- store <4 x float> %v3, <4 x float>* %vp
+ store <4 x float> %v3, ptr %vp
ret void
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
-define void @_Z4testSt7complexIfE(float %v0, float %v1, i64* %ref.tmp, float* %_M_value.realp.i.i, float* %_M_value.imagp.i.i) {
+define void @_Z4testSt7complexIfE(float %v0, float %v1, ptr %ref.tmp, ptr %_M_value.realp.i.i, ptr %_M_value.imagp.i.i) {
entry:
- %v2 = load i64, i64* %ref.tmp, align 8
+ %v2 = load i64, ptr %ref.tmp, align 8
%v3 = lshr i64 %v2, 32
%v4 = trunc i64 %v3 to i32
%v5 = bitcast i32 %v4 to float
%mul_ac.i.i = fmul float %v5, %v0
%mul_bd.i.i = fmul float %v7, %v1
%mul_r.i.i = fsub float %mul_ac.i.i, %mul_bd.i.i
- store float %mul_r.i.i, float* %_M_value.realp.i.i, align 4
- store float %mul_i.i.i, float* %_M_value.imagp.i.i, align 4
+ store float %mul_r.i.i, ptr %_M_value.realp.i.i, align 4
+ store float %mul_i.i.i, ptr %_M_value.imagp.i.i, align 4
ret void
; CHECK-LABEL: @_Z4testSt7complexIfE
; CHECK: blr
}
-define i64* @_Z4testSt7complexIfE_idx(float %v0, float %v1, i64* %ref.tmp, float* %_M_value.realp.i.i, float* %_M_value.imagp.i.i) {
+define ptr @_Z4testSt7complexIfE_idx(float %v0, float %v1, ptr %ref.tmp, ptr %_M_value.realp.i.i, ptr %_M_value.imagp.i.i) {
entry:
- %r = getelementptr i64, i64* %ref.tmp, i64 1
- %v2 = load i64, i64* %r, align 8
+ %r = getelementptr i64, ptr %ref.tmp, i64 1
+ %v2 = load i64, ptr %r, align 8
%v3 = lshr i64 %v2, 32
%v4 = trunc i64 %v3 to i32
%v5 = bitcast i32 %v4 to float
%mul_ac.i.i = fmul float %v5, %v0
%mul_bd.i.i = fmul float %v7, %v1
%mul_r.i.i = fsub float %mul_ac.i.i, %mul_bd.i.i
- store float %mul_r.i.i, float* %_M_value.realp.i.i, align 4
- store float %mul_i.i.i, float* %_M_value.imagp.i.i, align 4
- ret i64* %r
+ store float %mul_r.i.i, ptr %_M_value.realp.i.i, align 4
+ store float %mul_i.i.i, ptr %_M_value.imagp.i.i, align 4
+ ret ptr %r
; CHECK-LABEL: @_Z4testSt7complexIfE
; CHECK-NOT: ld {{[0-9]+}}, 8(5)
; RUN: --check-prefix=CHECK-AIX-32 -implicit-check-not vmrg \
; RUN: -implicit-check-not=vperm %s
; Loads 4 bytes from %s and splats them four times across a <16 x i8>
; result (%t is unused); the LE check expects an lfiwzx-based lowering.
-define <16 x i8> @test(i32* %s, i32* %t) {
+define <16 x i8> @test(ptr %s, ptr %t) {
; CHECK-LE-LABEL: test:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lfiwzx f0, 0, r3
; CHECK-AIX-32-NEXT: blr
entry:
- %0 = bitcast i32* %s to <4 x i8>*
- %1 = load <4 x i8>, <4 x i8>* %0, align 4
- %2 = shufflevector <4 x i8> %1, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- ret <16 x i8> %2
+ %0 = load <4 x i8>, ptr %s, align 4
+ %1 = shufflevector <4 x i8> %0, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <16 x i8> %1
}
ret i32 %retval.0
}
-define void @neg_truncate_i32_eq(i32 *%ptr) {
+define void @neg_truncate_i32_eq(ptr %ptr) {
; CHECK-LABEL: neg_truncate_i32_eq:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: bclr 12, eq, 0
; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%rem17127 = and i32 %0, 1
%cmp18 = icmp eq i32 %rem17127, 0
br label %if.else
ret i64 %retval.0
}
-define void @neg_truncate_i64_eq(i64 *%ptr) {
+define void @neg_truncate_i64_eq(ptr %ptr) {
; CHECK-LABEL: neg_truncate_i64_eq:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: bclr 12, eq, 0
; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
- %0 = load i64, i64* %ptr, align 4
+ %0 = load i64, ptr %ptr, align 4
%rem17127 = and i64 %0, 1
%cmp18 = icmp eq i64 %rem17127, 0
br label %if.else
ret i64 %retval.0
}
-define void @neg_truncate_i64_ne(i64 *%ptr) {
+define void @neg_truncate_i64_ne(ptr %ptr) {
; CHECK-LABEL: neg_truncate_i64_ne:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: bclr 12, gt, 0
; CHECK-NEXT: # %bb.1: # %if.end29.thread136
entry:
- %0 = load i64, i64* %ptr, align 4
+ %0 = load i64, ptr %ptr, align 4
%rem17127 = and i64 %0, 1
%cmp18 = icmp ne i64 %rem17127, 0
br label %if.else
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -disable-ppc-innermost-loop-align32 -mtriple powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefixes=CHECK,PWR-DISABLE-PPC-INNERMOST-LOOP-ALIGN32
-%struct.parm = type { i32*, i32, i32 }
+%struct.parm = type { ptr, i32, i32 }
; Test the loop alignment when the innermost hot loop has more than 8 instructions.
-define void @big_loop(%struct.parm* %arg) {
+define void @big_loop(ptr %arg) {
entry:
- %localArg.sroa.0.0..sroa_idx = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 0
- %localArg.sroa.0.0.copyload = load i32*, i32** %localArg.sroa.0.0..sroa_idx, align 8
- %localArg.sroa.4.0..sroa_idx56 = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 1
- %localArg.sroa.4.0.copyload = load i32, i32* %localArg.sroa.4.0..sroa_idx56, align 8
- %localArg.sroa.5.0..sroa_idx58 = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 2
- %localArg.sroa.5.0.copyload = load i32, i32* %localArg.sroa.5.0..sroa_idx58, align 4
+ %localArg.sroa.0.0.copyload = load ptr, ptr %arg, align 8
+ %localArg.sroa.4.0..sroa_idx56 = getelementptr inbounds %struct.parm, ptr %arg, i64 0, i32 1
+ %localArg.sroa.4.0.copyload = load i32, ptr %localArg.sroa.4.0..sroa_idx56, align 8
+ %localArg.sroa.5.0..sroa_idx58 = getelementptr inbounds %struct.parm, ptr %arg, i64 0, i32 2
+ %localArg.sroa.5.0.copyload = load i32, ptr %localArg.sroa.5.0..sroa_idx58, align 4
%0 = sext i32 %localArg.sroa.5.0.copyload to i64
br label %do.body
do.body3: ; preds = %do.body3, %do.body
%indvars.iv = phi i64 [ %indvars.iv.next, %do.body3 ], [ %0, %do.body ]
%1 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %1
+ %arrayidx = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %1
%2 = add nsw i64 %indvars.iv, 3
%3 = trunc i64 %1 to i32
%4 = add nsw i64 %indvars.iv, 4
- %arrayidx10 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %2
+ %arrayidx10 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %2
%5 = trunc i64 %2 to i32
- store i32 %5, i32* %arrayidx10, align 4
- %arrayidx12 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %4
+ store i32 %5, ptr %arrayidx10, align 4
+ %arrayidx12 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %4
%6 = trunc i64 %4 to i32
- store i32 %6, i32* %arrayidx12, align 4
- store i32 %3, i32* %arrayidx, align 4
- %arrayidx21 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %indvars.iv
+ store i32 %6, ptr %arrayidx12, align 4
+ store i32 %3, ptr %arrayidx, align 4
+ %arrayidx21 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %indvars.iv
%7 = trunc i64 %indvars.iv to i32
%8 = add i32 %7, 1
- store i32 %8, i32* %arrayidx21, align 4
+ store i32 %8, ptr %arrayidx21, align 4
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%9 = icmp eq i64 %indvars.iv, 0
br i1 %9, label %do.end, label %do.body3
br i1 %tobool25, label %do.end26, label %do.body
do.end26: ; preds = %do.end
- %arrayidx28 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %0
- store i32 0, i32* %arrayidx28, align 4
+ %arrayidx28 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %0
+ store i32 0, ptr %arrayidx28, align 4
ret void
}
; Test the loop alignment when the innermost hot loop has 5-8 instructions.
-define void @general_loop(i32* %s, i64 %m) {
+define void @general_loop(ptr %s, i64 %m) {
entry:
%tobool40 = icmp eq i64 %m, 0
br i1 %tobool40, label %while.end18, label %while.body3.lr.ph
while.body3: ; preds = %while.body3.lr.ph, %while.body3
%n.039 = phi i64 [ %m.addr.041, %while.body3.lr.ph ], [ %dec16, %while.body3 ]
%inc = add nsw i64 %n.039, 1
- %arrayidx = getelementptr inbounds i32, i32* %s, i64 %n.039
+ %arrayidx = getelementptr inbounds i32, ptr %s, i64 %n.039
%inc5 = add nsw i64 %n.039, 2
- %arrayidx6 = getelementptr inbounds i32, i32* %s, i64 %inc
+ %arrayidx6 = getelementptr inbounds i32, ptr %s, i64 %inc
%sub = sub nsw i64 %dec, %inc5
%conv7 = trunc i64 %sub to i32
- %arrayidx9 = getelementptr inbounds i32, i32* %s, i64 %inc5
- store i32 %conv7, i32* %arrayidx9, align 4
- store i32 %conv11, i32* %arrayidx6, align 4
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx9 = getelementptr inbounds i32, ptr %s, i64 %inc5
+ store i32 %conv7, ptr %arrayidx9, align 4
+ store i32 %conv11, ptr %arrayidx6, align 4
+ store i32 %conv, ptr %arrayidx, align 4
%dec16 = add nsw i64 %n.039, -1
%tobool2 = icmp eq i64 %dec16, 0
br i1 %tobool2, label %while.cond.loopexit, label %while.body3
}
; Test the loop alignment when the innermost cold loop has more than 8 instructions.
-define void @big_loop_cold_innerloop(%struct.parm* %arg) {
+define void @big_loop_cold_innerloop(ptr %arg) {
entry:
- %localArg.sroa.0.0..sroa_idx = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 0
- %localArg.sroa.0.0.copyload = load i32*, i32** %localArg.sroa.0.0..sroa_idx, align 8
- %localArg.sroa.4.0..sroa_idx56 = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 1
- %localArg.sroa.4.0.copyload = load i32, i32* %localArg.sroa.4.0..sroa_idx56, align 8
- %localArg.sroa.5.0..sroa_idx58 = getelementptr inbounds %struct.parm, %struct.parm* %arg, i64 0, i32 2
- %localArg.sroa.5.0.copyload = load i32, i32* %localArg.sroa.5.0..sroa_idx58, align 4
+ %localArg.sroa.0.0.copyload = load ptr, ptr %arg, align 8
+ %localArg.sroa.4.0..sroa_idx56 = getelementptr inbounds %struct.parm, ptr %arg, i64 0, i32 1
+ %localArg.sroa.4.0.copyload = load i32, ptr %localArg.sroa.4.0..sroa_idx56, align 8
+ %localArg.sroa.5.0..sroa_idx58 = getelementptr inbounds %struct.parm, ptr %arg, i64 0, i32 2
+ %localArg.sroa.5.0.copyload = load i32, ptr %localArg.sroa.5.0..sroa_idx58, align 4
%0 = sext i32 %localArg.sroa.5.0.copyload to i64
br label %do.body
do.body3: ; preds = %do.body3, %do.body
%indvars.iv = phi i64 [ %indvars.iv.next, %do.body3 ], [ %0, %do.body ]
%1 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %1
+ %arrayidx = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %1
%2 = add nsw i64 %indvars.iv, 3
%3 = trunc i64 %1 to i32
%4 = add nsw i64 %indvars.iv, 4
- %arrayidx10 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %2
+ %arrayidx10 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %2
%5 = trunc i64 %2 to i32
- store i32 %5, i32* %arrayidx10, align 4
- %arrayidx12 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %4
+ store i32 %5, ptr %arrayidx10, align 4
+ %arrayidx12 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %4
%6 = trunc i64 %4 to i32
- store i32 %6, i32* %arrayidx12, align 4
- store i32 %3, i32* %arrayidx, align 4
- %arrayidx21 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %indvars.iv
+ store i32 %6, ptr %arrayidx12, align 4
+ store i32 %3, ptr %arrayidx, align 4
+ %arrayidx21 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %indvars.iv
%7 = trunc i64 %indvars.iv to i32
%8 = add i32 %7, 1
- store i32 %8, i32* %arrayidx21, align 4
+ store i32 %8, ptr %arrayidx21, align 4
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%9 = icmp eq i64 %indvars.iv, 0
br i1 %9, label %do.end, label %do.body3
br i1 %tobool25, label %do.end26, label %do.body
do.end26: ; preds = %do.end
- %arrayidx28 = getelementptr inbounds i32, i32* %localArg.sroa.0.0.copyload, i64 %0
- store i32 0, i32* %arrayidx28, align 4
+ %arrayidx28 = getelementptr inbounds i32, ptr %localArg.sroa.0.0.copyload, i64 %0
+ store i32 0, ptr %arrayidx28, align 4
ret void
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64le-linux-gnu < %s | FileCheck %s -check-prefix=PPC64LE
; Performs a monotonic/monotonic i8 cmpxchg and discards the result;
; checks the byte-sized compare-and-swap expansion on ppc64le.
-define void @test(i8* %ptr, i8 %cmp, i8 %val) {
+define void @test(ptr %ptr, i8 %cmp, i8 %val) {
; PPC64LE-LABEL: test:
; PPC64LE: # %bb.0:
; PPC64LE-NEXT: clrlwi 4, 4, 24
; PPC64LE-NEXT: bne 0, .LBB0_1
; PPC64LE-NEXT: # %bb.3:
; PPC64LE-NEXT: blr
- %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
+ %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
ret void
}
target triple = "powerpc64le-unknown-linux"
; Function Attrs: nounwind
-define void @foo(double* %x, double* nocapture readonly %y) #0 {
+define void @foo(ptr %x, ptr nocapture readonly %y) #0 {
entry:
- %scevgep = getelementptr double, double* %x, i64 1599
- %scevgep20 = getelementptr double, double* %y, i64 1599
+ %scevgep = getelementptr double, ptr %x, i64 1599
+ %scevgep20 = getelementptr double, ptr %y, i64 1599
br label %vector.memcheck
vector.memcheck: ; preds = %for.end, %entry
%j.015 = phi i32 [ 0, %entry ], [ %inc7, %for.end ]
- %bound0 = icmp uge double* %scevgep20, %x
- %bound1 = icmp uge double* %scevgep, %y
+ %bound0 = icmp uge ptr %scevgep20, %x
+ %bound1 = icmp uge ptr %scevgep, %y
%memcheck.conflict = and i1 %bound0, %bound1
br i1 %memcheck.conflict, label %middle.block, label %vector.body
vector.body: ; preds = %vector.memcheck, %vector.body
%index = phi i64 [ %index.next, %vector.body ], [ 0, %vector.memcheck ]
- %0 = getelementptr inbounds double, double* %y, i64 %index
- %1 = bitcast double* %0 to <4 x double>*
- %wide.load = load <4 x double>, <4 x double>* %1, align 8
- %2 = fadd <4 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %3 = getelementptr inbounds double, double* %x, i64 %index
- %4 = bitcast double* %3 to <4 x double>*
- store <4 x double> %2, <4 x double>* %4, align 8
+ %0 = getelementptr inbounds double, ptr %y, i64 %index
+ %wide.load = load <4 x double>, ptr %0, align 8
+ %1 = fadd <4 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
+ %2 = getelementptr inbounds double, ptr %x, i64 %index
+ store <4 x double> %1, ptr %2, align 8
%index.next = add i64 %index, 4
- %5 = icmp eq i64 %index.next, 1600
- br i1 %5, label %middle.block, label %vector.body
+ %3 = icmp eq i64 %index.next, 1600
+ br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body, %vector.memcheck
%resume.val = phi i1 [ false, %vector.memcheck ], [ true, %vector.body ]
for.body3: ; preds = %middle.block, %for.body3
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ %trunc.resume.val, %middle.block ]
- %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %6 = load double, double* %arrayidx, align 8
- %add = fadd double %6, 1.000000e+00
- %arrayidx5 = getelementptr inbounds double, double* %x, i64 %indvars.iv
- store double %add, double* %arrayidx5, align 8
+ %arrayidx = getelementptr inbounds double, ptr %y, i64 %indvars.iv
+ %4 = load double, ptr %arrayidx, align 8
+ %add = fadd double %4, 1.000000e+00
+ %arrayidx5 = getelementptr inbounds double, ptr %x, i64 %indvars.iv
+ store double %add, ptr %arrayidx5, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond, label %for.end, label %for.body3
for.end: ; preds = %middle.block, %for.body3
- tail call void @bar(double* %x) #2
+ tail call void @bar(ptr %x) #2
%inc7 = add nuw nsw i32 %j.015, 1
%exitcond16 = icmp eq i32 %inc7, 100
br i1 %exitcond16, label %for.end8, label %vector.memcheck
; CHECK: dcbt
}
-declare void @bar(double*) #1
+declare void @bar(ptr) #1
attributes #0 = { nounwind "target-cpu"="a2q" }
attributes #1 = { "target-cpu"="a2q" }
target triple = "powerpc64le-unknown-linux"
; Function Attrs: nounwind
-define void @foo(double* nocapture %a, double* nocapture readonly %b) #0 {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %b) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond, label %for.end, label %for.body
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -hoist-const-stores -ppc-stack-ptr-caller-preserved < %s | FileCheck %s -check-prefix=CHECKBE
; Test hoist out of single loop
-define signext i32 @test1(i32 signext %lim, i32 (i32)* nocapture %Func) {
+define signext i32 @test1(i32 signext %lim, ptr nocapture %Func) {
entry:
; CHECK-LABEL: test1
; CHECK: for.body.preheader
}
; Test hoist of nested loop goes to outter loop preheader
-define signext i32 @test2(i32 signext %lim, i32 (i32)* nocapture %Func) {
+define signext i32 @test2(i32 signext %lim, ptr nocapture %Func) {
entry:
; CHECK-LABEL: test2
; CHECK: for.body4.lr.ph.preheader
; Test hoist out of if statement with low branch probability
; FIXME: we shouldn't hoist in such cases as it could increase the number
; of stores after hoisting.
-define signext i32 @test3(i32 signext %lim, i32 (i32)* nocapture %Func) {
+define signext i32 @test3(i32 signext %lim, ptr nocapture %Func) {
entry:
; CHECK-LABEL: test3
; CHECK: %for.body.lr.ph
; return sum;
; }
-define i64 @foo(i8* %p, i32 signext %n, i32 signext %count) {
+define i64 @foo(ptr %p, i32 signext %n, i32 signext %count) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpwi r4, 0
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%i.019 = phi i32 [ 0, %for.body.preheader ], [ %inc, %for.body ]
%sum.018 = phi i64 [ 0, %for.body.preheader ], [ %add5, %for.body ]
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
- %add.ptr1 = getelementptr inbounds i8, i8* %add.ptr, i64 5
- %1 = bitcast i8* %add.ptr1 to i64*
- %2 = load i64, i64* %1, align 8
- %add = add i64 %2, %sum.018
- %add.ptr4 = getelementptr inbounds i8, i8* %add.ptr, i64 9
- %3 = bitcast i8* %add.ptr4 to i64*
- %4 = load i64, i64* %3, align 8
- %add5 = add i64 %add, %4
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 %indvars.iv
+ %add.ptr1 = getelementptr inbounds i8, ptr %add.ptr, i64 5
+ %1 = load i64, ptr %add.ptr1, align 8
+ %add = add i64 %1, %sum.018
+ %add.ptr4 = getelementptr inbounds i8, ptr %add.ptr, i64 9
+ %2 = load i64, ptr %add.ptr4, align 8
+ %add5 = add i64 %add, %2
%indvars.iv.next = add nsw i64 %indvars.iv, %0
%inc = add nuw nsw i32 %i.019, 1
%exitcond.not = icmp eq i32 %inc, %n
; return sum;
; }
-define zeroext i8 @foo1(i8* %p, i32 signext %n, i32 signext %count) {
+define zeroext i8 @foo1(ptr %p, i32 signext %n, i32 signext %count) {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpwi r4, 0
for.body.preheader: ; preds = %entry
%0 = sext i32 %count to i64
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 1000
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 1000
br label %for.body
for.cond.cleanup: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%i.013 = phi i32 [ 0, %for.body.preheader ], [ %inc, %for.body ]
%sum.012 = phi i8 [ 0, %for.body.preheader ], [ %add, %for.body ]
- %add.ptr1 = getelementptr inbounds i8, i8* %add.ptr, i64 %indvars.iv
- %1 = load i8, i8* %add.ptr1, align 1
+ %add.ptr1 = getelementptr inbounds i8, ptr %add.ptr, i64 %indvars.iv
+ %1 = load i8, ptr %add.ptr1, align 1
%add = add i8 %1, %sum.012
%indvars.iv.next = add nsw i64 %indvars.iv, %0
%inc = add nuw nsw i32 %i.013, 1
%_elem_type_of_x = type <{ double }>
%_elem_type_of_y = type <{ double }>
-define void @foo(i64* %.n, [0 x %_elem_type_of_x]* %.x, [0 x %_elem_type_of_y]* %.y, <2 x double>* %.sum) {
+define void @foo(ptr %.n, ptr %.x, ptr %.y, ptr %.sum) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r5, 0(r3)
; CHECK-BE-NEXT: stxv vs0, 0(r6)
; CHECK-BE-NEXT: blr
entry:
- %_val_n_2 = load i64, i64* %.n, align 8
+ %_val_n_2 = load i64, ptr %.n, align 8
%_grt_tmp7 = icmp slt i64 %_val_n_2, 1
br i1 %_grt_tmp7, label %_return_bb, label %_loop_1_do_.lr.ph
_loop_1_do_.lr.ph: ; preds = %entry
- %x_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_x], [0 x %_elem_type_of_x]* %.x, i64 0, i64 -1
- %.sum.promoted = load <2 x double>, <2 x double>* %.sum, align 16
+ %x_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_x], ptr %.x, i64 0, i64 -1
+ %.sum.promoted = load <2 x double>, ptr %.sum, align 16
br label %_loop_1_do_
_loop_1_do_: ; preds = %_loop_1_do_.lr.ph, %_loop_1_do_
%_val_sum_9 = phi <2 x double> [ %.sum.promoted, %_loop_1_do_.lr.ph ], [ %_add_tmp49, %_loop_1_do_ ]
%i.08 = phi i64 [ 1, %_loop_1_do_.lr.ph ], [ %_loop_1_update_loop_ix, %_loop_1_do_ ]
- %x_ix_dim_0_6 = getelementptr %_elem_type_of_x, %_elem_type_of_x* %x_rvo_based_addr_5, i64 %i.08
- %x_ix_dim_0_ = bitcast %_elem_type_of_x* %x_ix_dim_0_6 to i8*
- %0 = getelementptr i8, i8* %x_ix_dim_0_, i64 1
- %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %0)
+ %x_ix_dim_0_6 = getelementptr %_elem_type_of_x, ptr %x_rvo_based_addr_5, i64 %i.08
+ %0 = getelementptr i8, ptr %x_ix_dim_0_6, i64 1
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
%2 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %1)
%.fca.0.extract1 = extractvalue { <16 x i8>, <16 x i8> } %2, 0
%.fca.1.extract2 = extractvalue { <16 x i8>, <16 x i8> } %2, 1
- %3 = getelementptr i8, i8* %x_ix_dim_0_, i64 33
- %4 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %3)
+ %3 = getelementptr i8, ptr %x_ix_dim_0_6, i64 33
+ %4 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %3)
%5 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %4)
%.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %5, 0
%.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %5, 1
br i1 %_grt_tmp, label %_loop_1_loopHeader_._return_bb_crit_edge, label %_loop_1_do_
_loop_1_loopHeader_._return_bb_crit_edge: ; preds = %_loop_1_do_
- store <2 x double> %_add_tmp49, <2 x double>* %.sum, align 16
+ store <2 x double> %_add_tmp49, ptr %.sum, align 16
br label %_return_bb
_return_bb: ; preds = %_loop_1_loopHeader_._return_bb_crit_edge, %entry
ret void
}
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr)
declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>)
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @foo(double* nocapture %x, double* nocapture readonly %y) #0 {
+define void @foo(ptr nocapture %x, ptr nocapture readonly %y) #0 {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %y, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
- store double %add, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %x, i64 %indvars.iv
+ store double %add, ptr %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond19 = icmp eq i64 %indvars.iv.next, 1600
br i1 %exitcond19, label %for.body7, label %for.body
for.body7: ; preds = %for.body, %for.body7
%i3.017 = phi i32 [ %inc9, %for.body7 ], [ 0, %for.body ]
- tail call void bitcast (void (...)* @bar to void ()*)() #0
+ tail call void @bar() #0
%inc9 = add nuw nsw i32 %i3.017, 1
%exitcond = icmp eq i32 %inc9, 1024
br i1 %exitcond, label %for.cond.cleanup6, label %for.body7
define i32 @test_load() {
entry:
- %0 = load i32, i32* @a
+ %0 = load i32, ptr @a
ret i32 %0
}
@b = common global i32 0
define void @test_store(i32 %0) {
- store i32 %0, i32* @b
+ store i32 %0, ptr @b
ret void
}
; RUN: -print-before=simple-register-coalescing 2>&1 < %s | FileCheck \
; RUN: --check-prefix=SMALL %s
-@msg = common global i8* null, align 4
-@ptr = common global i8* null, align 4
+@msg = common global ptr null, align 4
+@ptr = common global ptr null, align 4
define void @foo() {
entry:
; LARGE: %4:gprc_and_gprc_nor0 = LWZtocL @ptr, %3:gprc_and_gprc_nor0, implicit $r2 :: (load (s32) from got)
; LARGE: STW %2:gprc, 0, %4:gprc_and_gprc_nor0 :: (store (s32) into @ptr)
- %0 = load i8*, i8** @msg, align 4
- store i8* %0, i8** @ptr, align 4
+ %0 = load ptr, ptr @msg, align 4
+ store ptr %0, ptr @ptr, align 4
ret void
}
define zeroext i32 @test_load() {
entry:
- %0 = load i32, i32* @a
+ %0 = load i32, ptr @a
ret i32 %0
}
@b = common global i32 0
define void @test_store(i32 zeroext %0) {
- store i32 %0, i32* @b
+ store i32 %0, ptr @b
ret void
}
; RUN: -print-before=simple-register-coalescing 2>&1 < %s | FileCheck \
; RUN: --check-prefix=SMALL %s
-@msg = common global i8* null, align 8
-@ptr = common global i8* null, align 8
+@msg = common global ptr null, align 8
+@ptr = common global ptr null, align 8
define void @foo() {
entry:
; LARGE: %4:g8rc_and_g8rc_nox0 = LDtocL @ptr, %3:g8rc_and_g8rc_nox0, implicit $x2 :: (load (s64) from got)
; LARGE: STD %2:g8rc, 0, %4:g8rc_and_g8rc_nox0 :: (store (s64) into @ptr)
- %0 = load i8*, i8** @msg, align 8
- store i8* %0, i8** @ptr, align 8
+ %0 = load ptr, ptr @msg, align 8
+ store ptr %0, ptr @ptr, align 8
ret void
}
; RUN: llc -enable-ppc-gen-scalar-mass -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK-LNX %s
; RUN: llc -enable-ppc-gen-scalar-mass -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=CHECK-AIX %s
-define void @cos_f64(double* %arg) {
+define void @cos_f64(ptr %arg) {
; CHECK-LNX-LABEL: cos_f64:
; CHECK-LNX: # %bb.0: # %bb
; CHECK-LNX-NEXT: mflr 0
; CHECK-AIX-NEXT: stfd 0, 0(3)
; CHECK-AIX-NEXT: b L..BB0_1
bb:
- %i = bitcast double* %arg to i8*
- %i1 = getelementptr i8, i8* %i, i64 undef
+ %i1 = getelementptr i8, ptr %arg, i64 undef
br label %bb2
bb2:
- %i3 = getelementptr inbounds i8, i8* %i1, i64 undef
- %i4 = bitcast i8* %i3 to double*
- store double undef, double* %i4, align 8
- %i5 = getelementptr inbounds i8, i8* %i1, i64 0
- %i6 = bitcast i8* %i5 to double*
+ %i3 = getelementptr inbounds i8, ptr %i1, i64 undef
+ store double undef, ptr %i3, align 8
%i7 = tail call afn double @llvm.sqrt.f64(double undef)
%i8 = fmul afn double undef, 0x401921FB54442D28
%i9 = tail call afn double @llvm.cos.f64(double %i8) #2
%i10 = fmul afn double %i7, %i9
- store double %i10, double* %i6, align 8
+ store double %i10, ptr %i1, align 8
br label %bb2
}
-define void @log_f64(double* %arg) {
+define void @log_f64(ptr %arg) {
; CHECK-LNX-LABEL: log_f64:
; CHECK-LNX: # %bb.0: # %bb
; CHECK-LNX-NEXT: mflr 0
; CHECK-AIX-NEXT: stfd 0, 0(3)
; CHECK-AIX-NEXT: b L..BB1_1
bb:
- %i = bitcast double* %arg to i8*
- %i1 = getelementptr i8, i8* %i, i64 undef
+ %i1 = getelementptr i8, ptr %arg, i64 undef
br label %bb2
bb2:
- %i3 = getelementptr inbounds i8, i8* %i1, i64 undef
- %i4 = bitcast i8* %i3 to double*
- store double undef, double* %i4, align 8
- %i5 = getelementptr inbounds i8, i8* %i1, i64 0
- %i6 = bitcast i8* %i5 to double*
+ %i3 = getelementptr inbounds i8, ptr %i1, i64 undef
+ store double undef, ptr %i3, align 8
%i7 = tail call afn double @llvm.sqrt.f64(double undef)
%i8 = fmul afn double undef, 0x401921FB54442D28
%i9 = tail call afn double @llvm.log.f64(double %i8) #2
%i10 = fmul afn double %i7, %i9
- store double %i10, double* %i6, align 8
+ store double %i10, ptr %i1, align 8
br label %bb2
}
; RUN: llc -enable-ppc-gen-scalar-mass -O3 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefix=CHECK-LNX %s
; RUN: llc -enable-ppc-gen-scalar-mass -O3 -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck --check-prefix=CHECK-AIX %s
-define void @cos_f64(double* %arg) {
+define void @cos_f64(ptr %arg) {
; CHECK-LNX-LABEL: cos_f64:
; CHECK-LNX: # %bb.0: # %bb
; CHECK-LNX-NEXT: mflr 0
; CHECK-AIX-NEXT: stfd 0, 0(3)
; CHECK-AIX-NEXT: b L..BB0_1
bb:
- %i = bitcast double* %arg to i8*
- %i1 = getelementptr i8, i8* %i, i64 undef
+ %i1 = getelementptr i8, ptr %arg, i64 undef
br label %bb2
bb2:
- %i3 = getelementptr inbounds i8, i8* %i1, i64 undef
- %i4 = bitcast i8* %i3 to double*
- store double undef, double* %i4, align 8
- %i5 = getelementptr inbounds i8, i8* %i1, i64 0
- %i6 = bitcast i8* %i5 to double*
+ %i3 = getelementptr inbounds i8, ptr %i1, i64 undef
+ store double undef, ptr %i3, align 8
%i7 = tail call fast double @llvm.sqrt.f64(double undef)
%i8 = fmul fast double undef, 0x401921FB54442D28
%i9 = tail call fast double @llvm.cos.f64(double %i8) #2
%i10 = fmul fast double %i7, %i9
- store double %i10, double* %i6, align 8
+ store double %i10, ptr %i1, align 8
br label %bb2
}
-define void @log_f64(double* %arg) {
+define void @log_f64(ptr %arg) {
; CHECK-LNX-LABEL: log_f64:
; CHECK-LNX: # %bb.0: # %bb
; CHECK-LNX-NEXT: mflr 0
; CHECK-AIX-NEXT: stfd 0, 0(3)
; CHECK-AIX-NEXT: b L..BB1_1
bb:
- %i = bitcast double* %arg to i8*
- %i1 = getelementptr i8, i8* %i, i64 undef
+ %i1 = getelementptr i8, ptr %arg, i64 undef
br label %bb2
bb2:
- %i3 = getelementptr inbounds i8, i8* %i1, i64 undef
- %i4 = bitcast i8* %i3 to double*
- store double undef, double* %i4, align 8
- %i5 = getelementptr inbounds i8, i8* %i1, i64 0
- %i6 = bitcast i8* %i5 to double*
+ %i3 = getelementptr inbounds i8, ptr %i1, i64 undef
+ store double undef, ptr %i3, align 8
%i7 = tail call fast double @llvm.sqrt.f64(double undef)
%i8 = fmul fast double undef, 0x401921FB54442D28
%i9 = tail call fast double @llvm.log.f64(double %i8) #2
%i10 = fmul fast double %i7, %i9
- store double %i10, double* %i6, align 8
+ store double %i10, ptr %i1, align 8
br label %bb2
}
%v = alloca [8200 x i32], align 4
%w = alloca [8200 x i32], align 4
%q = alloca [8200 x i32], align 4
- %0 = bitcast [8200 x i32]* %v to i8*
- call void @llvm.lifetime.start.p0i8(i64 32800, i8* %0) #0
- %1 = bitcast [8200 x i32]* %w to i8*
- call void @llvm.lifetime.start.p0i8(i64 32800, i8* %1) #0
- %2 = bitcast [8200 x i32]* %q to i8*
- call void @llvm.lifetime.start.p0i8(i64 32800, i8* %2) #0
- %arraydecay = getelementptr inbounds [8200 x i32], [8200 x i32]* %q, i64 0, i64 0
- %arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
- %arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
- call void @bar(i32* %arraydecay, i32* %arraydecay1, i32* %arraydecay2) #0
- %3 = load i32, i32* %arraydecay2, align 4
- %arrayidx3 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 1
- %4 = load i32, i32* %arrayidx3, align 4
+ call void @llvm.lifetime.start.p0(i64 32800, ptr %v) #0
+ call void @llvm.lifetime.start.p0(i64 32800, ptr %w) #0
+ call void @llvm.lifetime.start.p0(i64 32800, ptr %q) #0
+ call void @bar(ptr %q, ptr %v, ptr %w) #0
+ %0 = load i32, ptr %w, align 4
+ %arrayidx3 = getelementptr inbounds [8200 x i32], ptr %w, i64 0, i64 1
+ %1 = load i32, ptr %arrayidx3, align 4
; CHECK: @foo
; CHECK-NOT: lwzx
; CHECK: lwz {{[0-9]+}}, 4([[REG]])
; CHECK: blr
- %add = add nsw i32 %4, %3
- call void @llvm.lifetime.end.p0i8(i64 32800, i8* %2) #0
- call void @llvm.lifetime.end.p0i8(i64 32800, i8* %1) #0
- call void @llvm.lifetime.end.p0i8(i64 32800, i8* %0) #0
+ %add = add nsw i32 %1, %0
+ call void @llvm.lifetime.end.p0(i64 32800, ptr %q) #0
+ call void @llvm.lifetime.end.p0(i64 32800, ptr %w) #0
+ call void @llvm.lifetime.end.p0(i64 32800, ptr %v) #0
ret i32 %add
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @bar(i32*, i32*, i32*)
+declare void @bar(ptr, ptr, ptr)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
attributes #0 = { nounwind }
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr9 -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
-; void foo(float *data, float d) {
+; void foo(ptr data, float d) {
; long i;
; for (i = 0; i < 8000; i++)
; data[i] = d;
; icmp for loop iteration index and loop trip count(384) has LSRUse for 'reg({0,+,384})'.
; Make sure above icmp does not impact LSR choose best formulae sets based on 'reg({(192 + %0),+,384})'
-define void @foo(float* nocapture %data, float %d) {
+define void @foo(ptr nocapture %data, float %d) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpspn 0, 1
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next.1, %vector.body ]
- %0 = getelementptr inbounds float, float* %data, i64 %index
- %1 = bitcast float* %0 to <4 x float>*
- store <4 x float> %broadcast.splat17, <4 x float>* %1, align 4
- %2 = getelementptr inbounds float, float* %0, i64 4
- %3 = bitcast float* %2 to <4 x float>*
- store <4 x float> %broadcast.splat19, <4 x float>* %3, align 4
- %4 = getelementptr inbounds float, float* %0, i64 8
- %5 = bitcast float* %4 to <4 x float>*
- store <4 x float> %broadcast.splat21, <4 x float>* %5, align 4
- %6 = getelementptr inbounds float, float* %0, i64 12
- %7 = bitcast float* %6 to <4 x float>*
- store <4 x float> %broadcast.splat23, <4 x float>* %7, align 4
- %8 = getelementptr inbounds float, float* %0, i64 16
- %9 = bitcast float* %8 to <4 x float>*
- store <4 x float> %broadcast.splat25, <4 x float>* %9, align 4
- %10 = getelementptr inbounds float, float* %0, i64 20
- %11 = bitcast float* %10 to <4 x float>*
- store <4 x float> %broadcast.splat27, <4 x float>* %11, align 4
- %12 = getelementptr inbounds float, float* %0, i64 24
- %13 = bitcast float* %12 to <4 x float>*
- store <4 x float> %broadcast.splat29, <4 x float>* %13, align 4
- %14 = getelementptr inbounds float, float* %0, i64 28
- %15 = bitcast float* %14 to <4 x float>*
- store <4 x float> %broadcast.splat31, <4 x float>* %15, align 4
- %16 = getelementptr inbounds float, float* %0, i64 32
- %17 = bitcast float* %16 to <4 x float>*
- store <4 x float> %broadcast.splat33, <4 x float>* %17, align 4
- %18 = getelementptr inbounds float, float* %0, i64 36
- %19 = bitcast float* %18 to <4 x float>*
- store <4 x float> %broadcast.splat35, <4 x float>* %19, align 4
- %20 = getelementptr inbounds float, float* %0, i64 40
- %21 = bitcast float* %20 to <4 x float>*
- store <4 x float> %broadcast.splat37, <4 x float>* %21, align 4
- %22 = getelementptr inbounds float, float* %0, i64 44
- %23 = bitcast float* %22 to <4 x float>*
- store <4 x float> %broadcast.splat39, <4 x float>* %23, align 4
+ %0 = getelementptr inbounds float, ptr %data, i64 %index
+ store <4 x float> %broadcast.splat17, ptr %0, align 4
+ %1 = getelementptr inbounds float, ptr %0, i64 4
+ store <4 x float> %broadcast.splat19, ptr %1, align 4
+ %2 = getelementptr inbounds float, ptr %0, i64 8
+ store <4 x float> %broadcast.splat21, ptr %2, align 4
+ %3 = getelementptr inbounds float, ptr %0, i64 12
+ store <4 x float> %broadcast.splat23, ptr %3, align 4
+ %4 = getelementptr inbounds float, ptr %0, i64 16
+ store <4 x float> %broadcast.splat25, ptr %4, align 4
+ %5 = getelementptr inbounds float, ptr %0, i64 20
+ store <4 x float> %broadcast.splat27, ptr %5, align 4
+ %6 = getelementptr inbounds float, ptr %0, i64 24
+ store <4 x float> %broadcast.splat29, ptr %6, align 4
+ %7 = getelementptr inbounds float, ptr %0, i64 28
+ store <4 x float> %broadcast.splat31, ptr %7, align 4
+ %8 = getelementptr inbounds float, ptr %0, i64 32
+ store <4 x float> %broadcast.splat33, ptr %8, align 4
+ %9 = getelementptr inbounds float, ptr %0, i64 36
+ store <4 x float> %broadcast.splat35, ptr %9, align 4
+ %10 = getelementptr inbounds float, ptr %0, i64 40
+ store <4 x float> %broadcast.splat37, ptr %10, align 4
+ %11 = getelementptr inbounds float, ptr %0, i64 44
+ store <4 x float> %broadcast.splat39, ptr %11, align 4
%index.next = add nuw nsw i64 %index, 48
- %24 = getelementptr inbounds float, float* %data, i64 %index.next
- %25 = bitcast float* %24 to <4 x float>*
- store <4 x float> %broadcast.splat17, <4 x float>* %25, align 4
- %26 = getelementptr inbounds float, float* %24, i64 4
- %27 = bitcast float* %26 to <4 x float>*
- store <4 x float> %broadcast.splat19, <4 x float>* %27, align 4
- %28 = getelementptr inbounds float, float* %24, i64 8
- %29 = bitcast float* %28 to <4 x float>*
- store <4 x float> %broadcast.splat21, <4 x float>* %29, align 4
- %30 = getelementptr inbounds float, float* %24, i64 12
- %31 = bitcast float* %30 to <4 x float>*
- store <4 x float> %broadcast.splat23, <4 x float>* %31, align 4
- %32 = getelementptr inbounds float, float* %24, i64 16
- %33 = bitcast float* %32 to <4 x float>*
- store <4 x float> %broadcast.splat25, <4 x float>* %33, align 4
- %34 = getelementptr inbounds float, float* %24, i64 20
- %35 = bitcast float* %34 to <4 x float>*
- store <4 x float> %broadcast.splat27, <4 x float>* %35, align 4
- %36 = getelementptr inbounds float, float* %24, i64 24
- %37 = bitcast float* %36 to <4 x float>*
- store <4 x float> %broadcast.splat29, <4 x float>* %37, align 4
- %38 = getelementptr inbounds float, float* %24, i64 28
- %39 = bitcast float* %38 to <4 x float>*
- store <4 x float> %broadcast.splat31, <4 x float>* %39, align 4
- %40 = getelementptr inbounds float, float* %24, i64 32
- %41 = bitcast float* %40 to <4 x float>*
- store <4 x float> %broadcast.splat33, <4 x float>* %41, align 4
- %42 = getelementptr inbounds float, float* %24, i64 36
- %43 = bitcast float* %42 to <4 x float>*
- store <4 x float> %broadcast.splat35, <4 x float>* %43, align 4
- %44 = getelementptr inbounds float, float* %24, i64 40
- %45 = bitcast float* %44 to <4 x float>*
- store <4 x float> %broadcast.splat37, <4 x float>* %45, align 4
- %46 = getelementptr inbounds float, float* %24, i64 44
- %47 = bitcast float* %46 to <4 x float>*
- store <4 x float> %broadcast.splat39, <4 x float>* %47, align 4
+ %12 = getelementptr inbounds float, ptr %data, i64 %index.next
+ store <4 x float> %broadcast.splat17, ptr %12, align 4
+ %13 = getelementptr inbounds float, ptr %12, i64 4
+ store <4 x float> %broadcast.splat19, ptr %13, align 4
+ %14 = getelementptr inbounds float, ptr %12, i64 8
+ store <4 x float> %broadcast.splat21, ptr %14, align 4
+ %15 = getelementptr inbounds float, ptr %12, i64 12
+ store <4 x float> %broadcast.splat23, ptr %15, align 4
+ %16 = getelementptr inbounds float, ptr %12, i64 16
+ store <4 x float> %broadcast.splat25, ptr %16, align 4
+ %17 = getelementptr inbounds float, ptr %12, i64 20
+ store <4 x float> %broadcast.splat27, ptr %17, align 4
+ %18 = getelementptr inbounds float, ptr %12, i64 24
+ store <4 x float> %broadcast.splat29, ptr %18, align 4
+ %19 = getelementptr inbounds float, ptr %12, i64 28
+ store <4 x float> %broadcast.splat31, ptr %19, align 4
+ %20 = getelementptr inbounds float, ptr %12, i64 32
+ store <4 x float> %broadcast.splat33, ptr %20, align 4
+ %21 = getelementptr inbounds float, ptr %12, i64 36
+ store <4 x float> %broadcast.splat35, ptr %21, align 4
+ %22 = getelementptr inbounds float, ptr %12, i64 40
+ store <4 x float> %broadcast.splat37, ptr %22, align 4
+ %23 = getelementptr inbounds float, ptr %12, i64 44
+ store <4 x float> %broadcast.splat39, ptr %23, align 4
%index.next.1 = add nuw nsw i64 %index, 96
- %48 = icmp eq i64 %index.next.1, 7968
- br i1 %48, label %for.body, label %vector.body
+ %24 = icmp eq i64 %index.next.1, 7968
+ br i1 %24, label %for.body, label %vector.body
for.body: ; preds = %vector.body
- %arrayidx = getelementptr inbounds float, float* %data, i64 7968
- store float %d, float* %arrayidx, align 4
- %arrayidx.1 = getelementptr inbounds float, float* %data, i64 7969
- store float %d, float* %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, float* %data, i64 7970
- store float %d, float* %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, float* %data, i64 7971
- store float %d, float* %arrayidx.3, align 4
- %arrayidx.4 = getelementptr inbounds float, float* %data, i64 7972
- store float %d, float* %arrayidx.4, align 4
- %arrayidx.5 = getelementptr inbounds float, float* %data, i64 7973
- store float %d, float* %arrayidx.5, align 4
- %arrayidx.6 = getelementptr inbounds float, float* %data, i64 7974
- store float %d, float* %arrayidx.6, align 4
- %arrayidx.7 = getelementptr inbounds float, float* %data, i64 7975
- store float %d, float* %arrayidx.7, align 4
- %arrayidx.8 = getelementptr inbounds float, float* %data, i64 7976
- store float %d, float* %arrayidx.8, align 4
- %arrayidx.9 = getelementptr inbounds float, float* %data, i64 7977
- store float %d, float* %arrayidx.9, align 4
- %arrayidx.10 = getelementptr inbounds float, float* %data, i64 7978
- store float %d, float* %arrayidx.10, align 4
- %arrayidx.11 = getelementptr inbounds float, float* %data, i64 7979
- store float %d, float* %arrayidx.11, align 4
- %arrayidx.12 = getelementptr inbounds float, float* %data, i64 7980
- store float %d, float* %arrayidx.12, align 4
- %arrayidx.13 = getelementptr inbounds float, float* %data, i64 7981
- store float %d, float* %arrayidx.13, align 4
- %arrayidx.14 = getelementptr inbounds float, float* %data, i64 7982
- store float %d, float* %arrayidx.14, align 4
- %arrayidx.15 = getelementptr inbounds float, float* %data, i64 7983
- store float %d, float* %arrayidx.15, align 4
- %arrayidx.16 = getelementptr inbounds float, float* %data, i64 7984
- store float %d, float* %arrayidx.16, align 4
- %arrayidx.17 = getelementptr inbounds float, float* %data, i64 7985
- store float %d, float* %arrayidx.17, align 4
- %arrayidx.18 = getelementptr inbounds float, float* %data, i64 7986
- store float %d, float* %arrayidx.18, align 4
- %arrayidx.19 = getelementptr inbounds float, float* %data, i64 7987
- store float %d, float* %arrayidx.19, align 4
- %arrayidx.20 = getelementptr inbounds float, float* %data, i64 7988
- store float %d, float* %arrayidx.20, align 4
- %arrayidx.21 = getelementptr inbounds float, float* %data, i64 7989
- store float %d, float* %arrayidx.21, align 4
- %arrayidx.22 = getelementptr inbounds float, float* %data, i64 7990
- store float %d, float* %arrayidx.22, align 4
- %arrayidx.23 = getelementptr inbounds float, float* %data, i64 7991
- store float %d, float* %arrayidx.23, align 4
- %arrayidx.24 = getelementptr inbounds float, float* %data, i64 7992
- store float %d, float* %arrayidx.24, align 4
- %arrayidx.25 = getelementptr inbounds float, float* %data, i64 7993
- store float %d, float* %arrayidx.25, align 4
- %arrayidx.26 = getelementptr inbounds float, float* %data, i64 7994
- store float %d, float* %arrayidx.26, align 4
- %arrayidx.27 = getelementptr inbounds float, float* %data, i64 7995
- store float %d, float* %arrayidx.27, align 4
- %arrayidx.28 = getelementptr inbounds float, float* %data, i64 7996
- store float %d, float* %arrayidx.28, align 4
- %arrayidx.29 = getelementptr inbounds float, float* %data, i64 7997
- store float %d, float* %arrayidx.29, align 4
- %arrayidx.30 = getelementptr inbounds float, float* %data, i64 7998
- store float %d, float* %arrayidx.30, align 4
- %arrayidx.31 = getelementptr inbounds float, float* %data, i64 7999
- store float %d, float* %arrayidx.31, align 4
+ %arrayidx = getelementptr inbounds float, ptr %data, i64 7968
+ store float %d, ptr %arrayidx, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %data, i64 7969
+ store float %d, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %data, i64 7970
+ store float %d, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %data, i64 7971
+ store float %d, ptr %arrayidx.3, align 4
+ %arrayidx.4 = getelementptr inbounds float, ptr %data, i64 7972
+ store float %d, ptr %arrayidx.4, align 4
+ %arrayidx.5 = getelementptr inbounds float, ptr %data, i64 7973
+ store float %d, ptr %arrayidx.5, align 4
+ %arrayidx.6 = getelementptr inbounds float, ptr %data, i64 7974
+ store float %d, ptr %arrayidx.6, align 4
+ %arrayidx.7 = getelementptr inbounds float, ptr %data, i64 7975
+ store float %d, ptr %arrayidx.7, align 4
+ %arrayidx.8 = getelementptr inbounds float, ptr %data, i64 7976
+ store float %d, ptr %arrayidx.8, align 4
+ %arrayidx.9 = getelementptr inbounds float, ptr %data, i64 7977
+ store float %d, ptr %arrayidx.9, align 4
+ %arrayidx.10 = getelementptr inbounds float, ptr %data, i64 7978
+ store float %d, ptr %arrayidx.10, align 4
+ %arrayidx.11 = getelementptr inbounds float, ptr %data, i64 7979
+ store float %d, ptr %arrayidx.11, align 4
+ %arrayidx.12 = getelementptr inbounds float, ptr %data, i64 7980
+ store float %d, ptr %arrayidx.12, align 4
+ %arrayidx.13 = getelementptr inbounds float, ptr %data, i64 7981
+ store float %d, ptr %arrayidx.13, align 4
+ %arrayidx.14 = getelementptr inbounds float, ptr %data, i64 7982
+ store float %d, ptr %arrayidx.14, align 4
+ %arrayidx.15 = getelementptr inbounds float, ptr %data, i64 7983
+ store float %d, ptr %arrayidx.15, align 4
+ %arrayidx.16 = getelementptr inbounds float, ptr %data, i64 7984
+ store float %d, ptr %arrayidx.16, align 4
+ %arrayidx.17 = getelementptr inbounds float, ptr %data, i64 7985
+ store float %d, ptr %arrayidx.17, align 4
+ %arrayidx.18 = getelementptr inbounds float, ptr %data, i64 7986
+ store float %d, ptr %arrayidx.18, align 4
+ %arrayidx.19 = getelementptr inbounds float, ptr %data, i64 7987
+ store float %d, ptr %arrayidx.19, align 4
+ %arrayidx.20 = getelementptr inbounds float, ptr %data, i64 7988
+ store float %d, ptr %arrayidx.20, align 4
+ %arrayidx.21 = getelementptr inbounds float, ptr %data, i64 7989
+ store float %d, ptr %arrayidx.21, align 4
+ %arrayidx.22 = getelementptr inbounds float, ptr %data, i64 7990
+ store float %d, ptr %arrayidx.22, align 4
+ %arrayidx.23 = getelementptr inbounds float, ptr %data, i64 7991
+ store float %d, ptr %arrayidx.23, align 4
+ %arrayidx.24 = getelementptr inbounds float, ptr %data, i64 7992
+ store float %d, ptr %arrayidx.24, align 4
+ %arrayidx.25 = getelementptr inbounds float, ptr %data, i64 7993
+ store float %d, ptr %arrayidx.25, align 4
+ %arrayidx.26 = getelementptr inbounds float, ptr %data, i64 7994
+ store float %d, ptr %arrayidx.26, align 4
+ %arrayidx.27 = getelementptr inbounds float, ptr %data, i64 7995
+ store float %d, ptr %arrayidx.27, align 4
+ %arrayidx.28 = getelementptr inbounds float, ptr %data, i64 7996
+ store float %d, ptr %arrayidx.28, align 4
+ %arrayidx.29 = getelementptr inbounds float, ptr %data, i64 7997
+ store float %d, ptr %arrayidx.29, align 4
+ %arrayidx.30 = getelementptr inbounds float, ptr %data, i64 7998
+ store float %d, ptr %arrayidx.30, align 4
+ %arrayidx.31 = getelementptr inbounds float, ptr %data, i64 7999
+ store float %d, ptr %arrayidx.31, align 4
ret void
}
%class2 = type { %class3 }
%class3 = type { %class4 }
%class4 = type { %class5, i64, %union.anon }
-%class5 = type { i8* }
+%class5 = type { ptr }
%union.anon = type { i64, [8 x i8] }
@ext = external global %"class1", align 8
; CHECK: blr
define void @unaligned_slot() #0 {
%1 = alloca %class2, align 8
- %2 = getelementptr inbounds %class2, %class2* %1, i64 0, i32 0, i32 0, i32 2
- %3 = bitcast %union.anon* %2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* align 8 nonnull %3, i64 16, i1 false) #2
+ %2 = getelementptr inbounds %class2, ptr %1, i64 0, i32 0, i32 0, i32 2
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull getelementptr inbounds (%class1, ptr @ext, i64 0, i32 0, i32 1, i64 8), ptr align 8 nonnull %2, i64 16, i1 false) #2
ret void
}
; CHECK-LABEL: aligned_slot:
; CHECK: blr
define void @aligned_slot() #0 {
%1 = alloca %class2, align 16
- %2 = getelementptr inbounds %class2, %class2* %1, i64 0, i32 0, i32 0, i32 2
- %3 = bitcast %union.anon* %2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* align 8 nonnull %3, i64 16, i1 false) #2
+ %2 = getelementptr inbounds %class2, ptr %1, i64 0, i32 0, i32 0, i32 2
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull getelementptr inbounds (%class1, ptr @ext, i64 0, i32 0, i32 1, i64 8), ptr align 8 nonnull %2, i64 16, i1 false) #2
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
attributes #0 = { nounwind "target-cpu"="pwr9" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+power9-vector,+vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
define void @test() {
entry:
%__a.addr.i = alloca i32, align 4
- %__b.addr.i = alloca <4 x i32>*, align 8
+ %__b.addr.i = alloca ptr, align 8
%i = alloca <4 x i32>, align 16
%j = alloca <4 x i32>, align 16
- store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %i, align 16
- store i32 0, i32* %__a.addr.i, align 4
- store <4 x i32>* %i, <4 x i32>** %__b.addr.i, align 8
- %0 = load i32, i32* %__a.addr.i, align 4
- %1 = load <4 x i32>*, <4 x i32>** %__b.addr.i, align 8
- %2 = bitcast <4 x i32>* %1 to i8*
- %3 = getelementptr i8, i8* %2, i32 %0
- %4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
+ store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %i, align 16
+ store i32 0, ptr %__a.addr.i, align 4
+ store ptr %i, ptr %__b.addr.i, align 8
+ %0 = load i32, ptr %__a.addr.i, align 4
+ %1 = load ptr, ptr %__b.addr.i, align 8
+ %2 = getelementptr i8, ptr %1, i32 %0
+ %3 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr %2)
; CHECK: lwa [[REG0:[0-9]+]],
; CHECK: lxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, [[REG0]]
; CHECK: xxswapd [[REG1]], [[REG1]]
; CHECK-P9UP: lwa [[REG0:[0-9]+]],
; CHECK-P9UP: lxvx [[REG1:[0-9]+]], {{[0-9]+}}, [[REG0]]
- store <4 x i32> %4, <4 x i32>* %j, align 16
+ store <4 x i32> %3, ptr %j, align 16
ret void
}
; Function Attrs: nounwind readonly
-declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr)
; CHECK-P8: Macro fuse: SU([[SU0:[0-9]+]]) - SU([[SU1:[0-9]+]]) / ADDIStocHA8 - LD
; CHECK-P8: SU([[SU0]]): renamable $x[[REG3:[0-9]+]] = ADDIStocHA8 $x2, @m
; CHECK-P8: SU([[SU1]]): renamable $x[[REG3]] = LD target-flags(ppc-toc-lo) @m, renamable $x[[REG3]]
- %0 = load i64, i64* @m, align 8
+ %0 = load i64, ptr @m, align 8
ret i64 %0
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-unknown-linux-gnu.2.0"
- %struct.re_pattern_buffer = type <{ i8*, i64, i8, [7 x i8] }>
+ %struct.re_pattern_buffer = type <{ ptr, i64, i8, [7 x i8] }>
-define i32 @xre_search_2(%struct.re_pattern_buffer* %bufp, i32 %range) nounwind {
+define i32 @xre_search_2(ptr %bufp, i32 %range) nounwind {
entry:
br i1 false, label %bb16, label %bb49
bb16: ; preds = %entry
- %tmp19 = load i8*, i8** null, align 1 ; <i8*> [#uses=1]
- %tmp21 = load i8, i8* %tmp19, align 1 ; <i8> [#uses=1]
+ %tmp19 = load ptr, ptr null, align 1 ; <ptr> [#uses=1]
+ %tmp21 = load i8, ptr %tmp19, align 1 ; <i8> [#uses=1]
switch i8 %tmp21, label %bb49 [
i8 0, label %bb45
i8 1, label %bb34
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32, i32* @ei, align 4
+ %0 = load i32, ptr @ei, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ei, align 4
+ store i32 %inc, ptr @ei, align 4
ret i32 %0
}
define i32 @load() {
entry:
- %0 = load i32, i32* @gi, align 4
- %1 = load i32, i32* @fi, align 4
+ %0 = load i32, ptr @gi, align 4
+ %1 = load i32, ptr @fi, align 4
%2 = add i32 %0, %1
ret i32 %2
}
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32, i32* @test_fn_static.si, align 4
+ %0 = load i32, ptr @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @test_fn_static.si, align 4
+ store i32 %inc, ptr @test_fn_static.si, align 4
ret i32 %0
}
define dso_local signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32, i32* @gi, align 4
+ %0 = load i32, ptr @gi, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @gi, align 4
+ store i32 %inc, ptr @gi, align 4
ret i32 %0
}
define signext i32 @test_weak() nounwind {
entry:
- %0 = load i32, i32* @wi, align 4
+ %0 = load i32, ptr @wi, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @wi, align 4
+ store i32 %inc, ptr @wi, align 4
ret i32 %0
}
define signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32, i32* @test_fn_static.si, align 4
+ %0 = load i32, ptr @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @test_fn_static.si, align 4
+ store i32 %inc, ptr @test_fn_static.si, align 4
ret i32 %0
}
define dso_local signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32, i32* @gi, align 4
+ %0 = load i32, ptr @gi, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @gi, align 4
+ store i32 %inc, ptr @gi, align 4
ret i32 %0
}
define signext i32 @test_jump_table(i32 signext %i) nounwind {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr %i.addr, align 4
switch i32 %0, label %sw.default [
i32 3, label %sw.bb
i32 4, label %sw.bb1
br label %sw.epilog
sw.bb: ; preds = %entry
- %1 = load i32, i32* %i.addr, align 4
+ %1 = load i32, ptr %i.addr, align 4
%mul = mul nsw i32 %1, 7
- store i32 %mul, i32* %i.addr, align 4
+ store i32 %mul, ptr %i.addr, align 4
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
- %2 = load i32, i32* %i.addr, align 4
+ %2 = load i32, ptr %i.addr, align 4
%dec = add nsw i32 %2, -1
- store i32 %dec, i32* %i.addr, align 4
+ store i32 %dec, ptr %i.addr, align 4
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
- %3 = load i32, i32* %i.addr, align 4
+ %3 = load i32, ptr %i.addr, align 4
%add = add nsw i32 %3, 3
- store i32 %add, i32* %i.addr, align 4
+ store i32 %add, ptr %i.addr, align 4
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
- %4 = load i32, i32* %i.addr, align 4
+ %4 = load i32, ptr %i.addr, align 4
%shl = shl i32 %4, 1
- store i32 %shl, i32* %i.addr, align 4
+ store i32 %shl, ptr %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb3, %sw.default
- %5 = load i32, i32* %i.addr, align 4
+ %5 = load i32, ptr %i.addr, align 4
ret i32 %5
}
; CHECK-LABEL: test_jump_table:
define signext i32 @test_tentative() nounwind {
entry:
- %0 = load i32, i32* @ti, align 4
+ %0 = load i32, ptr @ti, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ti, align 4
+ store i32 %inc, ptr @ti, align 4
ret i32 %0
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i8* @test_fnaddr() nounwind {
+define ptr @test_fnaddr() nounwind {
entry:
- %func = alloca i32 (i32)*, align 8
- store i32 (i32)* @foo, i32 (i32)** %func, align 8
- %0 = load i32 (i32)*, i32 (i32)** %func, align 8
- %1 = bitcast i32 (i32)* %0 to i8*
- ret i8* %1
+ %func = alloca ptr, align 8
+ store ptr @foo, ptr %func, align 8
+ %0 = load ptr, ptr %func, align 8
+ ret ptr %0
}
declare signext i32 @foo(i32 signext)
define signext i8 @test_avext() nounwind {
entry:
- %0 = getelementptr inbounds [13 x i8], [13 x i8]* @x, i32 0, i32 0
- %1 = load i8, i8* %0, align 1
- ret i8 %1
+ %0 = load i8, ptr @x, align 1
+ ret i8 %0
}
; CHECK-LABEL: test_avext:
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @a, align 4
+ store i32 %inc, ptr @a, align 4
ret i32 %0
}
define signext i32 @test_external() nounwind {
entry:
- %0 = load i32, i32* @ei, align 4
+ %0 = load i32, ptr @ei, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ei, align 4
+ store i32 %inc, ptr @ei, align 4
ret i32 %0
}
define dso_local signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32, i32* @test_fn_static.si, align 4
+ %0 = load i32, ptr @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @test_fn_static.si, align 4
+ store i32 %inc, ptr @test_fn_static.si, align 4
ret i32 %0
}
define dso_local signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32, i32* @gi, align 4
+ %0 = load i32, ptr @gi, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @gi, align 4
+ store i32 %inc, ptr @gi, align 4
ret i32 %0
}
define dso_local signext i32 @test_external() nounwind {
entry:
- %0 = load i32, i32* @ei, align 4
+ %0 = load i32, ptr @ei, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ei, align 4
+ store i32 %inc, ptr @ei, align 4
ret i32 %0
}
define dso_local signext i32 @test_fn_static() nounwind {
entry:
- %0 = load i32, i32* @test_fn_static.si, align 4
+ %0 = load i32, ptr @test_fn_static.si, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @test_fn_static.si, align 4
+ store i32 %inc, ptr @test_fn_static.si, align 4
ret i32 %0
}
define dso_local signext i32 @test_file_static() nounwind {
entry:
- %0 = load i32, i32* @gi, align 4
+ %0 = load i32, ptr @gi, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @gi, align 4
+ store i32 %inc, ptr @gi, align 4
ret i32 %0
}
define dso_local signext i32 @test_tentative() nounwind {
entry:
- %0 = load i32, i32* @ti, align 4
+ %0 = load i32, ptr @ti, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @ti, align 4
+ store i32 %inc, ptr @ti, align 4
ret i32 %0
}
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
-define i8* @test_fnaddr() nounwind {
+define ptr @test_fnaddr() nounwind {
entry:
- %func = alloca i32 (i32)*, align 8
- store i32 (i32)* @foo, i32 (i32)** %func, align 8
- %0 = load i32 (i32)*, i32 (i32)** %func, align 8
- %1 = bitcast i32 (i32)* %0 to i8*
- ret i8* %1
+ %func = alloca ptr, align 8
+ store ptr @foo, ptr %func, align 8
+ %0 = load ptr, ptr %func, align 8
+ ret ptr %0
}
declare signext i32 @foo(i32 signext)
define dso_local signext i32 @test_jump_table(i32 signext %i) nounwind {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr %i.addr, align 4
switch i32 %0, label %sw.default [
i32 3, label %sw.bb
i32 4, label %sw.bb1
br label %sw.epilog
sw.bb: ; preds = %entry
- %1 = load i32, i32* %i.addr, align 4
+ %1 = load i32, ptr %i.addr, align 4
%mul = mul nsw i32 %1, 7
- store i32 %mul, i32* %i.addr, align 4
+ store i32 %mul, ptr %i.addr, align 4
br label %sw.bb1
sw.bb1: ; preds = %entry, %sw.bb
- %2 = load i32, i32* %i.addr, align 4
+ %2 = load i32, ptr %i.addr, align 4
%dec = add nsw i32 %2, -1
- store i32 %dec, i32* %i.addr, align 4
+ store i32 %dec, ptr %i.addr, align 4
br label %sw.bb2
sw.bb2: ; preds = %entry, %sw.bb1
- %3 = load i32, i32* %i.addr, align 4
+ %3 = load i32, ptr %i.addr, align 4
%add = add nsw i32 %3, 3
- store i32 %add, i32* %i.addr, align 4
+ store i32 %add, ptr %i.addr, align 4
br label %sw.bb3
sw.bb3: ; preds = %entry, %sw.bb2
- %4 = load i32, i32* %i.addr, align 4
+ %4 = load i32, ptr %i.addr, align 4
%shl = shl i32 %4, 1
- store i32 %shl, i32* %i.addr, align 4
+ store i32 %shl, ptr %i.addr, align 4
br label %sw.epilog
sw.epilog: ; preds = %sw.bb3, %sw.default
- %5 = load i32, i32* %i.addr, align 4
+ %5 = load i32, ptr %i.addr, align 4
ret i32 %5
}
; Codegen lvx (R+16) as t = li 16, lvx t,R
; This shares the 16 between the two loads.
-define void @func(<4 x float>* %a, <4 x float>* %b) {
- %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp = load <4 x float>, <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
- %tmp3 = getelementptr <4 x float>, <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp4 = load <4 x float>, <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
+define void @func(ptr %a, ptr %b) {
+ %tmp1 = getelementptr <4 x float>, ptr %b, i32 1 ; <ptr> [#uses=1]
+ %tmp = load <4 x float>, ptr %tmp1 ; <<4 x float>> [#uses=1]
+ %tmp3 = getelementptr <4 x float>, ptr %a, i32 1 ; <ptr> [#uses=1]
+ %tmp4 = load <4 x float>, ptr %tmp3 ; <<4 x float>> [#uses=1]
%tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
- %tmp8 = load <4 x float>, <4 x float>* %b ; <<4 x float>> [#uses=1]
+ %tmp8 = load <4 x float>, ptr %b ; <<4 x float>> [#uses=1]
%tmp9 = fadd <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp9, <4 x float>* %a
+ store <4 x float> %tmp9, ptr %a
ret void
}
@zeroEqualityTest04.buffer1 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14], align 4
@zeroEqualityTest04.buffer2 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 13], align 4
-declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
+declare signext i32 @memcmp(ptr nocapture, ptr nocapture, i64) local_unnamed_addr #1
; Check 4 bytes - requires 1 load for each param.
-define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) {
+define signext i32 @zeroEqualityTest02(ptr %x, ptr %y) {
; CHECK-LABEL: zeroEqualityTest02:
; CHECK: # %bb.0:
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: xori 3, 3, 1
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 4)
+ %call = tail call signext i32 @memcmp(ptr %x, ptr %y, i64 4)
%not.cmp = icmp ne i32 %call, 0
%. = zext i1 %not.cmp to i32
ret i32 %.
}
; Check 16 bytes - requires 2 loads for each param (or use vectors?).
-define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) {
+define signext i32 @zeroEqualityTest01(ptr %x, ptr %y) {
; CHECK-LABEL: zeroEqualityTest01:
; CHECK: # %bb.0:
; CHECK-NEXT: ld 5, 0(3)
; CHECK-NEXT: .LBB1_2: # %res_block
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 16)
+ %call = tail call signext i32 @memcmp(ptr %x, ptr %y, i64 16)
%not.tobool = icmp ne i32 %call, 0
%. = zext i1 %not.tobool to i32
ret i32 %.
}
; Check 7 bytes - requires 3 loads for each param.
-define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) {
+define signext i32 @zeroEqualityTest03(ptr %x, ptr %y) {
; CHECK-LABEL: zeroEqualityTest03:
; CHECK: # %bb.0:
; CHECK-NEXT: lwz 5, 0(3)
; CHECK-NEXT: .LBB2_3: # %res_block
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 7)
+ %call = tail call signext i32 @memcmp(ptr %x, ptr %y, i64 7)
%not.lnot = icmp ne i32 %call, 0
%cond = zext i1 %not.lnot to i32
ret i32 %cond
; CHECK: # %bb.0: # %loadbb
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer2 to i8*), i64 16)
+ %call = tail call signext i32 @memcmp(ptr @zeroEqualityTest02.buffer1, ptr @zeroEqualityTest02.buffer2, i64 16)
%not.cmp = icmp slt i32 %call, 1
%. = zext i1 %not.cmp to i32
ret i32 %.
; CHECK: # %bb.0: # %loadbb
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer2 to i8*), i64 16)
+ %call = tail call signext i32 @memcmp(ptr @zeroEqualityTest03.buffer1, ptr @zeroEqualityTest03.buffer2, i64 16)
%call.lobit = lshr i32 %call, 31
%call.lobit.not = xor i32 %call.lobit, 1
ret i32 %call.lobit.not
; CHECK: # %bb.0: # %loadbb
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16)
+ %call = tail call signext i32 @memcmp(ptr @zeroEqualityTest04.buffer1, ptr @zeroEqualityTest04.buffer2, i64 16)
%not.tobool = icmp eq i32 %call, 0
%cond = zext i1 %not.tobool to i32
ret i32 %cond
}
-define signext i32 @equalityFoldOneConstant(i8* %X) {
+define signext i32 @equalityFoldOneConstant(ptr %X) {
; CHECK-LABEL: equalityFoldOneConstant:
; CHECK: # %bb.0:
; CHECK-NEXT: ld 4, 0(3)
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: srwi 3, 3, 5
; CHECK-NEXT: blr
- %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* %X, i64 16)
+ %call = tail call signext i32 @memcmp(ptr @zeroEqualityTest04.buffer1, ptr %X, i64 16)
%not.tobool = icmp eq i32 %call, 0
%cond = zext i1 %not.tobool to i32
ret i32 %cond
}
-define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
+define i1 @length2_eq_nobuiltin_attr(ptr %X, ptr %Y) nounwind {
; CHECK-LABEL: length2_eq_nobuiltin_attr:
; CHECK: # %bb.0:
; CHECK-NEXT: mflr 0
; CHECK-NEXT: ld 0, 16(1)
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
- %m = tail call signext i32 @memcmp(i8* %X, i8* %Y, i64 2) nobuiltin
+ %m = tail call signext i32 @memcmp(ptr %X, ptr %Y, i64 2) nobuiltin
%c = icmp eq i32 %m, 0
ret i1 %c
}
@Glob = global i64 4
-define i32* @test0(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32, i32* %X, i32 4
- %A = load i32, i32* %Y
- store i32 %A, i32* %dest
- ret i32* %Y
+define ptr @test0(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i32, ptr %X, i32 4
+ %A = load i32, ptr %Y
+ store i32 %A, ptr %dest
+ ret ptr %Y
}
-define i32* @test1(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32, i32* %X, i32 4
- %A = load i32, i32* %Y
- store i32 %A, i32* %dest
- ret i32* %Y
+define ptr @test1(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i32, ptr %X, i32 4
+ %A = load i32, ptr %Y
+ store i32 %A, ptr %dest
+ ret ptr %Y
}
-define i16* @test2(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16, i16* %X, i32 4
- %A = load i16, i16* %Y
+define ptr @test2(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i16, ptr %X, i32 4
+ %A = load i16, ptr %Y
%B = sext i16 %A to i32
- store i32 %B, i32* %dest
- ret i16* %Y
+ store i32 %B, ptr %dest
+ ret ptr %Y
}
-define i16* @test3(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16, i16* %X, i32 4
- %A = load i16, i16* %Y
+define ptr @test3(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i16, ptr %X, i32 4
+ %A = load i16, ptr %Y
%B = zext i16 %A to i32
- store i32 %B, i32* %dest
- ret i16* %Y
+ store i32 %B, ptr %dest
+ ret ptr %Y
}
-define i16* @test3a(i16* %X, i64* %dest) nounwind {
- %Y = getelementptr i16, i16* %X, i32 4
- %A = load i16, i16* %Y
+define ptr @test3a(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i16, ptr %X, i32 4
+ %A = load i16, ptr %Y
%B = sext i16 %A to i64
- store i64 %B, i64* %dest
- ret i16* %Y
+ store i64 %B, ptr %dest
+ ret ptr %Y
}
-define i64* @test4(i64* %X, i64* %dest) nounwind {
- %Y = getelementptr i64, i64* %X, i32 4
- %A = load i64, i64* %Y
- store i64 %A, i64* %dest
- ret i64* %Y
+define ptr @test4(ptr %X, ptr %dest) nounwind {
+ %Y = getelementptr i64, ptr %X, i32 4
+ %A = load i64, ptr %Y
+ store i64 %A, ptr %dest
+ ret ptr %Y
}
-define i16* @test5(i16* %X) nounwind {
- %Y = getelementptr i16, i16* %X, i32 4
- store i16 7, i16* %Y
- ret i16* %Y
+define ptr @test5(ptr %X) nounwind {
+ %Y = getelementptr i16, ptr %X, i32 4
+ store i16 7, ptr %Y
+ ret ptr %Y
}
-define i64* @test6(i64* %X, i64 %A) nounwind {
- %Y = getelementptr i64, i64* %X, i32 4
- store i64 %A, i64* %Y
- ret i64* %Y
+define ptr @test6(ptr %X, i64 %A) nounwind {
+ %Y = getelementptr i64, ptr %X, i32 4
+ store i64 %A, ptr %Y
+ ret ptr %Y
}
-define i64* @test7(i64* %X, i64 %A) nounwind {
- store i64 %A, i64* @Glob
- ret i64* @Glob
+define ptr @test7(ptr %X, i64 %A) nounwind {
+ store i64 %A, ptr @Glob
+ ret ptr @Glob
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECK
-define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+define signext i32 @memcmp8(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp8:
; CHECK: # %bb.0:
; CHECK-NEXT: ldbrx 3, 0, 3
; CHECK-NEXT: sub 3, 4, 3
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
- %t0 = bitcast i32* %buffer1 to i8*
- %t1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 8)
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 8)
ret i32 %call
}
-define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+define signext i32 @memcmp4(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp4:
; CHECK: # %bb.0:
; CHECK-NEXT: lwbrx 3, 0, 3
; CHECK-NEXT: sub 3, 4, 3
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
- %t0 = bitcast i32* %buffer1 to i8*
- %t1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 4)
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 4)
ret i32 %call
}
-define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+define signext i32 @memcmp2(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp2:
; CHECK: # %bb.0:
; CHECK-NEXT: lhbrx 3, 0, 3
; CHECK-NEXT: sub 3, 3, 4
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
- %t0 = bitcast i32* %buffer1 to i8*
- %t1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 2)
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 2)
ret i32 %call
}
-define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+define signext i32 @memcmp1(ptr nocapture readonly %buffer1, ptr nocapture readonly %buffer2) {
; CHECK-LABEL: memcmp1:
; CHECK: # %bb.0:
; CHECK-NEXT: lbz 3, 0(3)
; CHECK-NEXT: sub 3, 3, 4
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
- %t0 = bitcast i32* %buffer1 to i8*
- %t1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 1) #2
+ %call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 1) #2
ret i32 %call
}
-declare signext i32 @memcmp(i8*, i8*, i64)
+declare signext i32 @memcmp(ptr, ptr, i64)
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @foo1(double* nocapture %x, double* nocapture readonly %y) #0 {
+define void @foo1(ptr nocapture %x, ptr nocapture readonly %y) #0 {
entry:
- %0 = bitcast double* %x to i8*
- %1 = bitcast double* %y to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 32, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %x, ptr align 8 %y, i64 32, i1 false)
ret void
; PWR7-LABEL: @foo1
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
; Function Attrs: nounwind
-define void @foo2(double* nocapture %x, double* nocapture readonly %y) #0 {
+define void @foo2(ptr nocapture %x, ptr nocapture readonly %y) #0 {
entry:
- %0 = bitcast double* %x to i8*
- %1 = bitcast double* %y to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 128, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %x, ptr align 8 %y, i64 128, i1 false)
ret void
; PWR7-LABEL: @foo2
}
; Function Attrs: nounwind
-define void @bar1(double* nocapture %x) #0 {
+define void @bar1(ptr nocapture %x) #0 {
entry:
- %0 = bitcast double* %x to i8*
- tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 128, i1 false)
+ tail call void @llvm.memset.p0.i64(ptr align 8 %x, i8 0, i64 128, i1 false)
ret void
; PWR7-LABEL: @bar1
}
; Function Attrs: nounwind
-define void @bar2(double* nocapture %x) #0 {
+define void @bar2(ptr nocapture %x) #0 {
entry:
- %0 = bitcast double* %x to i8*
- tail call void @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 128, i1 false)
+ tail call void @llvm.memset.p0.i64(ptr align 32 %x, i8 0, i64 128, i1 false)
ret void
; PWR7-LABEL: @bar2
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #0
attributes #0 = { nounwind }
define void @func(i1 %flag) {
entry:
%pairs = alloca [4 x <2 x i64>], align 8
- %pair1 = getelementptr inbounds [4 x <2 x i64>], [4 x <2 x i64>]* %pairs, i64 0, i64 1
- %pair2 = getelementptr inbounds [4 x <2 x i64>], [4 x <2 x i64>]* %pairs, i64 0, i64 2
- %pvec1 = bitcast <2 x i64>* %pair1 to <2 x i64>*
- %pvec2 = bitcast <2 x i64>* %pair2 to <2 x i64>*
- %dst = bitcast [4 x <2 x i64>]* %pairs to i8*
- %src = bitcast <2 x i64>* %pair2 to i8*
+ %pair1 = getelementptr inbounds [4 x <2 x i64>], ptr %pairs, i64 0, i64 1
+ %pair2 = getelementptr inbounds [4 x <2 x i64>], ptr %pairs, i64 0, i64 2
br i1 %flag, label %end, label %dummy
end:
; copy third element into first element by memcpy
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %dst, i8* align 8 %src, i64 16, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull %pairs, ptr align 8 %pair2, i64 16, i1 false)
; copy third element into second element by LD/ST
- %vec2 = load <2 x i64>, <2 x i64>* %pvec2, align 8
- store <2 x i64> %vec2, <2 x i64>* %pvec1, align 8
+ %vec2 = load <2 x i64>, ptr %pair2, align 8
+ store <2 x i64> %vec2, ptr %pair1, align 8
ret void
dummy:
- ; to make use of %src in another BB
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i1 false)
+ ; to make use of %pair2 in another BB
+ call void @llvm.memcpy.p0.p0.i64(ptr %pair2, ptr %pair2, i64 0, i1 false)
br label %end
}
define void @func2(i1 %flag) {
entry:
%pairs = alloca [4 x <2 x i64>], align 8
- %pair1 = getelementptr inbounds [4 x <2 x i64>], [4 x <2 x i64>]* %pairs, i64 0, i64 1
- %pair2 = getelementptr inbounds [4 x <2 x i64>], [4 x <2 x i64>]* %pairs, i64 0, i64 2
- %pvec1 = bitcast <2 x i64>* %pair1 to <2 x i64>*
- %pvec2 = bitcast <2 x i64>* %pair2 to <2 x i64>*
- %dst = bitcast [4 x <2 x i64>]* %pairs to i8*
- %src = bitcast <2 x i64>* %pair2 to i8*
+ %pair1 = getelementptr inbounds [4 x <2 x i64>], ptr %pairs, i64 0, i64 1
+ %pair2 = getelementptr inbounds [4 x <2 x i64>], ptr %pairs, i64 0, i64 2
br i1 %flag, label %end, label %dummy
end:
; copy third element into first element by memcpy
- call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 nonnull %dst, i8* align 8 %src, i64 16, i1 false)
+ call void @llvm.memmove.p0.p0.i64(ptr align 8 nonnull %pairs, ptr align 8 %pair2, i64 16, i1 false)
; copy third element into second element by LD/ST
- %vec2 = load <2 x i64>, <2 x i64>* %pvec2, align 8
- store <2 x i64> %vec2, <2 x i64>* %pvec1, align 8
+ %vec2 = load <2 x i64>, ptr %pair2, align 8
+ store <2 x i64> %vec2, ptr %pair1, align 8
ret void
dummy:
- ; to make use of %src in another BB
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i1 false)
+ ; to make use of %pair2 in another BB
+ call void @llvm.memcpy.p0.p0.i64(ptr %pair2, ptr %pair2, i64 0, i1 false)
br label %end
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
attributes #1 = { argmemonly nounwind }
; Function Attrs: nounwind
define void @test_vsx() unnamed_addr #0 align 2 {
entry:
- %0 = load i32, i32* undef, align 4
+ %0 = load i32, ptr undef, align 4
%1 = trunc i32 %0 to i8
- call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i1 false)
+ call void @llvm.memset.p0.i64(ptr null, i8 %1, i64 32, i1 false)
ret void
; CHECK-LABEL: @test_vsx
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #1
attributes #0 = { nounwind "target-cpu"="pwr8" }
attributes #1 = { nounwind }
; CHECK: @_ZN5clang7tooling15RefactoringTool10runAndSaveEPNS0_21FrontendActionFactoryE
_ZN4llvm18IntrusiveRefCntPtrIN5clang13DiagnosticIDsEEC2EPS2_.exit: ; preds = %entry
- %call2 = call noalias i8* @_Znwm() #3
- %ref_cnt.i.i = bitcast i8* %call2 to i32*
- store <2 x i8*> <i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*)>, <2 x i8*>* undef, align 8
- %IgnoreWarnings.i = getelementptr inbounds i8, i8* %call2, i64 4
- %0 = bitcast i8* %IgnoreWarnings.i to i32*
- call void @llvm.memset.p0i8.i64(i8* align 8 null, i8 0, i64 48, i1 false) #4
- store i32 251658240, i32* %0, align 4
- store i256 37662610426935100959726589394453639584271499769928088551424, i256* null, align 8
- store i32 1, i32* %ref_cnt.i.i, align 4
+ %call2 = call noalias ptr @_Znwm() #3
+ store <2 x ptr> <ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3), ptr getelementptr inbounds ([0 x i64], ptr @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3)>, ptr undef, align 8
+ %IgnoreWarnings.i = getelementptr inbounds i8, ptr %call2, i64 4
+ call void @llvm.memset.p0.i64(ptr align 8 null, i8 0, i64 48, i1 false) #4
+ store i32 251658240, ptr %IgnoreWarnings.i, align 4
+ store i256 37662610426935100959726589394453639584271499769928088551424, ptr null, align 8
+ store i32 1, ptr %call2, align 4
unreachable
return: ; preds = %entry
}
; Function Attrs: nobuiltin
-declare noalias i8* @_Znwm() #1
+declare noalias ptr @_Znwm() #1
; Function Attrs: nounwind argmemonly
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #2
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #2
attributes #0 = { nounwind "target-cpu"="pwr7" }
attributes #1 = { nobuiltin "target-cpu"="pwr7" }
; CHECK-NOT: lxvd2x
; CHECK: stxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
-define <2 x i64> @func(i64* %pdst) {
+define <2 x i64> @func(ptr %pdst) {
entry:
%a = alloca [4 x i64], align 8
- %psrc0 = bitcast [4 x i64]* %a to i64*
- %psrc1 = getelementptr inbounds i64, i64* %psrc0, i64 1
- %d0 = load i64, i64* %psrc0
- %d1 = load i64, i64* %psrc1
- %pdst0 = getelementptr inbounds i64, i64* %pdst, i64 0
- %pdst1 = getelementptr inbounds i64, i64* %pdst, i64 1
- store i64 %d0, i64* %pdst0, align 8
- store i64 %d1, i64* %pdst1, align 8
- %psrcd = bitcast [4 x i64]* %a to <2 x i64>*
- %vec = load <2 x i64>, <2 x i64>* %psrcd
+ %psrc1 = getelementptr inbounds i64, ptr %a, i64 1
+ %d0 = load i64, ptr %a
+ %d1 = load i64, ptr %psrc1
+ %pdst1 = getelementptr inbounds i64, ptr %pdst, i64 1
+ store i64 %d0, ptr %pdst, align 8
+ store i64 %d1, ptr %pdst1, align 8
+ %vec = load <2 x i64>, ptr %a
ret <2 x i64> %vec
}
; RUN: llc -relocation-model=pic -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 <%s | FileCheck %s
-%struct.Record = type { %struct.Record*, i32 }
+%struct.Record = type { ptr, i32 }
@n = local_unnamed_addr global i32 500000000, align 4
@m = common global %struct.Record zeroinitializer, align 8
-@a = hidden local_unnamed_addr global %struct.Record* @m, align 8
+@a = hidden local_unnamed_addr global ptr @m, align 8
@o = common global %struct.Record zeroinitializer, align 8
-@b = hidden local_unnamed_addr global %struct.Record* @o, align 8
+@b = hidden local_unnamed_addr global ptr @o, align 8
define signext i32 @foo() local_unnamed_addr {
entry:
- %0 = load i64, i64* bitcast (%struct.Record** @b to i64*), align 8
- %1 = load i64*, i64** bitcast (%struct.Record** @a to i64**), align 8
- store i64 %0, i64* %1, align 8
- %2 = load i32, i32* @n, align 4
+ %0 = load i64, ptr @b, align 8
+ %1 = load ptr, ptr @a, align 8
+ store i64 %0, ptr %1, align 8
+ %2 = load i32, ptr @n, align 4
%cmp9 = icmp eq i32 %2, 0
br i1 %cmp9, label %for.end, label %for.body
for.body: ; preds = %entry, %for.body
%i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %3 = load %struct.Record*, %struct.Record** @a, align 8
- %IntComp = getelementptr inbounds %struct.Record, %struct.Record* %3, i64 0, i32 1
- store i32 5, i32* %IntComp, align 8
- %PtrComp2 = getelementptr inbounds %struct.Record, %struct.Record* %3, i64 0, i32 0
- %4 = load %struct.Record*, %struct.Record** %PtrComp2, align 8
- %IntComp3 = getelementptr inbounds %struct.Record, %struct.Record* %4, i64 0, i32 1
- store i32 5, i32* %IntComp3, align 8
- %PtrComp6 = getelementptr inbounds %struct.Record, %struct.Record* %4, i64 0, i32 0
- store %struct.Record* %4, %struct.Record** %PtrComp6, align 8
+ %3 = load ptr, ptr @a, align 8
+ %IntComp = getelementptr inbounds %struct.Record, ptr %3, i64 0, i32 1
+ store i32 5, ptr %IntComp, align 8
+ %4 = load ptr, ptr %3, align 8
+ %IntComp3 = getelementptr inbounds %struct.Record, ptr %4, i64 0, i32 1
+ store i32 5, ptr %IntComp3, align 8
+ store ptr %4, ptr %4, align 8
%inc = add nuw i32 %i.010, 1
%cmp = icmp ult i32 %inc, %2
br i1 %cmp, label %for.body, label %for.end
; CHECK: addi
; CHECK: bne
; CHECK: %true
-define i32 @testload(i32 *%ptr, i32 %sumin) {
+define i32 @testload(ptr %ptr, i32 %sumin) {
entry:
%sum1 = add i32 %sumin, 1
- %val1 = load i32, i32* %ptr
+ %val1 = load i32, ptr %ptr
%p = icmp eq i32 %sumin, 0
br i1 %p, label %true, label %end, !prof !1
true:
%sum2 = add i32 %sum1, 1
- %ptr2 = getelementptr i32, i32* %ptr, i32 1
- %val = load i32, i32* %ptr2
+ %ptr2 = getelementptr i32, ptr %ptr, i32 1
+ %val = load i32, ptr %ptr2
%val2 = add i32 %val1, %val
br label %end
end:
; CHECK: dcbt
; CHECK: addi
; CHECK: blr
-define i32 @testprefetch(i8 *%ptr, i32 %i) {
+define i32 @testprefetch(ptr %ptr, i32 %i) {
entry:
%val1 = add i32 %i, 1
- tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 1 )
+ tail call void @llvm.prefetch( ptr %ptr, i32 0, i32 3, i32 1 )
%p = icmp eq i32 %i, 0
br i1 %p, label %true, label %end
true:
%valmerge = phi i32 [ %val1, %entry], [ %val2, %true ]
ret i32 %valmerge
}
-declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr, i32, i32, i32) nounwind
!1 = !{!"branch_weights", i32 2, i32 1}
br i1 undef, label %for.body, label %for.body24.i
for.body24.i: ; preds = %for.body24.i, %for.body
- store double 1.000000e+00, double* undef, align 8
+ store double 1.000000e+00, ptr undef, align 8
br i1 undef, label %for.body24.i58, label %for.body24.i
for.body24.i58: ; preds = %for.body24.i58, %for.body24.i
- %arrayidx26.i55.1 = getelementptr inbounds [16000 x double], [16000 x double]* @b, i64 0, i64 undef
- store double 1.000000e+00, double* %arrayidx26.i55.1, align 8
+ %arrayidx26.i55.1 = getelementptr inbounds [16000 x double], ptr @b, i64 0, i64 undef
+ store double 1.000000e+00, ptr %arrayidx26.i55.1, align 8
br i1 undef, label %for.body24.i64, label %for.body24.i58
for.body24.i64: ; preds = %for.body24.i64, %for.body24.i58
%1 = call <512 x i1> @llvm.ppc.mma.xxsetaccz()
%2 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
%3 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %4 = getelementptr inbounds <512 x i1>, ptr %res, i64 0
- %5 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
- store <512 x i1> %2, ptr %4, align 64
- store <512 x i1> %3, ptr %5, align 64
+ %4 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
+ store <512 x i1> %2, ptr %res, align 64
+ store <512 x i1> %3, ptr %4, align 64
ret void
}
%1 = call <512 x i1> @llvm.ppc.mma.xxsetaccz()
%2 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
%3 = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
- %4 = getelementptr inbounds <512 x i1>, ptr %res, i64 0
- %5 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
- store <512 x i1> %2, ptr %4, align 64
- store <512 x i1> %3, ptr %5, align 64
+ %4 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
+ store <512 x i1> %2, ptr %res, align 64
+ store <512 x i1> %3, ptr %4, align 64
ret void
}
%0 = call <512 x i1> @llvm.ppc.mma.xxsetaccz()
%1 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
%2 = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %0, <16 x i8> %vc, <16 x i8> %vc)
- %3 = getelementptr inbounds <512 x i1>, ptr %res, i64 0
- %4 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
- store <512 x i1> %1, ptr %3, align 64
- store <512 x i1> %2, ptr %4, align 64
+ %3 = getelementptr inbounds <512 x i1>, ptr %res, i64 1
+ store <512 x i1> %1, ptr %res, align 64
+ store <512 x i1> %2, ptr %3, align 64
ret void
}
; CHECK-NEXT: blr
test_mtvscr_entry:
%0 = alloca <4 x i32>
- %1 = load <4 x i32>, <4 x i32>* %0
+ %1 = load <4 x i32>, ptr %0
call void @llvm.ppc.altivec.mtvscr(<4 x i32> %1)
ret void
}
test_mfvscr_entry:
%0 = alloca <8 x i16>
%1 = call <8 x i16> @llvm.ppc.altivec.mfvscr()
- store <8 x i16> %1, <8 x i16>* %0
+ store <8 x i16> %1, ptr %0
ret void
}
; CHECK: mtvsrdd v2, 0, r3
}
-define <2 x i64> @noconst0(i64* %a, i64* %b) {
- %1 = load i64, i64* %a, align 8
- %2 = load i64, i64* %b, align 8
+define <2 x i64> @noconst0(ptr %a, ptr %b) {
+ %1 = load i64, ptr %a, align 8
+ %2 = load i64, ptr %b, align 8
%vecinit = insertelement <2 x i64> undef, i64 %2, i32 0
%vecinit1 = insertelement <2 x i64> %vecinit, i64 %1, i32 1
ret <2 x i64> %vecinit1
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-ITIN
-define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) {
+define void @bn_mul_comba8(ptr nocapture %r, ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: bn_mul_comba8:
; CHECK: mulhdu
; CHECK-NEXT: mulld
; CHECK-ITIN-NEXT: mulld
; CHECK-ITIN-NEXT: mulhdu
- %1 = load i64, i64* %a, align 8
+ %1 = load i64, ptr %a, align 8
%conv = zext i64 %1 to i128
- %2 = load i64, i64* %b, align 8
+ %2 = load i64, ptr %b, align 8
%conv2 = zext i64 %2 to i128
%mul = mul nuw i128 %conv2, %conv
%shr = lshr i128 %mul, 64
- %agep = getelementptr inbounds i64, i64* %a, i64 1
- %3 = load i64, i64* %agep, align 8
+ %agep = getelementptr inbounds i64, ptr %a, i64 1
+ %3 = load i64, ptr %agep, align 8
%conv14 = zext i64 %3 to i128
%mul15 = mul nuw i128 %conv14, %conv
%add17 = add i128 %mul15, %shr
%shr19 = lshr i128 %add17, 64
%conv20 = trunc i128 %shr19 to i64
- %bgep = getelementptr inbounds i64, i64* %b, i64 1
- %4 = load i64, i64* %bgep, align 8
+ %bgep = getelementptr inbounds i64, ptr %b, i64 1
+ %4 = load i64, ptr %bgep, align 8
%conv28 = zext i64 %4 to i128
%mul31 = mul nuw i128 %conv28, %conv2
%conv32 = and i128 %add17, 18446744073709551615
%add37 = add i64 %conv36, %conv20
%cmp38 = icmp ult i64 %add37, %conv36
%conv148 = zext i1 %cmp38 to i64
- store i64 %conv148, i64* %r, align 8
+ store i64 %conv148, ptr %r, align 8
ret void
}
define void @single_m() nounwind {
entry:
- call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
+ call void asm "foo $1,$0", "=*m,*m"(ptr elementtype(i32) @mout0, ptr elementtype(i32) @min1) nounwind
ret void
}
entry:
%out0 = alloca i32, align 4
%index = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %index, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %index, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_i() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r,i"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_n() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r,n"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_E() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r,E"(double 1.000000e+001) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @single_F() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r,F"(double 1.000000e+000) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @single_s() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %3, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r,X"(ptr @marray) nounwind
+ store i32 %3, ptr %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
- store i32 %4, i32* %out0, align 4
+ store i32 %4, ptr %out0, align 4
%5 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+000) nounwind
- store i32 %5, i32* %out0, align 4
+ store i32 %5, ptr %out0, align 4
ret void
}
define void @single_p() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,r"(ptr @marray) nounwind
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_m() nounwind {
entry:
- %tmp = load i32, i32* @min1, align 4
- call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
+ %tmp = load i32, ptr @min1, align 4
+ call void asm "foo $1,$0", "=*m|r,m|r"(ptr elementtype(i32) @mout0, i32 %tmp) nounwind
ret void
}
entry:
%out0 = alloca i32, align 4
%index = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %index, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %index, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_i() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|i"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_n() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|n"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_E() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r|r,r|E"(double 1.000000e+001) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @multi_F() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r|r,r|F"(double 1.000000e+000) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @multi_s() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %3, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(ptr @marray) nounwind
+ store i32 %3, ptr %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
- store i32 %4, i32* %out0, align 4
+ store i32 %4, ptr %out0, align 4
%5 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+000) nounwind
- store i32 %5, i32* %out0, align 4
+ store i32 %5, ptr %out0, align 4
ret void
}
define void @multi_p() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(ptr @marray) nounwind
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_m() nounwind {
entry:
- call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
+ call void asm "foo $1,$0", "=*m,*m"(ptr elementtype(i32) @mout0, ptr elementtype(i32) @min1) nounwind
ret void
}
entry:
%out0 = alloca i32, align 4
%index = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %index, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %index, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_i() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r,i"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_n() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r,n"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @single_E() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r,E"(double 1.000000e+001) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @single_F() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r,F"(double 1.000000e+000) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @single_s() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %3, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r,X"(ptr @marray) nounwind
+ store i32 %3, ptr %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
- store i32 %4, i32* %out0, align 4
+ store i32 %4, ptr %out0, align 4
%5 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+000) nounwind
- store i32 %5, i32* %out0, align 4
+ store i32 %5, ptr %out0, align 4
ret void
}
define void @single_p() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,r"(ptr @marray) nounwind
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_m() nounwind {
entry:
- %tmp = load i32, i32* @min1, align 4
- call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
+ %tmp = load i32, ptr @min1, align 4
+ call void asm "foo $1,$0", "=*m|r,m|r"(ptr elementtype(i32) @mout0, i32 %tmp) nounwind
ret void
}
entry:
%out0 = alloca i32, align 4
%index = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %index, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %index, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* %in1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_i() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|i"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_n() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|n"(i32 1) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 %0, ptr %out0, align 4
ret void
}
define void @multi_E() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r|r,r|E"(double 1.000000e+001) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @multi_F() nounwind {
entry:
%out0 = alloca double, align 8
- store double 0.000000e+000, double* %out0, align 8
+ store double 0.000000e+000, ptr %out0, align 8
; No lowering support.
; %0 = call double asm "foo $1,$0", "=r|r,r|F"(double 1.000000e+000) nounwind
-; store double %0, double* %out0, align 8
+; store double %0, ptr %out0, align 8
ret void
}
define void @multi_s() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
ret void
}
entry:
%out0 = alloca i32, align 4
%in1 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- store i32 1, i32* %in1, align 4
- %tmp = load i32, i32* %in1, align 4
+ store i32 0, ptr %out0, align 4
+ store i32 1, ptr %in1, align 4
+ %tmp = load i32, ptr %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
- store i32 %0, i32* %out0, align 4
- %tmp1 = load i32, i32* @min1, align 4
+ store i32 %0, ptr %out0, align 4
+ %tmp1 = load i32, ptr @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
- store i32 %1, i32* %out0, align 4
+ store i32 %1, ptr %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
- store i32 %2, i32* %out0, align 4
- %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %3, i32* %out0, align 4
+ store i32 %2, ptr %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(ptr @marray) nounwind
+ store i32 %3, ptr %out0, align 4
%4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
- store i32 %4, i32* %out0, align 4
+ store i32 %4, ptr %out0, align 4
%5 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+000) nounwind
- store i32 %5, i32* %out0, align 4
+ store i32 %5, ptr %out0, align 4
ret void
}
; [review] Diff hunk: inline-asm test with the "=r|r,r|r" alternative-constraint
; pair, passing a global array address as the input operand. '-'/'+' lines are
; the typed-pointer -> opaque-pointer (`ptr`) rewrite; the explicit
; getelementptr-to-element-0 constant folds to `ptr @marray` under opaque pointers.
define void @multi_p() nounwind {
entry:
%out0 = alloca i32, align 4
- store i32 0, i32* %out0, align 4
- %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @marray, i32 0, i32 0)) nounwind
- store i32 %0, i32* %out0, align 4
+ store i32 0, ptr %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(ptr @marray) nounwind
+ store i32 %0, ptr %out0, align 4
ret void
}
; [review] Diff hunks from a pwr8 powerpc64le test with autogenerated FileCheck
; assertions. Only the changed regions are visible; intervening basic blocks
; (including the ones defining %inc, %InnerInd.1, %block4, %block8 and the
; function's closing brace) are elided by the diff context — do not assume the
; visible blocks are the whole function.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs < %s | FileCheck %s
; Signature change: i32* parameters become opaque `ptr`; noalias/signext kept.
-define signext i32 @test(i32* noalias %PtrA, i32* noalias %PtrB, i32 signext %LenA, i32 signext %LenB) #0 {
+define signext i32 @test(ptr noalias %PtrA, ptr noalias %PtrB, i32 signext %LenA, i32 signext %LenB) #0 {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 6, 0
; Returns PtrA[%inc - 1]; %inc is defined in an elided block.
if.then: ; preds = %block4
%sub = sub nsw i32 %inc, 1
%idxprom = sext i32 %sub to i64
- %arrayidx = getelementptr inbounds i32, i32* %PtrA, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %PtrA, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
ret i32 %0
if.end: ; preds = %block4
block5: ; preds = %if.end
%inc1 = add nsw i32 %InnerInd.1, 1
%idxprom2 = sext i32 %InnerInd.1 to i64
- %arrayidx3 = getelementptr inbounds i32, i32* %PtrB, i64 %idxprom2
- %1 = load i32, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %PtrB, i64 %idxprom2
+ %1 = load i32, ptr %arrayidx3, align 4
%tobool = icmp ne i32 %1, 0
br i1 %tobool, label %if.then4, label %if.end9
; PtrB[%inc1] = PtrA[%inc], then loop back to %block4.
if.then4: ; preds = %block5
%idxprom5 = sext i32 %inc to i64
- %arrayidx6 = getelementptr inbounds i32, i32* %PtrA, i64 %idxprom5
- %2 = load i32, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %PtrA, i64 %idxprom5
+ %2 = load i32, ptr %arrayidx6, align 4
%idxprom7 = sext i32 %inc1 to i64
- %arrayidx8 = getelementptr inbounds i32, i32* %PtrB, i64 %idxprom7
- store i32 %2, i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, ptr %PtrB, i64 %idxprom7
+ store i32 %2, ptr %arrayidx8, align 4
br label %block4
if.end9: ; preds = %block5
; In-place increment: PtrA[%inc] += 1.
block6: ; preds = %if.end9
%idxprom10 = sext i32 %inc to i64
- %arrayidx11 = getelementptr inbounds i32, i32* %PtrA, i64 %idxprom10
- %3 = load i32, i32* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds i32, ptr %PtrA, i64 %idxprom10
+ %3 = load i32, ptr %arrayidx11, align 4
%inc12 = add nsw i32 %3, 1
- store i32 %inc12, i32* %arrayidx11, align 4
+ store i32 %inc12, ptr %arrayidx11, align 4
br label %block8
block8: ; preds = %block6
; [review] Diff hunk: llvm.used test globals. Under opaque pointers the
; `bitcast (i32* @X to i8*)` constant expression in @llvm.used becomes a plain
; `ptr @X`, and the element type of the array changes from i8* to ptr.
; The CHECK lines verify @X is emitted as a 4-byte zero despite being weak.
; CHECK: .long 0
; CHECK: .size X, 4
-@X = weak global i32 0 ; <i32*> [#uses=1]
-@.str = internal constant [4 x i8] c"t.c\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32* @X to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
+@X = weak global i32 0 ; <ptr> [#uses=1]
+@.str = internal constant [4 x i8] c"t.c\00", section "llvm.metadata" ; <ptr> [#uses=1]
+@llvm.used = appending global [1 x ptr] [ ptr @X ], section "llvm.metadata" ; <ptr> [#uses=0]
; [review] Diff hunk: single basic block of a function whose definition is
; elided by the diff (%arg.ssa, %.810.us, %BB_2..%BB_12 live outside this view).
; Only the load's pointer operand changes: `i32* undef` -> `ptr undef`.
BB_1: ; preds = %BB_12, %BB_4
%bcount.1.us = phi i64 [ %.810.us, %BB_4 ], [ 0, %BB_12 ]
%0 = add i64 %arg.ssa, %bcount.1.us
- %.568.us = load i32, i32* undef, align 4
+ %.568.us = load i32, ptr undef, align 4
%.15.i.us = icmp slt i32 0, %.568.us
br i1 %.15.i.us, label %BB_3, label %BB_2
; [review] Diff hunk: load an i64 through the pointer argument and convert it
; to double with sitofp (exercises PPC int->FP conversion lowering).
; Only the parameter type and load pointer change to opaque `ptr`.
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind readonly
-define double @test1(i64* nocapture readonly %x) #0 {
+define double @test1(ptr nocapture readonly %x) #0 {
entry:
- %0 = load i64, i64* %x, align 8
+ %0 = load i64, ptr %x, align 8
%conv = sitofp i64 %0 to double
ret double %conv
}
; [review] Diff hunk: i32 variant of the sitofp test above — load i32 through
; the pointer argument, convert to double. The function's closing brace falls
; outside this diff context. CHECK verifies the function ends in a plain blr.
; Function Attrs: nounwind readonly
-define double @test2(i32* nocapture readonly %x) #0 {
+define double @test2(ptr nocapture readonly %x) #0 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%conv = sitofp i32 %0 to double
ret double %conv
; CHECK: blr
; [review] Diff hunk: with opaque pointers the function-pointer bitcast around
; calls to varargs-declared @bar/@car is no longer needed, so the calls become
; direct. Surrounding blocks (entry, the rest of if.end) are elided.
if.then: ; preds = %entry
- tail call void bitcast (void (...)* @bar to void ()*)() #0
+ tail call void @bar() #0
br label %if.end
if.else: ; preds = %entry
- tail call void bitcast (void (...)* @car to void ()*)() #0
+ tail call void @car() #0
br label %if.end
if.end: ; preds = %if.else, %if.then
; CHECK-NEXT: # %bb.1: # %for.body.i.i.i.i.i.i.i
entry:
- %0 = load float*, float** undef, align 8
- %1 = load i64, i64* undef, align 8
- %add.ptr.i.i.i.i = getelementptr inbounds float, float* %0, i64 undef
- %2 = ptrtoint float* %add.ptr.i.i.i.i to i64
+ %0 = load ptr, ptr undef, align 8
+ %1 = load i64, ptr undef, align 8
+ %add.ptr.i.i.i.i = getelementptr inbounds float, ptr %0, i64 undef
+ %2 = ptrtoint ptr %add.ptr.i.i.i.i to i64
%and.i.i.i.i.i.i.i = and i64 %2, 3
%tobool.i.i.i.i.i.i.i = icmp eq i64 %and.i.i.i.i.i.i.i, 0
%cmp.i.i.i.i.i.i.i = icmp slt i64 0, %1
}
; [review] Diff hunk: constrained FP-to-int conversions (fp128 and ppc_fp128,
; signed and unsigned, i32 and i64) with fpexcept.ignore; the MIR-level CHECK
; lines verify the nofpexcept flag is attached. Stores are volatile so each
; conversion result is kept. Only pointer types change to opaque `ptr`.
; Verify nofpexcept is set to constrained conversions when ignoring exceptions
-define void @fptoint_nofpexcept(ppc_fp128 %p, fp128 %m, i32* %addr1, i64* %addr2) {
+define void @fptoint_nofpexcept(ppc_fp128 %p, fp128 %m, ptr %addr1, ptr %addr2) {
; CHECK-LABEL: name: fptoint_nofpexcept
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm
entry:
%conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv1, i32* %addr1, align 4
+ store volatile i32 %conv1, ptr %addr1, align 4
%conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv2, i32* %addr1, align 4
+ store volatile i32 %conv2, ptr %addr1, align 4
%conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
- store volatile i64 %conv3, i64* %addr2, align 8
+ store volatile i64 %conv3, ptr %addr2, align 8
%conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
- store volatile i64 %conv4, i64* %addr2, align 8
+ store volatile i64 %conv4, ptr %addr2, align 8
%conv5 = tail call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv5, i32* %addr1, align 4
+ store volatile i32 %conv5, ptr %addr1, align 4
%conv6 = tail call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore") #0
- store volatile i32 %conv6, i32* %addr1, align 4
+ store volatile i32 %conv6, ptr %addr1, align 4
ret void
}
; [review] Diff hunk: debug-info test — DEBUG_VALUE locations for the pointer
; parameters must survive codegen. llvm.dbg.value metadata operands and the
; load/store pointer operands switch to opaque `ptr`. %conv2's definition is
; elided by the diff context (only its dbg.value use is visible here).
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
; Function Attrs: nounwind
-define dso_local void @test(float* nocapture readonly %Fptr, <4 x float>* nocapture %Vptr) local_unnamed_addr #0 !dbg !10 {
+define dso_local void @test(ptr nocapture readonly %Fptr, ptr nocapture %Vptr) local_unnamed_addr #0 !dbg !10 {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #DEBUG_VALUE: test:Fptr <- $x3
; CHECK-NEXT: .loc 1 4 1 is_stmt 1
; CHECK-NEXT: blr
entry:
- call void @llvm.dbg.value(metadata float* %Fptr, metadata !19, metadata !DIExpression()), !dbg !22
- call void @llvm.dbg.value(metadata <4 x float>* %Vptr, metadata !20, metadata !DIExpression()), !dbg !22
- %0 = load float, float* %Fptr, align 4, !dbg !23, !tbaa !24
+ call void @llvm.dbg.value(metadata ptr %Fptr, metadata !19, metadata !DIExpression()), !dbg !22
+ call void @llvm.dbg.value(metadata ptr %Vptr, metadata !20, metadata !DIExpression()), !dbg !22
+ %0 = load float, ptr %Fptr, align 4, !dbg !23, !tbaa !24
%conv = fpext float %0 to double, !dbg !28
%sub = fsub double 1.000000e+00, %conv, !dbg !29
%sub1 = fadd double %sub, -4.300000e+00, !dbg !30
call void @llvm.dbg.value(metadata float %conv2, metadata !21, metadata !DIExpression()), !dbg !22
%vecinit4 = insertelement <4 x float> <float poison, float 0.000000e+00, float 0.000000e+00, float poison>, float %conv2, i32 0, !dbg !32
%vecinit5 = insertelement <4 x float> %vecinit4, float %0, i32 3, !dbg !32
- store <4 x float> %vecinit5, <4 x float>* %Vptr, align 16, !dbg !33, !tbaa !34
+ store <4 x float> %vecinit5, ptr %Vptr, align 16, !dbg !33, !tbaa !34
ret void, !dbg !35
}
; [review] Diff hunk: interior of a vectorized loop (function header elided).
; Widened <16 x i8> loads/stores through undef pointers with tbaa/alias.scope
; metadata; intermediate values %4..%18 are in elided context. Only the
; pointee-typed pointer operands change to opaque `ptr`.
br label %vector.body
vector.body: ; preds = %vector.body, %entry
- %wide.load = load <16 x i8>, <16 x i8>* undef, align 1, !tbaa !1, !alias.scope !4
+ %wide.load = load <16 x i8>, ptr undef, align 1, !tbaa !1, !alias.scope !4
%0 = zext <16 x i8> %wide.load to <16 x i32>
- %wide.load279 = load <16 x i8>, <16 x i8>* undef, align 1, !tbaa !1, !alias.scope !4
+ %wide.load279 = load <16 x i8>, ptr undef, align 1, !tbaa !1, !alias.scope !4
%1 = zext <16 x i8> %wide.load279 to <16 x i32>
%2 = add nuw nsw <16 x i32> %1, %0
%3 = add nuw nsw <16 x i32> %2, zeroinitializer
%21 = ashr <16 x i32> %20, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
%22 = select <16 x i1> undef, <16 x i32> %21, <16 x i32> %19
%23 = trunc <16 x i32> %22 to <16 x i8>
- store <16 x i8> %23, <16 x i8>* undef, align 1, !tbaa !1, !alias.scope !7, !noalias !9
+ store <16 x i8> %23, ptr undef, align 1, !tbaa !1, !alias.scope !7, !noalias !9
br label %vector.body
}
; [review] Diff hunk: round-trip a <4 x float> argument through a 16-byte
; aligned stack slot and return it (exercises vector spill/reload lowering).
; Only the store/load pointer operands change to opaque `ptr`.
define <4 x float> @bar(<4 x float> %v) nounwind {
entry:
%v.addr = alloca <4 x float>, align 16
- store <4 x float> %v, <4 x float>* %v.addr, align 16
- %0 = load <4 x float>, <4 x float>* %v.addr, align 16
+ store <4 x float> %v, ptr %v.addr, align 16
+ %0 = load <4 x float>, ptr %v.addr, align 16
ret <4 x float> %0
}