; CHECK-LABEL: @test_builtin_ppc_compare_and_swaplp(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-; CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
-; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg weak volatile i64* [[A_ADDR]], i64 [[B:%.*]], i64 [[C:%.*]] monotonic monotonic, align 8
+; CHECK-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg weak volatile ptr [[A_ADDR]], i64 [[B:%.*]], i64 [[C:%.*]] monotonic monotonic, align 8
; CHECK-NEXT: ret void
;
entry:
%a.addr = alloca i64, align 8
%b.addr = alloca i64, align 8
%c.addr = alloca i64, align 8
- store i64 %a, i64* %a.addr, align 8
- store i64 %b, i64* %b.addr, align 8
- store i64 %c, i64* %c.addr, align 8
- %0 = load i64, i64* %c.addr, align 8
- %1 = load i64, i64* %b.addr, align 8
- %2 = cmpxchg weak volatile i64* %a.addr, i64 %1, i64 %0 monotonic monotonic, align 8
+ store i64 %a, ptr %a.addr, align 8
+ store i64 %b, ptr %b.addr, align 8
+ store i64 %c, ptr %c.addr, align 8
+ %0 = load i64, ptr %c.addr, align 8
+ %1 = load i64, ptr %b.addr, align 8
+ %2 = cmpxchg weak volatile ptr %a.addr, i64 %1, i64 %0 monotonic monotonic, align 8
%3 = extractvalue { i64, i1 } %2, 0
%4 = extractvalue { i64, i1 } %2, 1
- store i64 %3, i64* %b.addr, align 8
+ store i64 %3, ptr %b.addr, align 8
ret void
}
-define dso_local void @test_builtin_ppc_compare_and_swaplp_loop(i64* %a) {
+define dso_local void @test_builtin_ppc_compare_and_swaplp_loop(ptr %a) {
; CHECK-LABEL: @test_builtin_ppc_compare_and_swaplp_loop(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call i64 bitcast (i64 (...)* @bar to i64 ()*)()
+; CHECK-NEXT: [[CALL:%.*]] = tail call i64 @bar()
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
; CHECK-NEXT: [[X_0:%.*]] = phi i64 [ [[CALL]], [[ENTRY:%.*]] ], [ [[TMP1:%.*]], [[DO_BODY]] ]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[X_0]], 1
-; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg weak volatile i64* [[A:%.*]], i64 [[X_0]], i64 [[ADD]] monotonic monotonic, align 8
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg weak volatile ptr [[A:%.*]], i64 [[X_0]], i64 [[ADD]] monotonic monotonic, align 8
; CHECK-NEXT: [[TMP1]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT: br i1 [[TMP2]], label [[DO_BODY]], label [[DO_END:%.*]]
; CHECK-NEXT: ret void
;
entry:
- %a.addr = alloca i64*, align 8
+ %a.addr = alloca ptr, align 8
%x = alloca i64, align 8
- store i64* %a, i64** %a.addr, align 8
- %call = call i64 bitcast (i64 (...)* @bar to i64 ()*)()
- store i64 %call, i64* %x, align 8
+ store ptr %a, ptr %a.addr, align 8
+ %call = call i64 @bar()
+ store i64 %call, ptr %x, align 8
br label %do.body
do.body: ; preds = %do.cond, %entry
br label %do.cond
do.cond: ; preds = %do.body
- %0 = load i64*, i64** %a.addr, align 8
- %1 = load i64, i64* %x, align 8
+ %0 = load ptr, ptr %a.addr, align 8
+ %1 = load i64, ptr %x, align 8
%add = add nsw i64 %1, 1
- %2 = load i64*, i64** %a.addr, align 8
- %3 = load i64, i64* %x, align 8
- %4 = cmpxchg weak volatile i64* %2, i64 %3, i64 %add monotonic monotonic, align 8
+ %2 = load ptr, ptr %a.addr, align 8
+ %3 = load i64, ptr %x, align 8
+ %4 = cmpxchg weak volatile ptr %2, i64 %3, i64 %add monotonic monotonic, align 8
%5 = extractvalue { i64, i1 } %4, 0
%6 = extractvalue { i64, i1 } %4, 1
- store i64 %5, i64* %x, align 8
+ store i64 %5, ptr %x, align 8
%tobool = icmp ne i1 %6, false
br i1 %tobool, label %do.body, label %do.end
; In this case, we want to use OR_rec instead of OR + CMPLWI.
; CHECK-LABEL: fn5
-define zeroext i32 @fn5(i32* %p1, i32* %p2) {
+define zeroext i32 @fn5(ptr %p1, ptr %p2) {
; CHECK: OR_rec
; CHECK-NOT: CMP
; CHECK: BCC
- %v1 = load i32, i32* %p1
- %v2 = load i32, i32* %p2
+ %v1 = load i32, ptr %p1
+ %v2 = load i32, ptr %p2
%1 = or i32 %v1, %v2
%2 = icmp eq i32 %1, 0
br i1 %2, label %foo, label %bar
; against a non-zero value.
; CHECK-LABEL: fn6
; fn6: scans backward from %p (up to 2 bytes) for a byte with the sign bit
; clear (icmp sgt i8 X, -1, i.e. X >= 0). Returns a pointer to the first such
; byte (p-1, then p-2), or null if neither qualifies.
; The CHECK lines verify the backend folds the sign test into EXTSB_rec
; (record-form extend) so no separate CMP is emitted before the branch.
; -/+ pairs below are the typed-pointer -> opaque-pointer (ptr) migration diff.
-define i8* @fn6(i8* readonly %p) {
+define ptr @fn6(ptr readonly %p) {
; CHECK: LBZU
; CHECK: EXTSB_rec
; CHECK-NOT: CMP
; CHECK: BCC
entry:
- %incdec.ptr = getelementptr inbounds i8, i8* %p, i64 -1
- %0 = load i8, i8* %incdec.ptr
+ %incdec.ptr = getelementptr inbounds i8, ptr %p, i64 -1
+ %0 = load i8, ptr %incdec.ptr
%cmp = icmp sgt i8 %0, -1
br i1 %cmp, label %out, label %if.end
if.end:
- %incdec.ptr2 = getelementptr inbounds i8, i8* %p, i64 -2
- %1 = load i8, i8* %incdec.ptr2
+ %incdec.ptr2 = getelementptr inbounds i8, ptr %p, i64 -2
+ %1 = load i8, ptr %incdec.ptr2
%cmp4 = icmp sgt i8 %1, -1
br i1 %cmp4, label %out, label %cleanup
out:
; %p.addr.0 = whichever of p-1 / p-2 passed the non-negative test.
- %p.addr.0 = phi i8* [ %incdec.ptr, %entry ], [ %incdec.ptr2, %if.end ]
+ %p.addr.0 = phi ptr [ %incdec.ptr, %entry ], [ %incdec.ptr2, %if.end ]
br label %cleanup
cleanup:
; null when both probed bytes were negative (came straight from if.end).
- %retval.0 = phi i8* [ %p.addr.0, %out ], [ null, %if.end ]
- ret i8* %retval.0
+ %retval.0 = phi ptr [ %p.addr.0, %out ], [ null, %if.end ]
+ ret ptr %retval.0
}
br i1 %cmp, label %if.then, label %if.end
if.then:
- tail call void bitcast (void (...)* @callv to void ()*)()
+ tail call void @callv()
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.else
if.then:
- tail call void bitcast (void (...)* @callv to void ()*)()
+ tail call void @callv()
br label %if.end4
if.else:
br i1 %cmp1, label %if.then2, label %if.end4
if.then2:
- tail call void bitcast (void (...)* @callv to void ()*)()
+ tail call void @callv()
br label %if.end4
if.end4:
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; foo: stores (a - b) to *%c and returns the signed max of a and b.
; Tests that the backend reuses the subtract's record form (cmpw shown in
; CHECK) instead of emitting a separate compare for the select.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+define signext i32 @foo(i32 signext %a, i32 signext %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cmpw 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i32 %a, %b
- store i32 %sub, i32* %c, align 4
+ store i32 %sub, ptr %c, align 4
%cmp = icmp sgt i32 %a, %b
%cond = select i1 %cmp, i32 %a, i32 %b
ret i32 %cond
}
; foo2: stores (a << b) to *%c and returns (a << b) > 0 as a zext'd i1.
; Checks the shift's record form feeds the compare (slw in CHECK).
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+define signext i32 @foo2(i32 signext %a, i32 signext %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foo2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: slw 4, 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%shl = shl i32 %a, %b
- store i32 %shl, i32* %c, align 4
+ store i32 %shl, ptr %c, align 4
%cmp = icmp sgt i32 %shl, 0
%conv = zext i1 %cmp to i32
ret i32 %conv
}
; fool: 64-bit variant of foo — stores (a - b) to *%c, returns signed max(a, b).
; CHECK expects the record-form subtract (sub.) to set CR for the select.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @fool(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: fool:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub. 6, 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8
+ store i64 %sub, ptr %c, align 8
%cmp = icmp sgt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
}
; foolb: stores (a - b) to *%c, returns signed min(a, b) (sle select).
; Same sub. reuse as fool but with the inverted condition direction.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foolb(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foolb:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub. 6, 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8
+ store i64 %sub, ptr %c, align 8
%cmp = icmp sle i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
}
; foolc: stores (b - a) to *%c, returns signed max(a, b).
; The stored difference is reversed relative to the compare, so CHECK
; expects sub. with swapped operands (6, 4, 3).
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foolc(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foolc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub. 6, 4, 3
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i64 %b, %a
- store i64 %sub, i64* %c, align 8
+ store i64 %sub, ptr %c, align 8
%cmp = icmp sgt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
}
; foold: stores (b - a) to *%c, returns signed min(a, b) (slt select).
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foold(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foold:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub. 6, 4, 3
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i64 %b, %a
- store i64 %sub, i64* %c, align 8
+ store i64 %sub, ptr %c, align 8
%cmp = icmp slt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
}
; foold2: stores (a - b) to *%c, returns signed min(a, b) (slt select).
; Difference direction matches the compare, so CHECK has sub. 6, 3, 4.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foold2(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foold2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sub. 6, 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = sub nsw i64 %a, %b
- store i64 %sub, i64* %c, align 8
+ store i64 %sub, ptr %c, align 8
%cmp = icmp slt i64 %a, %b
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
}
; foo2l: 64-bit variant of foo2 — stores (a << b) to *%c and returns
; (a << b) > 0 zero-extended to i64. CHECK expects sld feeding the compare.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
+define i64 @foo2l(i64 %a, i64 %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foo2l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sld 4, 3, 4
; CHECK-NO-ISEL-NEXT: blr
entry:
%shl = shl i64 %a, %b
- store i64 %shl, i64* %c, align 8
+ store i64 %shl, ptr %c, align 8
%cmp = icmp sgt i64 %shl, 0
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
}
; food: FP variant — stores (a - b) to *%c, returns a if a > b (ordered)
; else b. CHECK expects fsub before the select lowering.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define double @food(double %a, double %b, double* nocapture %c) #0 {
+define double @food(double %a, double %b, ptr nocapture %c) #0 {
; CHECK-LABEL: food:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsub 0, 1, 2
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = fsub double %a, %b
- store double %sub, double* %c, align 8
+ store double %sub, ptr %c, align 8
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
}
-define float @foof(float %a, float %b, float* nocapture %c) #0 {
+define float @foof(float %a, float %b, ptr nocapture %c) #0 {
; CHECK-LABEL: foof:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsubs 0, 1, 2
; CHECK-NO-ISEL-NEXT: blr
entry:
%sub = fsub float %a, %b
- store float %sub, float* %c, align 4
+ store float %sub, ptr %c, align 4
%cmp = fcmp ogt float %a, %b
%cond = select i1 %cmp, float %a, float %b
ret float %cond
declare i64 @llvm.ctpop.i64(i64);
-define signext i64 @fooct(i64 signext %a, i64 signext %b, i64* nocapture %c) #0 {
+define signext i64 @fooct(i64 signext %a, i64 signext %b, ptr nocapture %c) #0 {
; CHECK-LABEL: fooct:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis 6, 21845
entry:
%sub = sub nsw i64 %a, %b
%subc = call i64 @llvm.ctpop.i64(i64 %sub)
- store i64 %subc, i64* %c, align 4
+ store i64 %subc, ptr %c, align 4
%cmp = icmp sgt i64 %subc, 0
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | not grep ori
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | not grep rlwimi
; test1: derives a new address from %P via integer bit manipulation —
; (P & 0xFFFF0000) | ((P >> 5) & 2040) — and loads an i32 from it.
; Paired with the RUN lines above: the masks/shift should fold into the
; addressing so no `ori`/`rlwimi` appears in the output.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i32 @test1(i8* %P) {
- %tmp.2.i = ptrtoint i8* %P to i32 ; <i32> [#uses=2]
+define i32 @test1(ptr %P) {
+ %tmp.2.i = ptrtoint ptr %P to i32 ; <i32> [#uses=2]
%tmp.4.i = and i32 %tmp.2.i, -65536 ; <i32> [#uses=1]
%tmp.10.i = lshr i32 %tmp.2.i, 5 ; <i32> [#uses=1]
%tmp.11.i = and i32 %tmp.10.i, 2040 ; <i32> [#uses=1]
%tmp.13.i = or i32 %tmp.11.i, %tmp.4.i ; <i32> [#uses=1]
- %tmp.14.i = inttoptr i32 %tmp.13.i to i32* ; <i32*> [#uses=1]
- %tmp.3 = load i32, i32* %tmp.14.i ; <i32> [#uses=1]
+ %tmp.14.i = inttoptr i32 %tmp.13.i to ptr ; <ptr> [#uses=1]
+ %tmp.3 = load i32, ptr %tmp.14.i ; <i32> [#uses=1]
ret i32 %tmp.3
}
; test2: loads an i32 from address ((P << 4) | 2); the shift-and-or should
; fold into the address computation without `ori`/`rlwimi` (see RUN lines).
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
define i32 @test2(i32 %P) {
%tmp.2 = shl i32 %P, 4 ; <i32> [#uses=1]
%tmp.3 = or i32 %tmp.2, 2 ; <i32> [#uses=1]
- %tmp.4 = inttoptr i32 %tmp.3 to i32* ; <i32*> [#uses=1]
- %tmp.5 = load i32, i32* %tmp.4 ; <i32> [#uses=1]
+ %tmp.4 = inttoptr i32 %tmp.3 to ptr ; <ptr> [#uses=1]
+ %tmp.5 = load i32, ptr %tmp.4 ; <i32> [#uses=1]
ret i32 %tmp.5
}
@_ZL3num = external dso_local unnamed_addr global float, align 4
-define dso_local void @main() local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @main() local_unnamed_addr personality ptr @__gxx_personality_v0 {
; CHECK-P9-LABEL: main:
; CHECK-P9: # %bb.0: # %bb
; CHECK-P9-NEXT: mflr r0
; CHECK-P9-NEXT: bne+ cr0, .LBB0_1
; CHECK-P9-NEXT: .LBB0_2: # %bb16
bb:
- %i = tail call noalias dereferenceable_or_null(6451600) i8* @malloc()
- %i1 = bitcast i8* %i to float*
+ %i = tail call noalias dereferenceable_or_null(6451600) ptr @malloc()
br label %bb2
bb2: ; preds = %bb5, %bb
bb5: ; preds = %bb2
%i6 = mul nuw nsw i64 %i3, 1270
%i7 = add nuw nsw i64 %i6, 0
- %i8 = getelementptr inbounds float, float* %i1, i64 %i7
- store float undef, float* %i8, align 4
+ %i8 = getelementptr inbounds float, ptr %i, i64 %i7
+ store float undef, ptr %i8, align 4
%i9 = add nuw nsw i64 %i3, 3
- %i10 = load float, float* @_ZL3num, align 4
+ %i10 = load float, ptr @_ZL3num, align 4
%i11 = fmul float %i10, 0x3E00000000000000
%i12 = mul nuw nsw i64 %i9, 1270
%i13 = add nuw nsw i64 %i12, 0
- %i14 = getelementptr inbounds float, float* %i1, i64 %i13
- store float %i11, float* %i14, align 4
+ %i14 = getelementptr inbounds float, ptr %i, i64 %i13
+ store float %i11, ptr %i14, align 4
%i15 = add nuw nsw i64 %i3, 5
br label %bb2
declare i32 @__gxx_personality_v0(...)
-declare i8* @malloc() local_unnamed_addr
+declare ptr @malloc() local_unnamed_addr
; RUN: llc %s -mtriple=powerpc -o - | FileCheck %s
; RUN: llc %s -mtriple=powerpc64 -o - | FileCheck %s
; no__mulodi4: verifies no libcall to __mulodi4/__multi3 is emitted (CHECK-NOT).
; NOTE(review): this hunk is truncated — %2, %3 and %4 are defined in lines
; cut from this diff chunk; presumably they come from a multiply-with-overflow
; sequence whose truncated result is %4 — confirm against the full test file.
; The visible tail stores the i32 result to *%c and returns overflow-or-mismatch.
; -/+ pairs are the typed-pointer -> opaque-pointer migration diff.
-define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
+define i1 @no__mulodi4(i32 %a, i64 %b, ptr %c) {
; CHECK-LABEL: no__mulodi4
; CHECK-NOT: bl __mulodi4
; CHECK-NOT: bl __multi3
%5 = sext i32 %4 to i64
%6 = icmp ne i64 %3, %5
%7 = or i1 %2, %6
- store i32 %4, i32* %c, align 4
+ store i32 %4, ptr %c, align 4
ret i1 %7
}
%b.1.i = phi i32 [ %b.2.i, %sw.epilog.i ], [ 0, %while.body ]
%c.1.i = phi i32 [ %c.2.i, %sw.epilog.i ], [ 291, %while.body ]
%d.1.i = phi i32 [ %d.2.i, %sw.epilog.i ], [ 1179648, %while.body ]
- %0 = load i8, i8* null, align 1
+ %0 = load i8, ptr null, align 1
%cmp1.i = icmp eq i8 %0, 1
br i1 %cmp1.i, label %if.then.i, label %if.else.i
entry:
%cmp = icmp slt i8 %a, 1
%conv1 = zext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp slt i32 %a, 1
%conv1 = zext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i16 %a, 1
%conv1 = zext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i8 %a, 1
%conv1 = zext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, 1
%conv1 = zext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, 1
%conv1 = zext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, 1
%conv1 = zext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, 1
%conv1 = zext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, 1
%conv1 = zext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ugt i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp slt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp slt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i8 %a, 1
%conv1 = zext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, 1
%conv1 = zext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, 1
%conv1 = zext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i8 %a, 1
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp slt i32 %a, 1
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i16 %a, 1
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i8 %a, 1
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, 1
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, 1
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, 1
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, 1
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, 1
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ugt i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i8 %a, 0
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, 0
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, 0
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = sext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = sext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp slt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp slt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp slt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp uge i8 %a, 1
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp uge i32 %a, 1
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp uge i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp uge i16 %a, 1
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ne i8 %a, 1
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @globalVal, align 1
+ store i8 %conv1, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, 1
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @globalVal2, align 4
+ store i32 %conv1, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, 1
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @globalVal4, align 2
+ store i16 %conv1, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal
+ store i8 %conv3, ptr @globalVal
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2
+ store i32 %sub, ptr @globalVal2
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4
+ store i16 %conv3, ptr @globalVal4
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal
+ store i8 %conv3, ptr @globalVal
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2
+ store i32 %sub, ptr @globalVal2
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4
+ store i16 %conv3, ptr @globalVal4
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%conv = sext i1 %cmp to i32
- store i32 %conv, i32* @globalVal2, align 4
+ store i32 %conv, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal
+ store i8 %conv3, ptr @globalVal
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2
+ store i32 %sub, ptr @globalVal2
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4
+ store i16 %conv3, ptr @globalVal4
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal, align 1
+ store i8 %conv3, ptr @globalVal, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2, align 4
+ store i32 %sub, ptr @globalVal2, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4, align 2
+ store i16 %conv3, ptr @globalVal4, align 2
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @globalVal
+ store i8 %conv3, ptr @globalVal
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @globalVal2
+ store i32 %sub, ptr @globalVal2
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3
+ store i64 %conv1, ptr @globalVal3
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @globalVal4
+ store i16 %conv3, ptr @globalVal4
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @globalVal3, align 8
+ store i64 %conv1, ptr @globalVal3, align 8
ret void
}
; bit of any CR field is spilled. We need to test the spilling of a CR bit
; other than the LT bit. Hence this test case is rather complex.
-%0 = type { i32, %1*, %0*, [1 x i8], i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%1 = type { %1*, %0*, i32 }
-%2 = type { [200 x i8], [200 x i8], %3*, %3*, %4*, %4*, %4*, %4*, %4*, i64 }
-%3 = type { i64, i32, %3*, %3*, %3*, %3*, %4*, %4*, %4*, %4*, i64, i32, i32 }
-%4 = type { i32, i64, %3*, %3*, i16, %4*, %4*, i64, i64 }
+%0 = type { i32, ptr, ptr, [1 x i8], ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
+%1 = type { ptr, ptr, i32 }
+%2 = type { [200 x i8], [200 x i8], ptr, ptr, ptr, ptr, ptr, ptr, ptr, i64 }
+%3 = type { i64, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i64, i32, i32 }
+%4 = type { i32, i64, ptr, ptr, i16, ptr, ptr, i64, i64 }
-define dso_local double @P10_Spill_CR_EQ(%2* %arg) local_unnamed_addr #0 {
+define dso_local double @P10_Spill_CR_EQ(ptr %arg) local_unnamed_addr #0 {
; CHECK-LABEL: P10_Spill_CR_EQ:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: mfcr r12
; CHECK-NEXT: xsadddp f1, f0, f1
; CHECK-NEXT: blr
bb:
- %tmp = getelementptr inbounds %4, %4* null, i64 undef, i32 7
- %tmp1 = load i64, i64* undef, align 8
- %tmp2 = load i64, i64* null, align 8
- %tmp3 = load i64, i64* %tmp, align 8
+ %tmp = getelementptr inbounds %4, ptr null, i64 undef, i32 7
+ %tmp1 = load i64, ptr undef, align 8
+ %tmp2 = load i64, ptr null, align 8
+ %tmp3 = load i64, ptr %tmp, align 8
%tmp4 = icmp eq i64 %tmp1, 0
%tmp5 = icmp eq i64 %tmp2, 0
%tmp6 = icmp eq i64 %tmp3, 0
br i1 %tmp4, label %bb12, label %bb10
bb10: ; preds = %bb
- %tmp11 = load i32, i32* undef, align 8
+ %tmp11 = load i32, ptr undef, align 8
br label %bb12
bb12: ; preds = %bb10, %bb
br i1 %tmp5, label %bb16, label %bb14
bb14: ; preds = %bb12
- %tmp15 = load i32, i32* undef, align 8
+ %tmp15 = load i32, ptr undef, align 8
br label %bb16
bb16: ; preds = %bb14, %bb12
br i1 %tmp6, label %bb20, label %bb18
bb18: ; preds = %bb16
- %tmp19 = load i32, i32* undef, align 8
+ %tmp19 = load i32, ptr undef, align 8
br label %bb20
bb20: ; preds = %bb18, %bb16
br i1 %tmp31, label %bb34, label %bb36
bb34: ; preds = %bb20
- %tmp35 = load i64, i64* undef, align 8
+ %tmp35 = load i64, ptr undef, align 8
br label %bb36
bb36: ; preds = %bb34, %bb20
br i1 %tmp33, label %bb38, label %bb40
bb38: ; preds = %bb36
- %tmp39 = load i64, i64* undef, align 8
+ %tmp39 = load i64, ptr undef, align 8
br label %bb40
bb40: ; preds = %bb38, %bb36
br i1 %tmp47, label %bb48, label %bb50
bb48: ; preds = %bb40
- %tmp49 = load %3*, %3** undef, align 8
+ %tmp49 = load ptr, ptr undef, align 8
br label %bb50
bb50: ; preds = %bb48, %bb40
- %tmp51 = phi %3* [ undef, %bb40 ], [ %tmp49, %bb48 ]
+ %tmp51 = phi ptr [ undef, %bb40 ], [ %tmp49, %bb48 ]
br i1 %tmp45, label %bb52, label %bb54
bb52: ; preds = %bb50
- %tmp53 = load i32, i32* undef, align 8
+ %tmp53 = load i32, ptr undef, align 8
br label %bb54
bb54: ; preds = %bb52, %bb50
br i1 %tmp46, label %bb56, label %bb58
bb56: ; preds = %bb54
- %tmp57 = load i32, i32* undef, align 8
+ %tmp57 = load i32, ptr undef, align 8
br label %bb58
bb58: ; preds = %bb56, %bb54
%tmp59 = phi i32 [ undef, %bb54 ], [ %tmp57, %bb56 ]
- %tmp60 = getelementptr inbounds %3, %3* %tmp51, i64 0, i32 12
- %tmp61 = load i32, i32* %tmp60, align 8
+ %tmp60 = getelementptr inbounds %3, ptr %tmp51, i64 0, i32 12
+ %tmp61 = load i32, ptr %tmp60, align 8
%tmp62 = icmp slt i32 %tmp55, 1
%tmp63 = icmp slt i32 %tmp59, 1
%tmp64 = icmp slt i32 %tmp61, 1
br i1 %tmp65, label %bb68, label %bb70
bb68: ; preds = %bb58
- %tmp69 = load i64, i64* undef, align 8
+ %tmp69 = load i64, ptr undef, align 8
br label %bb70
bb70: ; preds = %bb68, %bb58
%tmp71 = phi i64 [ undef, %bb58 ], [ %tmp69, %bb68 ]
- %tmp72 = load i64, i64* undef, align 8
+ %tmp72 = load i64, ptr undef, align 8
%tmp73 = xor i1 %tmp25, true
%tmp74 = xor i1 %tmp26, true
%tmp75 = xor i1 %tmp27, true
; CHECK-BE-NEXT: bc 4, 4*cr4+eq, .LBB0_34
; CHECK-BE-NEXT: b .LBB0_35
bb:
- %tmp = load i32, i32* undef, align 8
+ %tmp = load i32, ptr undef, align 8
%tmp1 = and i32 %tmp, 16
%tmp2 = icmp ne i32 %tmp1, 0
%tmp3 = and i32 %tmp, 32
bb5: ; preds = %bb63, %bb
%tmp6 = phi i32 [ 0, %bb ], [ %tmp64, %bb63 ]
%tmp7 = phi i1 [ %tmp4, %bb ], [ undef, %bb63 ]
- %tmp8 = load i32, i32* undef, align 8
+ %tmp8 = load i32, ptr undef, align 8
br i1 %tmp2, label %bb9, label %bb10
bb9: ; preds = %bb5
br i1 %tmp7, label %bb33, label %bb36
bb33: ; preds = %bb32
- %tmp34 = getelementptr inbounds i8, i8* null, i64 -1
- %tmp35 = select i1 %tmp12, i8* %tmp34, i8* null
- store i8 0, i8* %tmp35, align 1
+ %tmp34 = getelementptr inbounds i8, ptr null, i64 -1
+ %tmp35 = select i1 %tmp12, ptr %tmp34, ptr null
+ store i8 0, ptr %tmp35, align 1
br label %bb36
bb36: ; preds = %bb33, %bb32
br i1 %tmp30, label %bb37, label %bb38
bb37: ; preds = %bb36
- store i16 undef, i16* null, align 2
+ store i16 undef, ptr null, align 2
br label %bb38
bb38: ; preds = %bb37, %bb36
bb43: ; preds = %bb10, %bb10
call void @call_1()
- %tmp44 = getelementptr inbounds i8, i8* null, i64 -1
- %tmp45 = select i1 %tmp12, i8* %tmp44, i8* null
- store i8 0, i8* %tmp45, align 1
+ %tmp44 = getelementptr inbounds i8, ptr null, i64 -1
+ %tmp45 = select i1 %tmp12, ptr %tmp44, ptr null
+ store i8 0, ptr %tmp45, align 1
br label %bb63
bb46: ; preds = %bb46, %bb10
; other than the LT bit. Hence this test case is rather complex.
%0 = type { %1 }
-%1 = type { %0*, %0*, %0*, i32 }
+%1 = type { ptr, ptr, ptr, i32 }
@call_1 = external dso_local unnamed_addr global i32, align 4
-declare %0* @call_2() local_unnamed_addr
+declare ptr @call_2() local_unnamed_addr
declare i32 @call_3() local_unnamed_addr
declare void @call_4() local_unnamed_addr
; CHECK-BE-NEXT: .LBB0_13: # %bb3
; CHECK-BE-NEXT: .LBB0_14: # %bb2
bb:
- %tmp = tail call %0* @call_2()
- %tmp1 = icmp ne %0* %tmp, null
+ %tmp = tail call ptr @call_2()
+ %tmp1 = icmp ne ptr %tmp, null
switch i32 undef, label %bb4 [
i32 3, label %bb2
i32 2, label %bb3
unreachable
bb4: ; preds = %bb
- %tmp5 = load i64, i64* undef, align 8
+ %tmp5 = load i64, ptr undef, align 8
%tmp6 = trunc i64 %tmp5 to i32
%tmp7 = add i32 0, %tmp6
%tmp8 = icmp sgt i32 %tmp7, 0
%tmp9 = icmp eq i8 0, 0
%tmp10 = zext i1 %tmp9 to i32
- %tmp11 = icmp eq %0* %tmp, null
+ %tmp11 = icmp eq ptr %tmp, null
br label %bb12
bb12: ; preds = %bb38, %bb4
br label %bb24
bb24: ; preds = %bb23
- %tmp25 = load i32, i32* @call_1, align 4
+ %tmp25 = load i32, ptr @call_1, align 4
%tmp26 = icmp eq i32 %tmp25, 0
br i1 %tmp26, label %bb30, label %bb27
%0 = type { i32, [768 x i8], [768 x i8], [1024 x i8], [768 x i8], [768 x i8], [768 x i8], [768 x i8], [768 x i8], [1024 x i8], [1024 x i8], i32, i16, i16, i16, i16, i16, i16, i32, i32, i32, i16, i16, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, [64 x i8], i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, float, float, i32, i16, i16, float, i16, i16, i16, i16}
%1 = type opaque
-%2 = type { i8* }
-%3 = type { %3*, %3*, %4* (i8*)*, %2, i32, %2, %2*, i8*, double*, float*, i8*, i8*, %4* }
-%4 = type { %4*, %4*, %4*, i32, i32, i32, i32, i32, i8*, [3 x float], i8, [64 x i8] }
+%2 = type { ptr }
+%3 = type { ptr, ptr, ptr, %2, i32, %2, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%4 = type { ptr, ptr, ptr, i32, i32, i32, i32, i32, ptr, [3 x float], i8, [64 x i8] }
@global_1 = external dso_local unnamed_addr constant [1 x i8], align 1
@global_2 = external local_unnamed_addr global %0, align 8
-@global_3 = external local_unnamed_addr global i8* (i64, i8*)*, align 8
+@global_3 = external local_unnamed_addr global ptr, align 8
@global_4 = external dso_local unnamed_addr constant [14 x i8], align 1
-declare i8 @call_1(%1*) local_unnamed_addr
-declare i32 @call_2(%2*, %1*) local_unnamed_addr
-declare i32 @call_3(%2*, %1*) local_unnamed_addr
-declare %3* @call_4(%4*, i32, i32, i32, i32, i32, i16, i16, %2*, %1*, i32, float, float, float, float, i8*) local_unnamed_addr
-declare i32 @call_5(i8*) local_unnamed_addr
-declare i8 @call_6(%1*, i32) local_unnamed_addr
+declare i8 @call_1(ptr) local_unnamed_addr
+declare i32 @call_2(ptr, ptr) local_unnamed_addr
+declare i32 @call_3(ptr, ptr) local_unnamed_addr
+declare ptr @call_4(ptr, i32, i32, i32, i32, i32, i16, i16, ptr, ptr, i32, float, float, float, float, ptr) local_unnamed_addr
+declare i32 @call_5(ptr) local_unnamed_addr
+declare i8 @call_6(ptr, i32) local_unnamed_addr
-define dso_local void @P10_Spill_CR_UN(%2* %arg, %1* %arg1, i32 %arg2) local_unnamed_addr {
+define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unnamed_addr {
; CHECK-LABEL: P10_Spill_CR_UN:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: mfcr r12
; CHECK-BE-NEXT: .LBB0_19: # %bb55
bb:
%tmp = alloca [3 x i8], align 1
- %tmp3 = tail call zeroext i8 @call_1(%1* %arg1)
+ %tmp3 = tail call zeroext i8 @call_1(ptr %arg1)
%tmp4 = icmp ne i8 %tmp3, 0
- %tmp5 = tail call signext i32 @call_2(%2* %arg, %1* %arg1)
+ %tmp5 = tail call signext i32 @call_2(ptr %arg, ptr %arg1)
%tmp6 = and i32 %arg2, 16
%tmp7 = icmp ne i32 %tmp6, 0
br label %bb8
br i1 undef, label %bb9, label %bb11
bb9: ; preds = %bb8
- %tmp10 = call signext i32 @call_3(%2* %arg, %1* %arg1)
+ %tmp10 = call signext i32 @call_3(ptr %arg, ptr %arg1)
br label %bb12
bb11: ; preds = %bb8
br i1 %tmp18, label %bb37, label %bb19
bb19: ; preds = %bb16
- %tmp20 = getelementptr inbounds [3 x i8], [3 x i8]* %tmp, i64 0, i64 0
- %tmp21 = load i8* (i64, i8*)*, i8* (i64, i8*)** @global_3, align 8
- %tmp22 = call i8* %tmp21(i64 undef, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @global_4, i64 0, i64 0))
- %tmp23 = bitcast i8* %tmp22 to i32*
- %tmp24 = icmp eq i32* %tmp23, null
+ %tmp21 = load ptr, ptr @global_3, align 8
+ %tmp22 = call ptr %tmp21(i64 undef, ptr @global_4)
+ %tmp24 = icmp eq ptr %tmp22, null
%tmp25 = icmp eq i32 %tmp13, 0
%tmp26 = zext i32 %tmp5 to i64
br label %bb27
bb27: ; preds = %bb34, %bb19
- %tmp28 = call zeroext i8 @call_6(%1* %arg1, i32 signext undef)
- store i8 %tmp28, i8* %tmp20, align 1
+ %tmp28 = call zeroext i8 @call_6(ptr %arg1, i32 signext undef)
+ store i8 %tmp28, ptr %tmp, align 1
br label %bb29
bb29: ; preds = %bb27
br label %bb54
bb37: ; preds = %bb16
- %tmp38 = load i32, i32* undef, align 8
- %tmp39 = select i1 %tmp7, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @global_1, i64 0, i64 0), i8* null
+ %tmp38 = load i32, ptr undef, align 8
+ %tmp39 = select i1 %tmp7, ptr @global_1, ptr null
%tmp40 = icmp ne i32 %tmp38, 0
switch i32 undef, label %bb41 [
i32 1, label %bb42
br label %bb50
bb42: ; preds = %bb37, %bb37
- %tmp43 = call signext i32 @call_5(i8* %tmp39)
- %tmp44 = load i16, i16* getelementptr inbounds (%0, %0* @global_2, i64 0, i32 81), align 4
+ %tmp43 = call signext i32 @call_5(ptr %tmp39)
+ %tmp44 = load i16, ptr getelementptr inbounds (%0, ptr @global_2, i64 0, i32 81), align 4
%tmp45 = sitofp i16 %tmp44 to float
%tmp46 = select i1 %tmp40, float 1.750000e+00, float 1.500000e+00
%tmp47 = fmul fast float %tmp46, %tmp45
bb50: ; preds = %bb42, %bb41
%tmp51 = phi i32 [ %tmp49, %bb42 ], [ undef, %bb41 ]
%tmp52 = trunc i32 %tmp51 to i16
- %tmp53 = call %3* @call_4(%4* nonnull undef, i32 signext 1024, i32 signext 0, i32 signext %tmp38, i32 signext 0, i32 signext 0, i16 signext %tmp52, i16 signext undef, %2* %arg, %1* %arg1, i32 signext -1, float 0.000000e+00, float undef, float -1.000000e+00, float -1.000000e+00, i8* null)
+ %tmp53 = call ptr @call_4(ptr nonnull undef, i32 signext 1024, i32 signext 0, i32 signext %tmp38, i32 signext 0, i32 signext 0, i16 signext %tmp52, i16 signext undef, ptr %arg, ptr %arg1, i32 signext -1, float 0.000000e+00, float undef, float -1.000000e+00, float -1.000000e+00, ptr null)
br label %bb54
bb54: ; preds = %bb50, %bb36
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
; @foo: four independent selects keyed on the single condition (%a != 0),
; each stored to a distinct out-pointer %r1..%r4.
; Diff migrates typed pointers (i32*) to opaque 'ptr'; IR semantics unchanged.
-define void @foo(i32* nocapture %r1, i32* nocapture %r2, i32* nocapture %r3, i32* nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
+define void @foo(ptr nocapture %r1, ptr nocapture %r2, ptr nocapture %r3, ptr nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
entry:
%tobool = icmp ne i32 %a, 0
%cond = select i1 %tobool, i32 %b, i32 %c
- store i32 %cond, i32* %r1, align 4
+ store i32 %cond, ptr %r1, align 4
%cond5 = select i1 %tobool, i32 %b, i32 %d
- store i32 %cond5, i32* %r2, align 4
+ store i32 %cond5, ptr %r2, align 4
%add = add nsw i32 %b, 1
%sub = add nsw i32 %d, -2
%cond10 = select i1 %tobool, i32 %add, i32 %sub
- store i32 %cond10, i32* %r3, align 4
+ store i32 %cond10, ptr %r3, align 4
%add13 = add nsw i32 %b, 3
%sub15 = add nsw i32 %d, -5
%cond17 = select i1 %tobool, i32 %add13, i32 %sub15
- store i32 %cond17, i32* %r4, align 4
+ store i32 %cond17, ptr %r4, align 4
ret void
}
; CHECK-AIX-NEXT: lxvdsx 34, 0, 3
; CHECK-AIX-NEXT: blr
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%splat.splatinsert = insertelement <2 x double> undef, double %0, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %splat.splat
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: std r3, 0(r3)
entry:
- %0 = load <4 x i16>, <4 x i16>* bitcast ([4 x i16]* @best8x8mode to <4 x i16>*), align 2
- store <4 x i16> %0, <4 x i16>* undef, align 4
+ %0 = load <4 x i16>, ptr @best8x8mode, align 2
+ store <4 x i16> %0, ptr undef, align 4
unreachable
}
; CHECK-NEXT: stxv 34, 0(3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* @vull, align 16
- %1 = load <16 x i8>, <16 x i8>* @vuc, align 16
+ %0 = load <2 x i64>, ptr @vull, align 16
+ %1 = load <16 x i8>, ptr @vuc, align 16
%2 = call <2 x i64> @llvm.ppc.altivec.vbpermd(<2 x i64> %0, <16 x i8> %1)
- store <2 x i64> %2, <2 x i64>* @res_vull, align 16
+ store <2 x i64> %2, ptr @res_vull, align 16
ret void
}
declare <2 x i64> @llvm.ppc.altivec.vbpermd(<2 x i64>, <16 x i8>)
; assemble_pair
declare <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8>, <16 x i8>)
; @ass_pair: builds a 256-bit VSX register pair from two copies of %vc via
; llvm.ppc.vsx.assemble.pair and stores it to %ptr.
; Diff: <256 x i1>* parameter becomes opaque 'ptr'; store operand follows.
-define void @ass_pair(<256 x i1>* %ptr, <16 x i8> %vc) {
+define void @ass_pair(ptr %ptr, <16 x i8> %vc) {
; CHECK-LABEL: ass_pair:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmr v3, v2
; CHECK-BE-NOMMA-NEXT: blr
entry:
%0 = tail call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %vc, <16 x i8> %vc)
- store <256 x i1> %0, <256 x i1>* %ptr, align 32
+ store <256 x i1> %0, ptr %ptr, align 32
ret void
}
; disassemble_pair
declare { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1>)
; @disass_pair: loads a 256-bit pair from %ptr1, splits it with
; llvm.ppc.vsx.disassemble.pair, and stores the two <16 x i8> halves to
; %ptr2 and %ptr3. Diff: all three pointer params become opaque 'ptr'.
-define void @disass_pair(<256 x i1>* %ptr1, <16 x i8>* %ptr2, <16 x i8>* %ptr3) {
+define void @disass_pair(ptr %ptr1, ptr %ptr2, ptr %ptr3) {
; CHECK-LABEL: disass_pair:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v3, 0(r3)
; CHECK-BE-NOMMA-NEXT: stxv v3, 0(r5)
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = load <256 x i1>, <256 x i1>* %ptr1, align 32
+ %0 = load <256 x i1>, ptr %ptr1, align 32
%1 = tail call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %0)
%2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
%3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
- store <16 x i8> %2, <16 x i8>* %ptr2, align 16
- store <16 x i8> %3, <16 x i8>* %ptr3, align 16
+ store <16 x i8> %2, ptr %ptr2, align 16
+ store <16 x i8> %3, ptr %ptr3, align 16
ret void
}
; @test_ldst_1: paired load/store at offset 0 — expects lxvp/stxvp.
; Diff: the lxvp/stxvp intrinsics now take 'ptr' directly, so the two
; bitcasts to i8* are dropped and the value numbering shifts (%1 -> %0).
-define void @test_ldst_1(<256 x i1>* %vpp, <256 x i1>* %vp2) {
; CHECK-LABEL: test_ldst_1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvp vsp34, 0(r3)
; CHECK-BE-NOMMA-NEXT: stxvp vsp34, 0(r4)
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %0)
- %2 = bitcast <256 x i1>* %vp2 to i8*
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, i8* %2)
+ %0 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %vpp)
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %0, ptr %vp2)
ret void
}
; Intrinsic declarations updated to the opaque-pointer signatures.
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*)
-declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*)
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr)
+declare void @llvm.ppc.vsx.stxvp(<256 x i1>, ptr)
; @test_ldst_2: paired load/store at a runtime %offset — expects the
; indexed forms lxvpx/stxvpx. Diff drops the i8* bitcasts and applies the
; getelementptr straight to the opaque 'ptr' arguments.
-define void @test_ldst_2(<256 x i1>* %vpp, i64 %offset, <256 x i1>* %vp2) {
+define void @test_ldst_2(ptr %vpp, i64 %offset, ptr %vp2) {
; CHECK-LABEL: test_ldst_2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvpx vsp34, r3, r4
; CHECK-BE-NOMMA-NEXT: stxvpx vsp34, r5, r4
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = getelementptr i8, i8* %0, i64 %offset
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = bitcast <256 x i1>* %vp2 to i8*
- %4 = getelementptr i8, i8* %3, i64 %offset
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr i8, ptr %vpp, i64 %offset
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr i8, ptr %vp2, i64 %offset
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; @test_ldst_3: constant byte offset 18 (not a multiple of 16) — expects the
; prefixed forms plxvp/pstxvp. Same bitcast-elimination pattern as above.
-define void @test_ldst_3(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_3(ptr %vpp, ptr %vp2) {
; CHECK-LABEL: test_ldst_3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plxvp vsp34, 18(r3), 0
; CHECK-BE-NOMMA-NEXT: pstxvp vsp34, 18(r4), 0
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = getelementptr i8, i8* %0, i64 18
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = bitcast <256 x i1>* %vp2 to i8*
- %4 = getelementptr i8, i8* %3, i64 18
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr i8, ptr %vpp, i64 18
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr i8, ptr %vp2, i64 18
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; @test_ldst_4: constant byte offset 1 — expects plxvp/pstxvp.
; Diff drops the i8* bitcasts; GEPs index the opaque 'ptr' args directly.
-define void @test_ldst_4(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_4(ptr %vpp, ptr %vp2) {
; CHECK-LABEL: test_ldst_4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plxvp vsp34, 1(r3), 0
; CHECK-BE-NOMMA-NEXT: pstxvp vsp34, 1(r4), 0
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = getelementptr i8, i8* %0, i64 1
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = bitcast <256 x i1>* %vp2 to i8*
- %4 = getelementptr i8, i8* %3, i64 1
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr i8, ptr %vpp, i64 1
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr i8, ptr %vp2, i64 1
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; @test_ldst_5: constant byte offset 42 — expects plxvp/pstxvp.
; Same opaque-pointer migration pattern as test_ldst_3/4.
-define void @test_ldst_5(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_5(ptr %vpp, ptr %vp2) {
; CHECK-LABEL: test_ldst_5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plxvp vsp34, 42(r3), 0
; CHECK-BE-NOMMA-NEXT: pstxvp vsp34, 42(r4), 0
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = getelementptr i8, i8* %0, i64 42
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = bitcast <256 x i1>* %vp2 to i8*
- %4 = getelementptr i8, i8* %3, i64 42
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr i8, ptr %vpp, i64 42
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr i8, ptr %vp2, i64 42
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; @test_ldst_6: element GEP of 128 x <256 x i1> (= byte offset 4096, fits the
; 16-bit DQ-form) — expects plain lxvp/stxvp. The typed-pointer version also
; needed a bitcast after the GEP; the opaque version keeps only the GEP.
-define void @test_ldst_6(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_6(ptr %vpp, ptr %vp2) {
; CHECK-LABEL: test_ldst_6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvp vsp34, 4096(r3)
; CHECK-BE-NOMMA-NEXT: stxvp vsp34, 4096(r4)
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = getelementptr <256 x i1>, <256 x i1>* %vpp, i64 128
- %1 = bitcast <256 x i1>* %0 to i8*
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = getelementptr <256 x i1>, <256 x i1>* %vp2, i64 128
- %4 = bitcast <256 x i1>* %3 to i8*
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr <256 x i1>, ptr %vpp, i64 128
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr <256 x i1>, ptr %vp2, i64 128
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; @test_ldst_7: byte offset 32799 — too large for the 16-bit D/DQ form; the
; pre-existing FIXME notes a prefixed plxvp should be selected instead.
; Diff applies the same bitcast elimination as the other test_ldst cases.
-define void @test_ldst_7(<256 x i1>* %vpp, <256 x i1>* %vp2) {
+define void @test_ldst_7(ptr %vpp, ptr %vp2) {
; FIXME: A prefixed load (plxvp) is expected here as the offset in this
; test case is a constant that fits within 34-bits.
; CHECK-LABEL: test_ldst_7:
; CHECK-BE-NOMMA-NEXT: pstxvp vsp34, 32799(r4), 0
; CHECK-BE-NOMMA-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = getelementptr i8, i8* %0, i64 32799
- %2 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %1)
- %3 = bitcast <256 x i1>* %vp2 to i8*
- %4 = getelementptr i8, i8* %3, i64 32799
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %2, i8* %4)
+ %0 = getelementptr i8, ptr %vpp, i64 32799
+ %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %0)
+ %2 = getelementptr i8, ptr %vp2, i64 32799
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, ptr %2)
ret void
}
; CHECK: paddi r3, 0, .Ltmp0@PCREL, 1
; CHECK: bl helper@notoc
entry:
- tail call void @helper(i8* blockaddress(@blockaddress, %label))
+ tail call void @helper(ptr blockaddress(@blockaddress, %label))
br label %label
label: ; preds = %entry
ret void
}
-declare void @helper(i8*)
+declare void @helper(ptr)
; CHECK-BE-NEXT: lbz r3, GlobLd1@toc@l(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+ %0 = load i1, ptr @GlobLd1, align 1
ret i1 %0
}
; CHECK-BE-NEXT: lbz r3, GlobLd1@toc@l(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+ %0 = load i1, ptr @GlobLd1, align 1
ret i1 %0
}
; CHECK-BE-NEXT: stb r3, GlobSt1@toc@l(r4)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
- store i1 %0, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobSt1, i64 0, i64 0), align 1
+ %0 = load i1, ptr @GlobLd1, align 1
+ store i1 %0, ptr @GlobSt1, align 1
ret void
}
%1 = type { i64 }
@Glob1 = external dso_local global %1, align 8
@Glob2 = external dso_local unnamed_addr constant [11 x i8], align 1
-declare i32 @Decl(%1*, i8*) local_unnamed_addr #0
+declare i32 @Decl(ptr, ptr) local_unnamed_addr #0
define dso_local i1 @i32_ExtLoad_i1() local_unnamed_addr #0 {
; CHECK-LE-LABEL: i32_ExtLoad_i1:
; CHECK-BE-NEXT: mtlr r0
; CHECK-BE-NEXT: blr
bb:
- %i = call signext i32 @Decl(%1* nonnull dereferenceable(32) @Glob1, i8* getelementptr inbounds ([11 x i8], [11 x i8]* @Glob2, i64 0, i64 0)) #1
+ %i = call signext i32 @Decl(ptr nonnull dereferenceable(32) @Glob1, ptr @Glob2) #1
%i1 = icmp eq i32 %i, 0
- %i2 = load i1, i1* getelementptr inbounds ([20 x i1], [20 x i1]* @GlobLd1, i64 0, i64 0), align 1
+ %i2 = load i1, ptr @GlobLd1, align 1
%i3 = select i1 %i1, i1 false, i1 %i2
ret i1 %i3
}
entry:
%add = add nsw i32 %b, %a
tail call void asm sideeffect "li 2, 0", "~{r2}"()
- %0 = load i32, i32* @global_int, align 4
+ %0 = load i32, ptr @global_int, align 4
%add1 = add nsw i32 %add, %0
ret i32 %add1
}
; CHECK-LARGE: add r2, r2, r12
; CHECK-ALL: # %bb.0: # %entry
entry:
- %0 = load i32, i32* @global_int, align 4
+ %0 = load i32, ptr @global_int, align 4
ret i32 %0
}
; CHECK-O-NEXT: R_PPC64_REL24_NOTOC callee
define dso_local signext i32 @caller() local_unnamed_addr {
entry:
- %call = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
+ %call = tail call signext i32 @callee()
ret i32 %call
}
; CHECK-O-LABEL: ExternalSymbol
; CHECK-O: b
; CHECK-O-NEXT: R_PPC64_REL24_NOTOC memcpy
-define dso_local void @ExternalSymbol(i8* nocapture %out, i8* nocapture readonly %in, i64 %num) local_unnamed_addr {
+define dso_local void @ExternalSymbol(ptr nocapture %out, ptr nocapture readonly %in, i64 %num) local_unnamed_addr {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %out, i8* align 1 %in, i64 %num, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %out, ptr align 1 %in, i64 %num, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
; CHECK-S-LABEL: callerNoTail
; CHECK-O: blr
define dso_local signext i32 @callerNoTail() local_unnamed_addr {
entry:
- %call1 = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
- %call2 = tail call signext i32 bitcast (i32 (...)* @callee to i32 ()*)()
+ %call1 = tail call signext i32 @callee()
+ %call2 = tail call signext i32 @callee()
%add = add i32 %call1, %call2
ret i32 %add
}
@globalVar = common dso_local local_unnamed_addr global i32 0, align 4
@externGlobalVar = external local_unnamed_addr global i32, align 4
-@indirectCall = common dso_local local_unnamed_addr global i32 (i32)* null, align 8
+@indirectCall = common dso_local local_unnamed_addr global ptr null, align 8
; This function needs to remain as noinline.
; The compiler needs to know this function is local but must be forced to call
entry:
%add = add nsw i32 %b, %a
%call = tail call signext i32 @localCall(i32 signext %add)
- %0 = load i32, i32* @globalVar, align 4
+ %0 = load i32, ptr @globalVar, align 4
%mul = mul nsw i32 %0, %call
ret i32 %mul
}
entry:
%add = add nsw i32 %b, %a
%call = tail call signext i32 @localCall(i32 signext %add)
- %0 = load i32, i32* @externGlobalVar, align 4
+ %0 = load i32, ptr @externGlobalVar, align 4
%mul = mul nsw i32 %0, %call
ret i32 %mul
}
entry:
%add = add nsw i32 %b, %a
%call = tail call signext i32 @externCall(i32 signext %add)
- %0 = load i32, i32* @globalVar, align 4
+ %0 = load i32, ptr @globalVar, align 4
%mul = mul nsw i32 %0, %call
ret i32 %mul
}
entry:
%add = add nsw i32 %b, %a
%call = tail call signext i32 @externCall(i32 signext %add)
- %0 = load i32, i32* @externGlobalVar, align 4
+ %0 = load i32, ptr @externGlobalVar, align 4
%mul = mul nsw i32 %0, %call
ret i32 %mul
}
; CHECK-S-NEXT: extsw r3, r3
; CHECK-S-NEXT: b localCall@notoc
entry:
- %0 = load i32, i32* @globalVar, align 4
+ %0 = load i32, ptr @globalVar, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @localCall(i32 signext %add)
ret i32 %call
; CHECK-S-NEXT: extsw r3, r3
; CHECK-S-NEXT: b localCall@notoc
entry:
- %0 = load i32, i32* @externGlobalVar, align 4
+ %0 = load i32, ptr @externGlobalVar, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @localCall(i32 signext %add)
ret i32 %call
; CHECK-S-NEXT: extsw r3, r3
; CHECK-S-NEXT: b externCall@notoc
entry:
- %0 = load i32, i32* @globalVar, align 4
+ %0 = load i32, ptr @globalVar, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @externCall(i32 signext %add)
ret i32 %call
; CHECK-S-NEXT: extsw r3, r3
; CHECK-S-NEXT: b externCall@notoc
entry:
- %0 = load i32, i32* @externGlobalVar, align 4
+ %0 = load i32, ptr @externGlobalVar, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @externCall(i32 signext %add)
ret i32 %call
; CHECK-S-NEXT: blr
entry:
%add = add nsw i32 %b, %a
- %0 = load i32 (i32)*, i32 (i32)** @indirectCall, align 8
+ %0 = load ptr, ptr @indirectCall, align 8
%call = tail call signext i32 %0(i32 signext %add)
- %1 = load i32, i32* @globalVar, align 4
+ %1 = load i32, ptr @globalVar, align 4
%mul = mul nsw i32 %1, %call
ret i32 %mul
}
; CHECK-S-NEXT: blr
entry:
%add = add nsw i32 %b, %a
- %0 = load i32 (i32)*, i32 (i32)** @indirectCall, align 8
+ %0 = load ptr, ptr @indirectCall, align 8
%call = tail call signext i32 %0(i32 signext %add)
- %1 = load i32, i32* @externGlobalVar, align 4
+ %1 = load i32, ptr @externGlobalVar, align 4
%mul = mul nsw i32 %1, %call
ret i32 %mul
}
-define dso_local signext i32 @IndirectCall3(i32 signext %a, i32 signext %b, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCall3(i32 signext %a, i32 signext %b, ptr nocapture %call_param) local_unnamed_addr {
; CHECK-ALL-LABEL: IndirectCall3:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: mflr r0
entry:
%add = add nsw i32 %b, %a
%call = tail call signext i32 %call_param(i32 signext %add)
- %0 = load i32, i32* @globalVar, align 4
+ %0 = load i32, ptr @globalVar, align 4
%mul = mul nsw i32 %0, %call
ret i32 %mul
}
-define dso_local signext i32 @IndirectCallNoGlobal(i32 signext %a, i32 signext %b, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCallNoGlobal(i32 signext %a, i32 signext %b, ptr nocapture %call_param) local_unnamed_addr {
; CHECK-ALL-LABEL: IndirectCallNoGlobal:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: mflr r0
ret i32 %add
}
-define dso_local signext i32 @IndirectCallOnly(i32 signext %a, i32 (i32)* nocapture %call_param) local_unnamed_addr {
+define dso_local signext i32 @IndirectCallOnly(i32 signext %a, ptr nocapture %call_param) local_unnamed_addr {
; CHECK-ALL-LABEL: IndirectCallOnly:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: mtctr r4
@valInt = external global i32, align 4
@valUnsigned = external local_unnamed_addr global i32, align 4
@valLong = external local_unnamed_addr global i64, align 8
-@ptr = external local_unnamed_addr global i32*, align 8
+@ptr = external local_unnamed_addr global ptr, align 8
@array = external local_unnamed_addr global [10 x i32], align 4
@structure = external local_unnamed_addr global %struct.Struct, align 4
-@ptrfunc = external local_unnamed_addr global void (...)*, align 8
+@ptrfunc = external local_unnamed_addr global ptr, align 8
define dso_local signext i32 @ReadGlobalVarChar() local_unnamed_addr {
; LE-LABEL: ReadGlobalVarChar:
; BE-NEXT: lbz r3, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i8, i8* @valChar, align 1
+ %0 = load i8, ptr @valChar, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; BE-NEXT: stb r4, 0(r3)
; BE-NEXT: blr
entry:
- store i8 3, i8* @valChar, align 1
+ store i8 3, ptr @valChar, align 1
ret void
}
; BE-NEXT: lha r3, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i16, i16* @valShort, align 2
+ %0 = load i16, ptr @valShort, align 2
%conv = sext i16 %0 to i32
ret i32 %conv
}
; BE-NEXT: sth r4, 0(r3)
; BE-NEXT: blr
entry:
- store i16 3, i16* @valShort, align 2
+ store i16 3, ptr @valShort, align 2
ret void
}
; BE-NEXT: lwa r3, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i32, i32* @valInt, align 4
+ %0 = load i32, ptr @valInt, align 4
ret i32 %0
}
; BE-NEXT: stw r4, 0(r3)
; BE-NEXT: blr
entry:
- store i32 33, i32* @valInt, align 4
+ store i32 33, ptr @valInt, align 4
ret void
}
; BE-NEXT: lwa r3, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i32, i32* @valUnsigned, align 4
+ %0 = load i32, ptr @valUnsigned, align 4
ret i32 %0
}
; BE-NEXT: stw r4, 0(r3)
; BE-NEXT: blr
entry:
- store i32 33, i32* @valUnsigned, align 4
+ store i32 33, ptr @valUnsigned, align 4
ret void
}
; BE-NEXT: lwa r3, 4(r3)
; BE-NEXT: blr
entry:
- %0 = load i64, i64* @valLong, align 8
+ %0 = load i64, ptr @valLong, align 8
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; BE-NEXT: std r4, 0(r3)
; BE-NEXT: blr
entry:
- store i64 3333, i64* @valLong, align 8
+ store i64 3333, ptr @valLong, align 8
ret void
}
-define dso_local i32* @ReadGlobalPtr() local_unnamed_addr {
+define dso_local ptr @ReadGlobalPtr() local_unnamed_addr {
; LE-LABEL: ReadGlobalPtr:
; LE: # %bb.0: # %entry
; LE-NEXT: pld r3, ptr@got@pcrel(0), 1
; BE-NEXT: ld r3, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i32*, i32** @ptr, align 8
- ret i32* %0
+ %0 = load ptr, ptr @ptr, align 8
+ ret ptr %0
}
define dso_local void @WriteGlobalPtr() local_unnamed_addr {
; BE-NEXT: stw r4, 0(r3)
; BE-NEXT: blr
entry:
- %0 = load i32*, i32** @ptr, align 8
- store i32 3, i32* %0, align 4
+ %0 = load ptr, ptr @ptr, align 8
+ store i32 3, ptr %0, align 4
ret void
}
-define dso_local nonnull i32* @GlobalVarAddr() local_unnamed_addr {
+define dso_local nonnull ptr @GlobalVarAddr() local_unnamed_addr {
; LE-LABEL: GlobalVarAddr:
; LE: # %bb.0: # %entry
; LE-NEXT: pld r3, valInt@got@pcrel(0), 1
; BE-NEXT: pld r3, valInt@got@pcrel(0), 1
; BE-NEXT: blr
entry:
- ret i32* @valInt
+ ret ptr @valInt
}
define dso_local signext i32 @ReadGlobalArray() local_unnamed_addr {
; BE-NEXT: lwa r3, 12(r3)
; BE-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array, i64 0, i64 3), align 4
ret i32 %0
}
; BE-NEXT: stw r4, 12(r3)
; BE-NEXT: blr
entry:
- store i32 5, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array, i64 0, i64 3), align 4
+ store i32 5, ptr getelementptr inbounds ([10 x i32], ptr @array, i64 0, i64 3), align 4
ret void
}
; BE-NEXT: lwa r3, 4(r3)
; BE-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @structure, i64 0, i32 2), align 4
+ %0 = load i32, ptr getelementptr inbounds (%struct.Struct, ptr @structure, i64 0, i32 2), align 4
ret i32 %0
}
; BE-NEXT: stw r4, 4(r3)
; BE-NEXT: blr
entry:
- store i32 3, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @structure, i64 0, i32 2), align 4
+ store i32 3, ptr getelementptr inbounds (%struct.Struct, ptr @structure, i64 0, i32 2), align 4
ret void
}
; BE-NEXT: bctr
; BE-NEXT: #TC_RETURNr8 ctr 0
entry:
- %0 = load void ()*, void ()** bitcast (void (...)** @ptrfunc to void ()**), align 8
+ %0 = load ptr, ptr @ptrfunc, align 8
tail call void %0()
ret void
}
; BE-NEXT: std r4, 0(r3)
; BE-NEXT: blr
entry:
- store void (...)* @function, void (...)** @ptrfunc, align 8
+ store ptr @function, ptr @ptrfunc, align 8
ret void
}
; is passed as a parameter in this test.
; Function Attrs: noinline
-define dso_local void @IndirectCallExternFuncPtr(void ()* nocapture %ptrfunc) {
+define dso_local void @IndirectCallExternFuncPtr(ptr nocapture %ptrfunc) {
; CHECK-LABEL: IndirectCallExternFuncPtr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtctr r3
define dso_local void @FuncPtrPassAsParam() {
entry:
- tail call void @IndirectCallExternFuncPtr(void ()* nonnull @Function)
+ tail call void @IndirectCallExternFuncPtr(ptr nonnull @Function)
ret void
}
; OFF-NEXT: lbz r3, 0(r3)
; OFF-NEXT: blr
entry:
- %0 = load i8, i8* @input8, align 1
+ %0 = load i8, ptr @input8, align 1
ret i8 %0
}
@outputVi64 = external local_unnamed_addr global <2 x i64>, align 16
@ArrayIn = external global [10 x i32], align 4
@ArrayOut = external local_unnamed_addr global [10 x i32], align 4
-@IntPtrIn = external local_unnamed_addr global i32*, align 8
-@IntPtrOut = external local_unnamed_addr global i32*, align 8
-@FuncPtrIn = external local_unnamed_addr global void (...)*, align 8
-@FuncPtrOut = external local_unnamed_addr global void (...)*, align 8
+@IntPtrIn = external local_unnamed_addr global ptr, align 8
+@IntPtrOut = external local_unnamed_addr global ptr, align 8
+@FuncPtrIn = external local_unnamed_addr global ptr, align 8
+@FuncPtrOut = external local_unnamed_addr global ptr, align 8
define dso_local void @ReadWrite8() local_unnamed_addr #0 {
; In this test the stb r3, 0(r4) cannot be optimized because it
; CHECK-NEXT: stb r3, 0(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @input8, align 1
- store i8 %0, i8* @output8, align 1
+ %0 = load i8, ptr @input8, align 1
+ store i8 %0, ptr @output8, align 1
ret void
}
; CHECK-NEXT: sth r3, 0(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* @input16, align 2
- store i16 %0, i16* @output16, align 2
+ %0 = load i16, ptr @input16, align 2
+ store i16 %0, ptr @output16, align 2
ret void
}
; CHECK-NEXT: stw r3, 0(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @input32, align 4
- store i32 %0, i32* @output32, align 4
+ %0 = load i32, ptr @input32, align 4
+ store i32 %0, ptr @output32, align 4
ret void
}
; CHECK-NEXT: std r3, 0(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* @input64, align 8
- store i64 %0, i64* @output64, align 8
+ %0 = load i64, ptr @input64, align 8
+ store i64 %0, ptr @output64, align 8
ret void
}
; CHECK-NEXT: stxv vs0, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i128, i128* @input128, align 16
- store i128 %0, i128* @output128, align 16
+ %0 = load i128, ptr @input128, align 16
+ store i128 %0, ptr @output128, align 16
ret void
}
; CHECK-NEXT: stfs f0, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* @inputf32, align 4
+ %0 = load float, ptr @inputf32, align 4
%add = fadd float %0, 0x400851EB80000000
- store float %add, float* @outputf32, align 4
+ store float %add, ptr @outputf32, align 4
ret void
}
; CHECK-NEXT: stfd f0, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load double, double* @inputf64, align 8
+ %0 = load double, ptr @inputf64, align 8
%add = fadd double %0, 6.800000e+00
- store double %add, double* @outputf64, align 8
+ store double %add, ptr @outputf64, align 8
ret void
}
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+ %0 = load <4 x i32>, ptr @inputVi32, align 16
%vecins = insertelement <4 x i32> %0, i32 45, i32 1
- store <4 x i32> %vecins, <4 x i32>* @outputVi32, align 16
+ store <4 x i32> %vecins, ptr @outputVi32, align 16
ret void
}
; CHECK-NEXT: stxv vs0, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* @inputVi64, align 16
- store <2 x i64> %0, <2 x i64>* @outputVi64, align 16
+ %0 = load <2 x i64>, ptr @inputVi64, align 16
+ store <2 x i64> %0, ptr @outputVi64, align 16
ret void
}
; CHECK-NEXT: stw r3, 8(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 7), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 7), align 4
%add = add nsw i32 %0, 42
- store i32 %add, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayOut, i64 0, i64 2), align 4
+ store i32 %add, ptr getelementptr inbounds ([10 x i32], ptr @ArrayOut, i64 0, i64 2), align 4
ret void
}
; CHECK-NEXT: stw r4, 24(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 3), align 4
%add = add nsw i32 %0, 8
- store i32 %add, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 6), align 4
+ store i32 %add, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 6), align 4
ret void
}
; CHECK-NEXT: stw r3, 136(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32*, i32** @IntPtrIn, align 8
- %arrayidx = getelementptr inbounds i32, i32* %0, i64 54
- %1 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 12
- %2 = load i32, i32* %arrayidx1, align 4
+ %0 = load ptr, ptr @IntPtrIn, align 8
+ %arrayidx = getelementptr inbounds i32, ptr %0, i64 54
+ %1 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %0, i64 12
+ %2 = load i32, ptr %arrayidx1, align 4
%add = add nsw i32 %2, %1
- %3 = load i32*, i32** @IntPtrOut, align 8
- %arrayidx2 = getelementptr inbounds i32, i32* %3, i64 34
- store i32 %add, i32* %arrayidx2, align 4
+ %3 = load ptr, ptr @IntPtrOut, align 8
+ %arrayidx2 = getelementptr inbounds i32, ptr %3, i64 34
+ store i32 %add, ptr %arrayidx2, align 4
ret void
}
; CHECK-NEXT: std r3, 0(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* bitcast (void (...)** @FuncPtrIn to i64*), align 8
- store i64 %0, i64* bitcast (void (...)** @FuncPtrOut to i64*), align 8
+ %0 = load i64, ptr @FuncPtrIn, align 8
+ store i64 %0, ptr @FuncPtrOut, align 8
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- store void (...)* @Callee, void (...)** @FuncPtrOut, align 8
+ store ptr @Callee, ptr @FuncPtrOut, align 8
ret void
}
; CHECK-NEXT: bctr
; CHECK-NEXT: #TC_RETURNr8 ctr 0
entry:
- %0 = load void ()*, void ()** bitcast (void (...)** @FuncPtrIn to void ()**), align 8
+ %0 = load ptr, ptr @FuncPtrIn, align 8
tail call void %0()
ret void
}
; CHECK-NEXT: lwa r3, 4(r3)
; CHECK-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+ %0 = load <4 x i32>, ptr @inputVi32, align 16
%vecext = extractelement <4 x i32> %0, i32 1
ret i32 %vecext
}
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
- tail call void bitcast (void (...)* @Callee to void ()*)()
- %1 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+ %0 = load <4 x i32>, ptr @inputVi32, align 16
+ tail call void @Callee()
+ %1 = load <4 x i32>, ptr @inputVi32, align 16
%2 = extractelement <4 x i32> %1, i32 2
%3 = extractelement <4 x i32> %0, i64 1
%4 = add nsw i32 %2, %3
- tail call void bitcast (void (...)* @Callee to void ()*)()
- %5 = load <4 x i32>, <4 x i32>* @inputVi32, align 16
+ tail call void @Callee()
+ %5 = load <4 x i32>, ptr @inputVi32, align 16
%vecext2 = extractelement <4 x i32> %5, i32 0
%add3 = add nsw i32 %4, %vecext2
ret i32 %add3
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 4), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @ArrayIn, i64 0, i64 4), align 4
%add = add nsw i32 %0, %a
- %call = tail call signext i32 @getAddr(i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ArrayIn, i64 0, i64 0))
+ %call = tail call signext i32 @getAddr(ptr @ArrayIn)
%add1 = add nsw i32 %add, %call
ret i32 %add1
}
-declare signext i32 @getAddr(i32*) local_unnamed_addr
+declare signext i32 @getAddr(ptr) local_unnamed_addr
-define dso_local nonnull i32* @AddrTaken32() local_unnamed_addr #0 {
+define dso_local nonnull ptr @AddrTaken32() local_unnamed_addr #0 {
; CHECK-LABEL: AddrTaken32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pld r3, input32@got@pcrel(0), 1
; CHECK-NEXT: blr
entry:
- ret i32* @input32
+ ret ptr @input32
}
attributes #0 = { nounwind }
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @global, align 4
+ %0 = load i32, ptr @global, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @callee(i32 signext %add)
%mul = mul nsw i32 %call, %call
- store i32 %mul, i32* @global, align 4
+ store i32 %mul, ptr @global, align 4
ret void
}
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @global, align 4
+ %0 = load i32, ptr @global, align 4
%add = add nsw i32 %0, %a
%call = tail call signext i32 @callee(i32 signext %add)
ret i32 %call
; CHECK-O-NEXT: R_PPC64_PCREL34 array2+0x1c
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array2, i64 0, i64 7), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array2, i64 0, i64 7), align 4
ret i32 %0
}
; CHECK-O-NEXT: R_PPC64_PCREL34 array2-0x8
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array2, i64 0, i64 -2), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array2, i64 0, i64 -2), align 4
ret i32 %0
}
; CHECK-O: lwa 3, 16(3)
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array1, i64 0, i64 4), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array1, i64 0, i64 4), align 4
ret i32 %0
}
; CHECK-O: lwa 3, -4(3)
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @array1, i64 0, i64 -1), align 4
+ %0 = load i32, ptr getelementptr inbounds ([10 x i32], ptr @array1, i64 0, i64 -1), align 4
ret i32 %0
}
; the past as we no longer need to restore the TOC pointer into R2 after
; most calls.
-@Func = external local_unnamed_addr global i32 (...)*, align 8
-@FuncLocal = common dso_local local_unnamed_addr global i32 (...)* null, align 8
+@Func = external local_unnamed_addr global ptr, align 8
+@FuncLocal = common dso_local local_unnamed_addr global ptr null, align 8
; No calls in this function but we assign the function pointers.
define dso_local void @AssignFuncPtr() local_unnamed_addr {
; CHECK-NEXT: pstd r4, FuncLocal@PCREL(0), 1
; CHECK-NEXT: blr
entry:
- store i32 (...)* @Function, i32 (...)** @Func, align 8
- store i32 (...)* @Function, i32 (...)** @FuncLocal, align 8
+ store ptr @Function, ptr @Func, align 8
+ store ptr @Function, ptr @FuncLocal, align 8
ret void
}
; CHECK-NEXT: bctr
; CHECK-NEXT: #TC_RETURNr8 ctr 0
entry:
- %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @FuncLocal to i32 ()**), align 8
+ %0 = load ptr, ptr @FuncLocal, align 8
%call = tail call signext i32 %0()
ret void
}
; CHECK-NEXT: bctr
; CHECK-NEXT: #TC_RETURNr8 ctr 0
entry:
- %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @Func to i32 ()**), align 8
+ %0 = load ptr, ptr @Func, align 8
%call = tail call signext i32 %0()
ret void
}
-define dso_local signext i32 @TailCallParamFuncPtr(i32 (...)* nocapture %passedfunc) local_unnamed_addr {
+define dso_local signext i32 @TailCallParamFuncPtr(ptr nocapture %passedfunc) local_unnamed_addr {
; CHECK-LABEL: TailCallParamFuncPtr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtctr r3
; CHECK-NEXT: bctr
; CHECK-NEXT: #TC_RETURNr8 ctr 0
entry:
- %callee.knr.cast = bitcast i32 (...)* %passedfunc to i32 ()*
- %call = tail call signext i32 %callee.knr.cast()
+ %call = tail call signext i32 %passedfunc()
ret i32 %call
}
-define dso_local signext i32 @NoTailIndirectCall(i32 (...)* nocapture %passedfunc, i32 signext %a) local_unnamed_addr {
+define dso_local signext i32 @NoTailIndirectCall(ptr nocapture %passedfunc, i32 signext %a) local_unnamed_addr {
; CHECK-LABEL: NoTailIndirectCall:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %callee.knr.cast = bitcast i32 (...)* %passedfunc to i32 ()*
- %call = tail call signext i32 %callee.knr.cast()
+ %call = tail call signext i32 %passedfunc()
%add = add nsw i32 %call, %a
ret i32 %add
}
; CHECK-NEXT: b Function@notoc
; CHECK-NEXT: #TC_RETURNd8 Function@notoc 0
entry:
- %call = tail call signext i32 bitcast (i32 (...)* @Function to i32 ()*)()
+ %call = tail call signext i32 @Function()
ret i32 %call
}
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %call = tail call signext i32 bitcast (i32 (...)* @Function to i32 ()*)()
+ %call = tail call signext i32 @Function()
%add = add nsw i32 %call, %a
ret i32 %add
}
; CHECK-NEXT: bctr
; CHECK-NEXT: #TC_RETURNr8 ctr 0
entry:
- %call = tail call signext i32 inttoptr (i64 400 to i32 ()*)()
+ %call = tail call signext i32 inttoptr (i64 400 to ptr)()
ret i32 %call
}
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %call = tail call signext i32 inttoptr (i64 400 to i32 ()*)()
+ %call = tail call signext i32 inttoptr (i64 400 to ptr)()
%add = add nsw i32 %call, %a
ret i32 %add
}
@x = external thread_local global i32, align 4
-define nonnull i32* @GeneralDynamicAddressLoad() {
+define nonnull ptr @GeneralDynamicAddressLoad() {
; CHECK-S-LABEL: GeneralDynamicAddressLoad:
; CHECK-S: paddi r3, 0, x@got@tlsgd@pcrel, 1
; CHECK-S-NEXT: bl __tls_get_addr@notoc(x@tlsgd)
; CHECK-O-NEXT: 0000000000000014: R_PPC64_TLSGD x
; CHECK-O-NEXT: 0000000000000014: R_PPC64_REL24_NOTOC __tls_get_addr
entry:
- ret i32* @x
+ ret ptr @x
}
define i32 @GeneralDynamicValueLoad() {
; CHECK-SYM-LABEL: Symbol table '.symtab' contains 7 entries
; CHECK-SYM: 0000000000000000 0 TLS GLOBAL DEFAULT UND x
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
ret i32 %0
}
@x = external thread_local global i32, align 4
-define i32* @InitialExecAddressLoad() {
+define ptr @InitialExecAddressLoad() {
; CHECK-S-LABEL: InitialExecAddressLoad:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: pld r3, x@got@tprel@pcrel(0), 1
; CHECK-O-NEXT: 0000000000000009: R_PPC64_TLS x
; CHECK-O-NEXT: 20 00 80 4e blr
entry:
- ret i32* @x
+ ret ptr @x
}
define i32 @InitialExecValueLoad() {
; CHECK-SYM-LABEL: Symbol table '.symtab' contains 6 entries
; CHECK-SYM: 0000000000000000 0 TLS GLOBAL DEFAULT UND x
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
ret i32 %0
}
@x = hidden thread_local global i32 0, align 4
-define nonnull i32* @LocalDynamicAddressLoad() {
+define nonnull ptr @LocalDynamicAddressLoad() {
; CHECK-S-LABEL: LocalDynamicAddressLoad:
; CHECK-S: paddi r3, 0, x@got@tlsld@pcrel, 1
; CHECK-S-NEXT: bl __tls_get_addr@notoc(x@tlsld)
; CHECK-O-NEXT: 18: paddi 3, 3, 0, 0
; CHECK-O-NEXT: 0000000000000018: R_PPC64_DTPREL34 x
entry:
- ret i32* @x
+ ret ptr @x
}
define i32 @LocalDynamicValueLoad() {
; CHECK-O-NEXT: 0000000000000058: R_PPC64_DTPREL34 x
; CHECK-O-NEXT: 60: lwz 3, 0(3)
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
ret i32 %0
}
@x = dso_local thread_local global i32 0, align 4
@y = dso_local thread_local global [5 x i32] [i32 0, i32 0, i32 0, i32 0, i32 0], align 4
-define dso_local i32* @LocalExecAddressLoad() {
+define dso_local ptr @LocalExecAddressLoad() {
; CHECK-S-LABEL: LocalExecAddressLoad:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: paddi r3, r13, x@TPREL, 0
; CHECK-O-NEXT: 0000000000000000: R_PPC64_TPREL34 x
; CHECK-O-NEXT: 8: blr
entry:
- ret i32* @x
+ ret ptr @x
}
define dso_local i32 @LocalExecValueLoad() {
; CHECK-O-NEXT: 28: lwz 3, 0(3)
; CHECK-O-NEXT: 2c: blr
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
ret i32 %0
}
; CHECK-O-NEXT: 48: stw 3, 0(4)
; CHECK-O-NEXT: 4c: blr
entry:
- store i32 %in, i32* @x, align 4
+ store i32 %in, ptr @x, align 4
ret void
}
; CHECK-O-NEXT: 68: lwz 3, 12(3)
; CHECK-O-NEXT: 6c: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @y, i64 0, i64 3), align 4
+ %0 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @y, i64 0, i64 3), align 4
ret i32 %0
}
-define dso_local i32* @LocalExecValueLoadOffsetNoLoad() {
+define dso_local ptr @LocalExecValueLoadOffsetNoLoad() {
; CHECK-S-LABEL: LocalExecValueLoadOffsetNoLoad:
; CHECK-S: # %bb.0: # %entry
; CHECK-S-NEXT: paddi r3, r13, y@TPREL, 0
; CHECK-O-NEXT: 88: addi 3, 3, 12
; CHECK-O-NEXT: 8c: blr
entry:
- ret i32* getelementptr inbounds ([5 x i32], [5 x i32]* @y, i64 0, i64 3)
+ ret ptr getelementptr inbounds ([5 x i32], ptr @y, i64 0, i64 3)
}
; RUN: llc -verify-machineinstrs -mtriple="powerpc64le-unknown-linux-gnu" \
; RUN: -ppc-asm-full-reg-names -mcpu=pwr10 -relocation-model=pic < %s | FileCheck %s
-%0 = type { i32 (...)**, %0* }
-@x = external dso_local thread_local unnamed_addr global %0*, align 8
-define void @test(i8* %arg) {
+%0 = type { ptr, ptr }
+@x = external dso_local thread_local unnamed_addr global ptr, align 8
+define void @test(ptr %arg) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: ld r30, -16(r1)
; CHECK-NEXT: mtlr r0
entry:
- store i8* %arg, i8** bitcast (%0** @x to i8**), align 8
+ store ptr %arg, ptr @x, align 8
ret void
}
; CHECK-O-NEXT: R_PPC64_PCREL34 valIntLoc
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* @valIntLoc, align 4
+ %0 = load i32, ptr @valIntLoc, align 4
ret i32 %0
}
; CHECK-O-NEXT: lwa 3, 0(3)
; CHECK-O-NEXT: blr
entry:
- %0 = load i32, i32* @valIntGlob, align 4
+ %0 = load i32, ptr @valIntGlob, align 4
ret i32 %0
}
; CHECK-NEXT: stb r3, GlobSt1@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 0), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 0), align 1
+ %0 = load i8, ptr @GlobLd1, align 1
+ store i8 %0, ptr @GlobSt1, align 1
ret void
}
; CHECK-NEXT: stb r3, 3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 3), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 3), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 3), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 3), align 1
ret void
}
; CHECK-NEXT: stb r3, GlobSt1@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 4), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 4), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 4), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 4), align 1
ret void
}
; CHECK-NEXT: stb r3, GlobSt1@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 16), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 16), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd1, i64 0, i64 16), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt1, i64 0, i64 16), align 1
ret void
}
; CHECK-NEXT: stbx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 %Idx
- %0 = load i8, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds [20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 %Idx
- store i8 %0, i8* %arrayidx1, align 1
+ %arrayidx = getelementptr inbounds [20 x i8], ptr @GlobLd1, i64 0, i64 %Idx
+ %0 = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds [20 x i8], ptr @GlobSt1, i64 0, i64 %Idx
+ store i8 %0, ptr %arrayidx1, align 1
ret void
}
; CHECK-NEXT: stb r3, GlobSt2@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 0), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 0), align 1
+ %0 = load i8, ptr @GlobLd2, align 1
+ store i8 %0, ptr @GlobSt2, align 1
ret void
}
; CHECK-NEXT: stb r3, 3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 3), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 3), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 3), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 3), align 1
ret void
}
; CHECK-NEXT: stb r3, GlobSt2@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 4), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 4), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 4), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 4), align 1
ret void
}
; CHECK-NEXT: stb r3, GlobSt2@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 16), align 1
- store i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 16), align 1
+ %0 = load i8, ptr getelementptr inbounds ([20 x i8], ptr @GlobLd2, i64 0, i64 16), align 1
+ store i8 %0, ptr getelementptr inbounds ([20 x i8], ptr @GlobSt2, i64 0, i64 16), align 1
ret void
}
; CHECK-NEXT: stbx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i8], [20 x i8]* @GlobLd2, i64 0, i64 %Idx
- %0 = load i8, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds [20 x i8], [20 x i8]* @GlobSt2, i64 0, i64 %Idx
- store i8 %0, i8* %arrayidx1, align 1
+ %arrayidx = getelementptr inbounds [20 x i8], ptr @GlobLd2, i64 0, i64 %Idx
+ %0 = load i8, ptr %arrayidx, align 1
+ %arrayidx1 = getelementptr inbounds [20 x i8], ptr @GlobSt2, i64 0, i64 %Idx
+ store i8 %0, ptr %arrayidx1, align 1
ret void
}
; CHECK-NEXT: sth r3, GlobSt3@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 0), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 0), align 2
+ %0 = load i16, ptr @GlobLd3, align 2
+ store i16 %0, ptr @GlobSt3, align 2
ret void
}
; CHECK-NEXT: sth r3, 3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobLd3 to i8*), i64 3) to i16*), align 2
- store i16 %0, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobSt3 to i8*), i64 3) to i16*), align 2
+ %0 = load i16, ptr getelementptr inbounds (i8, ptr @GlobLd3, i64 3), align 2
+ store i16 %0, ptr getelementptr inbounds (i8, ptr @GlobSt3, i64 3), align 2
ret void
}
; CHECK-NEXT: sth r3, GlobSt3@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 2), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 2), align 2
+ %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd3, i64 0, i64 2), align 2
+ store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt3, i64 0, i64 2), align 2
ret void
}
; CHECK-NEXT: sth r3, GlobSt3@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 8), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 8), align 2
+ %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd3, i64 0, i64 8), align 2
+ store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt3, i64 0, i64 8), align 2
ret void
}
; CHECK-NEXT: sthx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 %Idx
- %0 = load i16, i16* %arrayidx, align 2
- %arrayidx1 = getelementptr inbounds [20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 %Idx
- store i16 %0, i16* %arrayidx1, align 2
+ %arrayidx = getelementptr inbounds [20 x i16], ptr @GlobLd3, i64 0, i64 %Idx
+ %0 = load i16, ptr %arrayidx, align 2
+ %arrayidx1 = getelementptr inbounds [20 x i16], ptr @GlobSt3, i64 0, i64 %Idx
+ store i16 %0, ptr %arrayidx1, align 2
ret void
}
; CHECK-NEXT: sth r3, GlobSt4@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 0), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 0), align 2
+ %0 = load i16, ptr @GlobLd4, align 2
+ store i16 %0, ptr @GlobSt4, align 2
ret void
}
; CHECK-NEXT: sth r3, 3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobLd4 to i8*), i64 3) to i16*), align 2
- store i16 %0, i16* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i16]* @GlobSt4 to i8*), i64 3) to i16*), align 2
+ %0 = load i16, ptr getelementptr inbounds (i8, ptr @GlobLd4, i64 3), align 2
+ store i16 %0, ptr getelementptr inbounds (i8, ptr @GlobSt4, i64 3), align 2
ret void
}
; CHECK-NEXT: sth r3, GlobSt4@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 2), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 2), align 2
+ %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd4, i64 0, i64 2), align 2
+ store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt4, i64 0, i64 2), align 2
ret void
}
; CHECK-NEXT: sth r3, GlobSt4@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 8), align 2
- store i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 8), align 2
+ %0 = load i16, ptr getelementptr inbounds ([20 x i16], ptr @GlobLd4, i64 0, i64 8), align 2
+ store i16 %0, ptr getelementptr inbounds ([20 x i16], ptr @GlobSt4, i64 0, i64 8), align 2
ret void
}
; CHECK-NEXT: sthx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i16], [20 x i16]* @GlobLd4, i64 0, i64 %Idx
- %0 = load i16, i16* %arrayidx, align 2
- %arrayidx1 = getelementptr inbounds [20 x i16], [20 x i16]* @GlobSt4, i64 0, i64 %Idx
- store i16 %0, i16* %arrayidx1, align 2
+ %arrayidx = getelementptr inbounds [20 x i16], ptr @GlobLd4, i64 0, i64 %Idx
+ %0 = load i16, ptr %arrayidx, align 2
+ %arrayidx1 = getelementptr inbounds [20 x i16], ptr @GlobSt4, i64 0, i64 %Idx
+ store i16 %0, ptr %arrayidx1, align 2
ret void
}
; CHECK-NEXT: stw r3, GlobSt5@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 0), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 0), align 4
+ %0 = load i32, ptr @GlobLd5, align 4
+ store i32 %0, ptr @GlobSt5, align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt5@toc@l+3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobLd5 to i8*), i64 3) to i32*), align 4
- store i32 %0, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobSt5 to i8*), i64 3) to i32*), align 4
+ %0 = load i32, ptr getelementptr inbounds (i8, ptr @GlobLd5, i64 3), align 4
+ store i32 %0, ptr getelementptr inbounds (i8, ptr @GlobSt5, i64 3), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt5@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 1), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 1), align 4
+ %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd5, i64 0, i64 1), align 4
+ store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt5, i64 0, i64 1), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt5@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 4), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 4), align 4
+ %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd5, i64 0, i64 4), align 4
+ store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt5, i64 0, i64 4), align 4
ret void
}
; CHECK-NEXT: stwx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 %Idx
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds [20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 %Idx
- store i32 %0, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds [20 x i32], ptr @GlobLd5, i64 0, i64 %Idx
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds [20 x i32], ptr @GlobSt5, i64 0, i64 %Idx
+ store i32 %0, ptr %arrayidx1, align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt6@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 0), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 0), align 4
+ %0 = load i32, ptr @GlobLd6, align 4
+ store i32 %0, ptr @GlobSt6, align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt6@toc@l+3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobLd6 to i8*), i64 3) to i32*), align 4
- store i32 %0, i32* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i32]* @GlobSt6 to i8*), i64 3) to i32*), align 4
+ %0 = load i32, ptr getelementptr inbounds (i8, ptr @GlobLd6, i64 3), align 4
+ store i32 %0, ptr getelementptr inbounds (i8, ptr @GlobSt6, i64 3), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt6@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 1), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 1), align 4
+ %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd6, i64 0, i64 1), align 4
+ store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt6, i64 0, i64 1), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt6@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 4), align 4
- store i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 4), align 4
+ %0 = load i32, ptr getelementptr inbounds ([20 x i32], ptr @GlobLd6, i64 0, i64 4), align 4
+ store i32 %0, ptr getelementptr inbounds ([20 x i32], ptr @GlobSt6, i64 0, i64 4), align 4
ret void
}
; CHECK-NEXT: stwx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i32], [20 x i32]* @GlobLd6, i64 0, i64 %Idx
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds [20 x i32], [20 x i32]* @GlobSt6, i64 0, i64 %Idx
- store i32 %0, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds [20 x i32], ptr @GlobLd6, i64 0, i64 %Idx
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds [20 x i32], ptr @GlobSt6, i64 0, i64 %Idx
+ store i32 %0, ptr %arrayidx1, align 4
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt7@toc@l(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 0), align 8
- store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 0), align 8
+ %0 = load i64, ptr @GlobLd7, align 8
+ store i64 %0, ptr @GlobSt7, align 8
ret void
}
; CHECK-NEXT: stdx r3, r5, r4
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd7 to i8*), i64 3) to i64*), align 8
- store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt7 to i8*), i64 3) to i64*), align 8
+ %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd7, i64 3), align 8
+ store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt7, i64 3), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt7@toc@l+4(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd7 to i8*), i64 4) to i64*), align 8
- store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt7 to i8*), i64 4) to i64*), align 8
+ %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd7, i64 4), align 8
+ store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt7, i64 4), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt7@toc@l+16(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 2), align 8
- store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 2), align 8
+ %0 = load i64, ptr getelementptr inbounds ([20 x i64], ptr @GlobLd7, i64 0, i64 2), align 8
+ store i64 %0, ptr getelementptr inbounds ([20 x i64], ptr @GlobSt7, i64 0, i64 2), align 8
ret void
}
; CHECK-NEXT: stdx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 %Idx
- %0 = load i64, i64* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds [20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 %Idx
- store i64 %0, i64* %arrayidx1, align 8
+ %arrayidx = getelementptr inbounds [20 x i64], ptr @GlobLd7, i64 0, i64 %Idx
+ %0 = load i64, ptr %arrayidx, align 8
+ %arrayidx1 = getelementptr inbounds [20 x i64], ptr @GlobSt7, i64 0, i64 %Idx
+ store i64 %0, ptr %arrayidx1, align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt8@toc@l(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 0), align 8
- store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 0), align 8
+ %0 = load i64, ptr @GlobLd8, align 8
+ store i64 %0, ptr @GlobSt8, align 8
ret void
}
; CHECK-NEXT: stdx r3, r5, r4
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd8 to i8*), i64 3) to i64*), align 8
- store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt8 to i8*), i64 3) to i64*), align 8
+ %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd8, i64 3), align 8
+ store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt8, i64 3), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt8@toc@l+4(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobLd8 to i8*), i64 4) to i64*), align 8
- store i64 %0, i64* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x i64]* @GlobSt8 to i8*), i64 4) to i64*), align 8
+ %0 = load i64, ptr getelementptr inbounds (i8, ptr @GlobLd8, i64 4), align 8
+ store i64 %0, ptr getelementptr inbounds (i8, ptr @GlobSt8, i64 4), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt8@toc@l+16(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 2), align 8
- store i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 2), align 8
+ %0 = load i64, ptr getelementptr inbounds ([20 x i64], ptr @GlobLd8, i64 0, i64 2), align 8
+ store i64 %0, ptr getelementptr inbounds ([20 x i64], ptr @GlobSt8, i64 0, i64 2), align 8
ret void
}
; CHECK-NEXT: stdx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x i64], [20 x i64]* @GlobLd8, i64 0, i64 %Idx
- %0 = load i64, i64* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds [20 x i64], [20 x i64]* @GlobSt8, i64 0, i64 %Idx
- store i64 %0, i64* %arrayidx1, align 8
+ %arrayidx = getelementptr inbounds [20 x i64], ptr @GlobLd8, i64 0, i64 %Idx
+ %0 = load i64, ptr %arrayidx, align 8
+ %arrayidx1 = getelementptr inbounds [20 x i64], ptr @GlobSt8, i64 0, i64 %Idx
+ store i64 %0, ptr %arrayidx1, align 8
ret void
}
; CHECK-NEXT: stw r3, GlobSt9@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 0), align 4
- store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 0), align 4
+ %0 = load float, ptr @GlobLd9, align 4
+ store float %0, ptr @GlobSt9, align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt9@toc@l+3(r4)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x float]* @GlobLd9 to i8*), i64 3) to float*), align 4
- store float %0, float* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x float]* @GlobSt9 to i8*), i64 3) to float*), align 4
+ %0 = load float, ptr getelementptr inbounds (i8, ptr @GlobLd9, i64 3), align 4
+ store float %0, ptr getelementptr inbounds (i8, ptr @GlobSt9, i64 3), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt9@toc@l+4(r4)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 1), align 4
- store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 1), align 4
+ %0 = load float, ptr getelementptr inbounds ([20 x float], ptr @GlobLd9, i64 0, i64 1), align 4
+ store float %0, ptr getelementptr inbounds ([20 x float], ptr @GlobSt9, i64 0, i64 1), align 4
ret void
}
; CHECK-NEXT: stw r3, GlobSt9@toc@l+16(r4)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobLd9, i64 0, i64 4), align 4
- store float %0, float* getelementptr inbounds ([20 x float], [20 x float]* @GlobSt9, i64 0, i64 4), align 4
+ %0 = load float, ptr getelementptr inbounds ([20 x float], ptr @GlobLd9, i64 0, i64 4), align 4
+ store float %0, ptr getelementptr inbounds ([20 x float], ptr @GlobSt9, i64 0, i64 4), align 4
ret void
}
; CHECK-NEXT: stwx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x float], [20 x float]* @GlobLd9, i64 0, i64 %Idx
- %0 = load float, float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds [20 x float], [20 x float]* @GlobSt9, i64 0, i64 %Idx
- store float %0, float* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds [20 x float], ptr @GlobLd9, i64 0, i64 %Idx
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds [20 x float], ptr @GlobSt9, i64 0, i64 %Idx
+ store float %0, ptr %arrayidx1, align 4
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt10@toc@l(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobLd10, i64 0, i64 0), align 8
- store double %0, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobSt10, i64 0, i64 0), align 8
+ %0 = load double, ptr @GlobLd10, align 8
+ store double %0, ptr @GlobSt10, align 8
ret void
}
; CHECK-NEXT: stdx r3, r5, r4
; CHECK-NEXT: blr
entry:
- %0 = load double, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobLd10 to i8*), i64 3) to double*), align 8
- store double %0, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobSt10 to i8*), i64 3) to double*), align 8
+ %0 = load double, ptr getelementptr inbounds (i8, ptr @GlobLd10, i64 3), align 8
+ store double %0, ptr getelementptr inbounds (i8, ptr @GlobSt10, i64 3), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt10@toc@l+4(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobLd10 to i8*), i64 4) to double*), align 8
- store double %0, double* bitcast (i8* getelementptr inbounds (i8, i8* bitcast ([20 x double]* @GlobSt10 to i8*), i64 4) to double*), align 8
+ %0 = load double, ptr getelementptr inbounds (i8, ptr @GlobLd10, i64 4), align 8
+ store double %0, ptr getelementptr inbounds (i8, ptr @GlobSt10, i64 4), align 8
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt10@toc@l+16(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobLd10, i64 0, i64 2), align 8
- store double %0, double* getelementptr inbounds ([20 x double], [20 x double]* @GlobSt10, i64 0, i64 2), align 8
+ %0 = load double, ptr getelementptr inbounds ([20 x double], ptr @GlobLd10, i64 0, i64 2), align 8
+ store double %0, ptr getelementptr inbounds ([20 x double], ptr @GlobSt10, i64 0, i64 2), align 8
ret void
}
; CHECK-NEXT: stdx r4, r5, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x double], [20 x double]* @GlobLd10, i64 0, i64 %Idx
- %0 = load double, double* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds [20 x double], [20 x double]* @GlobSt10, i64 0, i64 %Idx
- store double %0, double* %arrayidx1, align 8
+ %arrayidx = getelementptr inbounds [20 x double], ptr @GlobLd10, i64 0, i64 %Idx
+ %0 = load double, ptr %arrayidx, align 8
+ %arrayidx1 = getelementptr inbounds [20 x double], ptr @GlobSt10, i64 0, i64 %Idx
+ store double %0, ptr %arrayidx1, align 8
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0), align 16
- store <16 x i8> %0, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0), align 16
+ %0 = load <16 x i8>, ptr @GlobLd11, align 16
+ store <16 x i8> %0, ptr @GlobSt11, align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 0, i64 3), align 16
+ store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 0, i64 3), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 0, i64 4), align 16
+ store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 0, i64 4), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 1, i64 0), align 16
+ store <16 x i8> %0, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 1, i64 0), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r4, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd11, i64 0, i64 %Idx
- %0 = load <16 x i8>, <16 x i8>* %arrayidx, align 16
- %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt11, i64 0, i64 %Idx
- store <16 x i8> %0, <16 x i8>* %arrayidx1, align 16
+ %arrayidx = getelementptr inbounds [20 x <16 x i8>], ptr @GlobLd11, i64 0, i64 %Idx
+ %0 = load <16 x i8>, ptr %arrayidx, align 16
+ %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], ptr @GlobSt11, i64 0, i64 %Idx
+ store <16 x i8> %0, ptr %arrayidx1, align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0), align 16
- store <16 x i8> %0, <16 x i8>* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0), align 16
+ %0 = load <16 x i8>, ptr @GlobLd12, align 16
+ store <16 x i8> %0, ptr @GlobSt12, align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0, i64 3) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 0, i64 3), align 16
+ store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 0, i64 3), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 0, i64 4) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 0, i64 4), align 16
+ store <16 x i8> %0, ptr getelementptr ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 0, i64 4), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 1, i64 0) to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 1, i64 0), align 16
+ store <16 x i8> %0, ptr getelementptr inbounds ([20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 1, i64 0), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x vs0, r4, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobLd12, i64 0, i64 %Idx
- %0 = load <16 x i8>, <16 x i8>* %arrayidx, align 16
- %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], [20 x <16 x i8>]* @GlobSt12, i64 0, i64 %Idx
- store <16 x i8> %0, <16 x i8>* %arrayidx1, align 16
+ %arrayidx = getelementptr inbounds [20 x <16 x i8>], ptr @GlobLd12, i64 0, i64 %Idx
+ %0 = load <16 x i8>, ptr %arrayidx, align 16
+ %arrayidx1 = getelementptr inbounds [20 x <16 x i8>], ptr @GlobSt12, i64 0, i64 %Idx
+ store <16 x i8> %0, ptr %arrayidx1, align 16
ret void
}
; CHECK-NEXT: stb r3, GlobSt1@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i8, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobLd1, i64 0, i64 0) monotonic, align 1
- store atomic i8 %0, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @GlobSt1, i64 0, i64 0) monotonic, align 1
+ %0 = load atomic i8, ptr @GlobLd1 monotonic, align 1
+ store atomic i8 %0, ptr @GlobSt1 monotonic, align 1
ret void
}
; CHECK-NEXT: sth r3, GlobSt3@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i16, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobLd3, i64 0, i64 0) monotonic, align 2
- store atomic i16 %0, i16* getelementptr inbounds ([20 x i16], [20 x i16]* @GlobSt3, i64 0, i64 0) monotonic, align 2
+ %0 = load atomic i16, ptr @GlobLd3 monotonic, align 2
+ store atomic i16 %0, ptr @GlobSt3 monotonic, align 2
ret void
}
; CHECK-NEXT: stw r3, GlobSt5@toc@l(r4)
; CHECK-NEXT: blr
entry:
- %0 = load atomic i32, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobLd5, i64 0, i64 0) monotonic, align 4
- store atomic i32 %0, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @GlobSt5, i64 0, i64 0) monotonic, align 4
+ %0 = load atomic i32, ptr @GlobLd5 monotonic, align 4
+ store atomic i32 %0, ptr @GlobSt5 monotonic, align 4
ret void
}
; CHECK-P8-NEXT: std r3, GlobSt7@toc@l(r4)
; CHECK-P8-NEXT: blr
entry:
- %0 = load atomic i64, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobLd7, i64 0, i64 0) monotonic, align 8
- store atomic i64 %0, i64* getelementptr inbounds ([20 x i64], [20 x i64]* @GlobSt7, i64 0, i64 0) monotonic, align 8
+ %0 = load atomic i64, ptr @GlobLd7 monotonic, align 8
+ store atomic i64 %0, ptr @GlobSt7 monotonic, align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- store i64 %conv, i64* bitcast ([20 x double]* @GlobSt10 to i64*), align 8
+ store i64 %conv, ptr @GlobSt10, align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- store i64 %conv, i64* bitcast ([20 x double]* @GlobSt10 to i64*), align 8
+ store i64 %conv, ptr @GlobSt10, align 8
ret void
}
; CHECK-P8-BE-NEXT: blr
entry:
%conv = fptoui fp128 %str to i64
- store i64 %conv, i64* bitcast ([20 x fp128]* @GlobF128 to i64*), align 16
+ store i64 %conv, ptr @GlobF128, align 16
ret void
}
; CHECK-P8-BE-NEXT: blr
entry:
%conv = fptosi fp128 %str to i64
- store i64 %conv, i64* bitcast ([20 x fp128]* @GlobF128 to i64*), align 16
+ store i64 %conv, ptr @GlobF128, align 16
ret void
}
define dso_local void @test_b4() nounwind {
entry:
- %0 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 0), align 1
+ %0 = load i8, ptr @b4v, align 1
%inc0 = add nsw i8 %0, 1
- store i8 %inc0, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 0), align 1
- %1 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 1), align 1
+ store i8 %inc0, ptr @b4v, align 1
+ %1 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 1), align 1
%inc1 = add nsw i8 %1, 2
- store i8 %inc1, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 1), align 1
- %2 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 2), align 1
+ store i8 %inc1, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 1), align 1
+ %2 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 2), align 1
%inc2 = add nsw i8 %2, 3
- store i8 %inc2, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 2), align 1
- %3 = load i8, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 3), align 1
+ store i8 %inc2, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 2), align 1
+ %3 = load i8, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 3), align 1
%inc3 = add nsw i8 %3, 4
- store i8 %inc3, i8* getelementptr inbounds (%struct.b4, %struct.b4* @b4v, i32 0, i32 3), align 1
+ store i8 %inc3, ptr getelementptr inbounds (%struct.b4, ptr @b4v, i32 0, i32 3), align 1
ret void
}
define dso_local void @test_h2() nounwind {
entry:
- %0 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
+ %0 = load i16, ptr @h2v, align 2
%inc0 = add nsw i16 %0, 1
- store i16 %inc0, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
- %1 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+ store i16 %inc0, ptr @h2v, align 2
+ %1 = load i16, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
%inc1 = add nsw i16 %1, 2
- store i16 %inc1, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+ store i16 %inc1, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
ret void
}
; CHECK-DAG: sth [[REG1_1]], h2v@toc@l+2([[REGSTRUCT]])
define dso_local void @test_h2_optsize() optsize nounwind {
entry:
- %0 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
+ %0 = load i16, ptr @h2v, align 2
%inc0 = add nsw i16 %0, 1
- store i16 %inc0, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 0), align 2
- %1 = load i16, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+ store i16 %inc0, ptr @h2v, align 2
+ %1 = load i16, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
%inc1 = add nsw i16 %1, 2
- store i16 %inc1, i16* getelementptr inbounds (%struct.h2, %struct.h2* @h2v, i32 0, i32 1), align 2
+ store i16 %inc1, ptr getelementptr inbounds (%struct.h2, ptr @h2v, i32 0, i32 1), align 2
ret void
}
define dso_local void @test_b8() nounwind {
entry:
- %0 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 0), align 1
+ %0 = load i8, ptr @b8v, align 1
%inc0 = add nsw i8 %0, 1
- store i8 %inc0, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 0), align 1
- %1 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 1), align 1
+ store i8 %inc0, ptr @b8v, align 1
+ %1 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 1), align 1
%inc1 = add nsw i8 %1, 2
- store i8 %inc1, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 1), align 1
- %2 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 2), align 1
+ store i8 %inc1, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 1), align 1
+ %2 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 2), align 1
%inc2 = add nsw i8 %2, 3
- store i8 %inc2, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 2), align 1
- %3 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 3), align 1
+ store i8 %inc2, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 2), align 1
+ %3 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 3), align 1
%inc3 = add nsw i8 %3, 4
- store i8 %inc3, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 3), align 1
- %4 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 4), align 1
+ store i8 %inc3, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 3), align 1
+ %4 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 4), align 1
%inc4 = add nsw i8 %4, 5
- store i8 %inc4, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 4), align 1
- %5 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 5), align 1
+ store i8 %inc4, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 4), align 1
+ %5 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 5), align 1
%inc5 = add nsw i8 %5, 6
- store i8 %inc5, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 5), align 1
- %6 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 6), align 1
+ store i8 %inc5, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 5), align 1
+ %6 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 6), align 1
%inc6 = add nsw i8 %6, 7
- store i8 %inc6, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 6), align 1
- %7 = load i8, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 7), align 1
+ store i8 %inc6, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 6), align 1
+ %7 = load i8, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 7), align 1
%inc7 = add nsw i8 %7, 8
- store i8 %inc7, i8* getelementptr inbounds (%struct.b8, %struct.b8* @b8v, i32 0, i32 7), align 1
+ store i8 %inc7, ptr getelementptr inbounds (%struct.b8, ptr @b8v, i32 0, i32 7), align 1
ret void
}
define dso_local void @test_h4() nounwind {
entry:
- %0 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 0), align 2
+ %0 = load i16, ptr @h4v, align 2
%inc0 = add nsw i16 %0, 1
- store i16 %inc0, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 0), align 2
- %1 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 1), align 2
+ store i16 %inc0, ptr @h4v, align 2
+ %1 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 1), align 2
%inc1 = add nsw i16 %1, 2
- store i16 %inc1, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 1), align 2
- %2 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 2), align 2
+ store i16 %inc1, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 1), align 2
+ %2 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 2), align 2
%inc2 = add nsw i16 %2, 3
- store i16 %inc2, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 2), align 2
- %3 = load i16, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 3), align 2
+ store i16 %inc2, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 2), align 2
+ %3 = load i16, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 3), align 2
%inc3 = add nsw i16 %3, 4
- store i16 %inc3, i16* getelementptr inbounds (%struct.h4, %struct.h4* @h4v, i32 0, i32 3), align 2
+ store i16 %inc3, ptr getelementptr inbounds (%struct.h4, ptr @h4v, i32 0, i32 3), align 2
ret void
}
define dso_local void @test_w2() nounwind {
entry:
- %0 = load i32, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 0), align 4
+ %0 = load i32, ptr @w2v, align 4
%inc0 = add nsw i32 %0, 1
- store i32 %inc0, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 0), align 4
- %1 = load i32, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 1), align 4
+ store i32 %inc0, ptr @w2v, align 4
+ %1 = load i32, ptr getelementptr inbounds (%struct.w2, ptr @w2v, i32 0, i32 1), align 4
%inc1 = add nsw i32 %1, 2
- store i32 %inc1, i32* getelementptr inbounds (%struct.w2, %struct.w2* @w2v, i32 0, i32 1), align 4
+ store i32 %inc1, ptr getelementptr inbounds (%struct.w2, ptr @w2v, i32 0, i32 1), align 4
ret void
}
define dso_local void @test_d2() nounwind {
entry:
- %0 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 0), align 8
+ %0 = load i64, ptr @d2v, align 8
%inc0 = add nsw i64 %0, 1
- store i64 %inc0, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 0), align 8
- %1 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+ store i64 %inc0, ptr @d2v, align 8
+ %1 = load i64, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
%inc1 = add nsw i64 %1, 2
- store i64 %inc1, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+ store i64 %inc1, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
ret void
}
; CHECK: ld 3, d2v@toc@l+8([[REG]])
define i64 @test_singleuse() nounwind {
entry:
- %0 = load i64, i64* getelementptr inbounds (%struct.d2, %struct.d2* @d2v, i32 0, i32 1), align 8
+ %0 = load i64, ptr getelementptr inbounds (%struct.d2, ptr @d2v, i32 0, i32 1), align 8
ret i64 %0
}
; CHECK: stdx [[REG0_1]], [[REGSTRUCT]], [[OFFSET_REG]]
define dso_local void @test_misalign() nounwind {
entry:
- %0 = load i64, i64* getelementptr inbounds (%struct.misalign, %struct.misalign* @misalign_v, i32 0, i32 1), align 1
+ %0 = load i64, ptr getelementptr inbounds (%struct.misalign, ptr @misalign_v, i32 0, i32 1), align 1
%inc0 = add nsw i64 %0, 1
- store i64 %inc0, i64* getelementptr inbounds (%struct.misalign, %struct.misalign* @misalign_v, i32 0, i32 1), align 1
+ store i64 %inc0, ptr getelementptr inbounds (%struct.misalign, ptr @misalign_v, i32 0, i32 1), align 1
ret void
}
@__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
@__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
-@llvm.used = appending global [2 x i8*]
- [i8* bitcast (i64* @__profd_main to i8*),
- i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0)], section "llvm.metadata"
+@llvm.used = appending global [2 x ptr]
+ [ptr @__profd_main,
+ ptr @__llvm_prf_nm], section "llvm.metadata"
define i32 @main() #0 {
entry:
@__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
@__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
-@llvm.used = appending global [3 x i8*]
- [i8* bitcast ([1 x i64]* @__profc_main to i8*),
- i8* bitcast (i64* @__profd_main to i8*),
- i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0)], section "llvm.metadata"
+@llvm.used = appending global [3 x ptr]
+ [ptr @__profc_main,
+ ptr @__profd_main,
+ ptr @__llvm_prf_nm], section "llvm.metadata"
define i32 @main() #0 {
entry:
@__profc_main = private global [1 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
@__profd_main = private global i64 zeroinitializer, section "__llvm_prf_data", align 8
@__llvm_prf_nm = private constant [6 x i8] c"\04\00main", section "__llvm_prf_names", align 1
-@__llvm_prf_vnodes = private global [10 x { i64, i64, i8* }] zeroinitializer, section "__llvm_prf_vnds"
+@__llvm_prf_vnodes = private global [10 x { i64, i64, ptr }] zeroinitializer, section "__llvm_prf_vnds"
-@llvm.used = appending global [4 x i8*]
- [i8* bitcast ([1 x i64]* @__profc_main to i8*),
- i8* bitcast (i64* @__profd_main to i8*),
- i8* getelementptr inbounds ([6 x i8], [6 x i8]* @__llvm_prf_nm, i32 0, i32 0),
- i8* bitcast ([10 x { i64, i64, i8* }]* @__llvm_prf_vnodes to i8*)], section "llvm.metadata"
+@llvm.used = appending global [4 x ptr]
+ [ptr @__profc_main,
+ ptr @__profd_main,
+ ptr @__llvm_prf_nm,
+ ptr @__llvm_prf_vnodes], section "llvm.metadata"
define i32 @main() #0 {
entry:
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @foo(double* %x, double* nocapture readonly %y) #0 {
+define void @foo(ptr %x, ptr nocapture readonly %y) #0 {
entry:
br label %for.cond1.preheader
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %y, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx5 = getelementptr inbounds double, double* %x, i64 %indvars.iv
- store double %add, double* %arrayidx5, align 8
+ %arrayidx5 = getelementptr inbounds double, ptr %x, i64 %indvars.iv
+ store double %add, ptr %arrayidx5, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 16000
br i1 %exitcond, label %for.end, label %for.body3
for.end: ; preds = %for.body3
- tail call void @bar(double* %x) #2
+ tail call void @bar(ptr %x) #2
%inc7 = add nuw nsw i32 %i.015, 1
%exitcond16 = icmp eq i32 %inc7, 1000
br i1 %exitcond16, label %for.end8, label %for.cond1.preheader
; CHECK: blr
}
-declare void @bar(double*) #1
+declare void @bar(ptr) #1
attributes #0 = { nounwind "target-cpu"="a2" }
attributes #1 = { "target-cpu"="a2" }
; RUN: llc < %s -O0 -mtriple=powerpc64le-unknown-unknown | FileCheck %s
; Function Attrs: nobuiltin nounwind readonly
-define i8 @popcount128(i128* nocapture nonnull readonly %0) {
+define i8 @popcount128(ptr nocapture nonnull readonly %0) {
; CHECK-LABEL: popcount128:
; CHECK: # %bb.0: # %Entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: clrldi 3, 3, 56
; CHECK-NEXT: blr
Entry:
- %1 = load i128, i128* %0, align 16
+ %1 = load i128, ptr %0, align 16
%2 = tail call i128 @llvm.ctpop.i128(i128 %1)
%3 = trunc i128 %2 to i8
ret i8 %3
declare i128 @llvm.ctpop.i128(i128)
; Function Attrs: nobuiltin nounwind readonly
-define i16 @popcount256(i256* nocapture nonnull readonly %0) {
+define i16 @popcount256(ptr nocapture nonnull readonly %0) {
; CHECK-LABEL: popcount256:
; CHECK: # %bb.0: # %Entry
; CHECK-NEXT: mr 6, 3
; CHECK-NEXT: clrldi 3, 3, 48
; CHECK-NEXT: blr
Entry:
- %1 = load i256, i256* %0, align 16
+ %1 = load i256, ptr %0, align 16
%2 = tail call i256 @llvm.ctpop.i256(i256 %1)
%3 = trunc i256 %2 to i16
ret i16 %3
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-%struct.inode.0.12.120 = type { i8* }
+%struct.inode.0.12.120 = type { ptr }
%struct.kstat2.1.13.121 = type { i32 }
-%struct.task_struct.4.16.124 = type { i8*, %struct.atomic_t.2.14.122, %struct.signal_struct.3.15.123* }
+%struct.task_struct.4.16.124 = type { ptr, %struct.atomic_t.2.14.122, ptr }
%struct.atomic_t.2.14.122 = type { i32 }
%struct.signal_struct.3.15.123 = type { i64 }
-%struct.pid.5.17.125 = type { i8* }
+%struct.pid.5.17.125 = type { ptr }
; Function Attrs: nounwind
-define signext i32 @proc_task_getattr(%struct.inode.0.12.120* nocapture readonly %inode, %struct.kstat2.1.13.121* nocapture %stat) #0 {
+define signext i32 @proc_task_getattr(ptr nocapture readonly %inode, ptr nocapture %stat) #0 {
entry:
- %call1.i = tail call %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125* undef, i32 zeroext 0) #0
+ %call1.i = tail call ptr @get_pid_task(ptr undef, i32 zeroext 0) #0
br i1 undef, label %if.end, label %if.then
if.then: ; preds = %entry
- %0 = load i64, i64* undef, align 8
+ %0 = load i64, ptr undef, align 8
%conv.i = trunc i64 %0 to i32
- %1 = load i32, i32* null, align 4
+ %1 = load i32, ptr null, align 4
%add = add i32 %1, %conv.i
- store i32 %add, i32* null, align 4
- %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
- %2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(i32* %counter.i.i) #0
+ store i32 %add, ptr null, align 4
+ %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, ptr %call1.i, i64 0, i32 1, i32 0
+ %2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(ptr %counter.i.i) #0
%cmp.i = icmp eq i32 %2, 0
br i1 %cmp.i, label %if.then.i, label %if.end
; CHECK: blr
if.then.i: ; preds = %if.then
- %3 = bitcast %struct.task_struct.4.16.124* %call1.i to i8*
- tail call void @foo(i8* %3) #0
+ tail call void @foo(ptr %call1.i) #0
unreachable
if.end: ; preds = %if.then, %entry
ret i32 0
}
-declare void @foo(i8*)
+declare void @foo(ptr)
-declare %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125*, i32 zeroext)
+declare ptr @get_pid_task(ptr, i32 zeroext)
attributes #0 = { nounwind }
; RUN: llc -vector-library=MASSV < %s -mtriple=powerpc-ibm-aix-xcoff -mcpu=pwr7 | FileCheck -check-prefixes=CHECK-PWR7 %s
; Exponent is a variable
-define void @vpow_var(double* nocapture %z, double* nocapture readonly %y, double* nocapture readonly %x) {
+define void @vpow_var(ptr nocapture %z, ptr nocapture readonly %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_var
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %z, i64 %index
- %next.gep31 = getelementptr double, double* %y, i64 %index
- %next.gep32 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep32 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = bitcast double* %next.gep31 to <2 x double>*
- %wide.load33 = load <2 x double>, <2 x double>* %1, align 8
- %2 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> %wide.load33)
- %3 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %2, <2 x double>* %3, align 8
+ %next.gep = getelementptr double, ptr %z, i64 %index
+ %next.gep31 = getelementptr double, ptr %y, i64 %index
+ %next.gep32 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep32, align 8
+ %wide.load33 = load <2 x double>, ptr %next.gep31, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> %wide.load33)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %4 = icmp eq i64 %index.next, 1024
- br i1 %4, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25
-define void @vpow_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_const
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vpow_noeq_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_noeq_const
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant whose lanes differ; one lane is 0.75 (7.7e-01, 7.5e-01), so the 0.75 special case must not apply
-define void @vpow_noeq075_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq075_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_noeq075_const
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant whose lanes differ; one lane is 0.25 (7.7e-01, 2.5e-01), so the 0.25 special case must not apply
-define void @vpow_noeq025_const(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_noeq025_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_noeq025_const
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.75
-define void @vpow_075(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_075(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_075
; CHECK-NOT: __powd2_P{{[7,8,9,10]}}
; CHECK: xvrsqrtesp
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.25
-define void @vpow_025(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_025(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_025
; CHECK-NOT: __powd2_P{{[7,8,9,10]}}
; CHECK: xvrsqrtesp
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call ninf afn nsz <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.75 but no proper fast-math flags
-define void @vpow_075_nofast(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_075_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_075_nofast
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.25 but no proper fast-math flags
-define void @vpow_025_nofast(double* nocapture %y, double* nocapture readonly %x) {
+define void @vpow_025_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vpow_025_nofast
; CHECK-PWR10: __powd2_P10
; CHECK-PWR9: __powd2_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr double, double* %y, i64 %index
- %next.gep19 = getelementptr double, double* %x, i64 %index
- %0 = bitcast double* %next.gep19 to <2 x double>*
- %wide.load = load <2 x double>, <2 x double>* %0, align 8
- %1 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
- %2 = bitcast double* %next.gep to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 8
+ %next.gep = getelementptr double, ptr %y, i64 %index
+ %next.gep19 = getelementptr double, ptr %x, i64 %index
+ %wide.load = load <2 x double>, ptr %next.gep19, align 8
+ %0 = call <2 x double> @__powd2(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+ store <2 x double> %0, ptr %next.gep, align 8
%index.next = add i64 %index, 2
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
ret i64 %0
}
-define <4 x i32> @test4(i32* nocapture readonly %in) {
+define <4 x i32> @test4(ptr nocapture readonly %in) {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvwsx v2, 0, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* %in, align 4
+ %0 = load i32, ptr %in, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %splat.splat
}
-define <4 x float> @test5(float* nocapture readonly %in) {
+define <4 x float> @test5(ptr nocapture readonly %in) {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvwsx v2, 0, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load float, float* %in, align 4
+ %0 = load float, ptr %in, align 4
%splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %splat.splat
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* @Globi, align 4
+ %0 = load i32, ptr @Globi, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %splat.splat
; CHECK-BE-NEXT: blr
entry:
- %0 = load float, float* @Globf, align 4
+ %0 = load float, ptr @Globf, align 4
%splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %splat.splat
ret <16 x i8> <i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200, i8 200>
}
-define <4 x i32> @test14(<4 x i32> %a, i32* nocapture readonly %b) {
+define <4 x i32> @test14(<4 x i32> %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test14:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 0(r5)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* %b, align 4
+ %0 = load i32, ptr %b, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%1 = add i32 %0, 5
- store i32 %1, i32* %b, align 4
+ store i32 %1, ptr %b, align 4
ret <4 x i32> %splat.splat
}
; RUN: llc -vector-library=MASSV < %s -mtriple=powerpc-ibm-aix-xcoff -mcpu=pwr7 | FileCheck -check-prefixes=CHECK-PWR7 %s
; Exponent is a variable
-define void @vspow_var(float* nocapture %z, float* nocapture readonly %y, float* nocapture readonly %x) {
+define void @vspow_var(ptr nocapture %z, ptr nocapture readonly %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_var
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %z, i64 %index
- %next.gep31 = getelementptr float, float* %y, i64 %index
- %next.gep32 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep32 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = bitcast float* %next.gep31 to <4 x float>*
- %wide.load33 = load <4 x float>, <4 x float>* %1, align 4
- %2 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> %wide.load33)
- %3 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %2, <4 x float>* %3, align 4
+ %next.gep = getelementptr float, ptr %z, i64 %index
+ %next.gep31 = getelementptr float, ptr %y, i64 %index
+ %next.gep32 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep32, align 4
+ %wide.load33 = load <4 x float>, ptr %next.gep31, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> %wide.load33)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %4 = icmp eq i64 %index.next, 1024
- br i1 %4, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_const(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_const
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25 and they are different
-define void @vspow_neq_const(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_neq_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_neq_const
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_neq075_const(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_neq075_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_neq075_const
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is a constant != 0.75 and !=0.25
-define void @vspow_neq025_const(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_neq025_const(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_neq025_const
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.75
-define void @vspow_075(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_075(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_075
; CHECK-NOT: __powf4_P{{[7,8,9,10]}}
; CHECK: xvrsqrtesp
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.25
-define void @vspow_025(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_025(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_025
; CHECK-NOT: __powf4_P{{[7,8,9,10]}}
; CHECK: xvrsqrtesp
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call ninf afn nsz <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.75 but no proper fast-math flags
-define void @vspow_075_nofast(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_075_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_075_nofast
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
}
; Exponent is 0.25 but no proper fast-math flags
-define void @vspow_025_nofast(float* nocapture %y, float* nocapture readonly %x) {
+define void @vspow_025_nofast(ptr nocapture %y, ptr nocapture readonly %x) {
; CHECK-LABEL: @vspow_025_nofast
; CHECK-PWR10: __powf4_P10
; CHECK-PWR9: __powf4_P9
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %next.gep = getelementptr float, float* %y, i64 %index
- %next.gep19 = getelementptr float, float* %x, i64 %index
- %0 = bitcast float* %next.gep19 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %0, align 4
- %1 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
- %2 = bitcast float* %next.gep to <4 x float>*
- store <4 x float> %1, <4 x float>* %2, align 4
+ %next.gep = getelementptr float, ptr %y, i64 %index
+ %next.gep19 = getelementptr float, ptr %x, i64 %index
+ %wide.load = load <4 x float>, ptr %next.gep19, align 4
+ %0 = call <4 x float> @__powf4(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+ store <4 x float> %0, ptr %next.gep, align 4
%index.next = add i64 %index, 4
- %3 = icmp eq i64 %index.next, 1024
- br i1 %3, label %for.end, label %vector.body
+ %1 = icmp eq i64 %index.next, 1024
+ br i1 %1, label %for.end, label %vector.body
for.end:
ret void
while.body: ; preds = %while.body, %entry
%newelement = phi i32 [ 0, %entry ], [ %5, %while.body ]
%0 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %newelement, i32 0
- %1 = load <4 x i32>, <4 x i32>* undef, align 1
+ %1 = load <4 x i32>, ptr undef, align 1
%2 = add <4 x i32> %1, %0
%3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
%4 = add <4 x i32> %2, %3
; RUN: llc -verify-machineinstrs -mtriple=powerpc64 \
; RUN: -mcpu=pwr9 < %s | FileCheck %s --check-prefix=64BIT
-define dso_local void @foo(i32 %inta, i64* %long_intb) {
+define dso_local void @foo(i32 %inta, ptr %long_intb) {
; 32BIT-LABEL: foo:
; 32BIT: # %bb.0: # %entry
; 32BIT-NEXT: srawi 5, 3, 31
entry:
%conv = sext i32 %inta to i64
%shl = shl nsw i64 %conv, 8
- store i64 %shl, i64* %long_intb, align 8
+ store i64 %shl, ptr %long_intb, align 8
ret void
}
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
; Function Attrs: norecurse nounwind readonly
-define signext i32 @limit_loop(i32 signext %iters, i32* nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
+define signext i32 @limit_loop(i32 signext %iters, ptr nocapture readonly %vec, i32 signext %limit) local_unnamed_addr {
entry:
%cmp5 = icmp sgt i32 %iters, 0
br i1 %cmp5, label %for.body.preheader, label %cleanup
for.body: ; preds = %for.body.preheader, %for.cond
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.cond ]
- %arrayidx = getelementptr inbounds i32, i32* %vec, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %vec, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx, align 4
%cmp1 = icmp slt i32 %1, %limit
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
br i1 %cmp1, label %for.cond, label %cleanup
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- tail call void bitcast (void (...)* @fa to void ()*)()
+ tail call void @fa()
br label %if.end
if.else: ; preds = %entry
- tail call void bitcast (void (...)* @fb to void ()*)()
+ tail call void @fb()
br label %if.end
if.end: ; preds = %if.else, %if.then
%struct.fab = type { float, float }
; Function Attrs: nounwind
-define void @func_fab(%struct.fab* noalias sret(%struct.fab) %agg.result, i64 %x.coerce) #0 {
+define void @func_fab(ptr noalias sret(%struct.fab) %agg.result, i64 %x.coerce) #0 {
entry:
%x = alloca %struct.fab, align 8
- %0 = bitcast %struct.fab* %x to i64*
- store i64 %x.coerce, i64* %0, align 1
- %1 = bitcast %struct.fab* %agg.result to i8*
- %2 = bitcast %struct.fab* %x to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
+ store i64 %x.coerce, ptr %x, align 1
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.result, ptr align 4 %x, i64 8, i1 false)
ret void
}
; CHECK: func_fab
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "target-features"="" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
; };
;
; unsigned int ret = foo();
-; void* g = (void *) ((unsigned int)&&L + arr[ret]);
+; ptr g = (ptr) ((unsigned int)&&L + arr[ret]);
; goto *g;
;
; x:
br label %L
L: ; preds = %L, %entry
- indirectbr i8* inttoptr (i32 add (i32 ptrtoint (i8* blockaddress(@main, %L) to i32), i32 sub (i32 ptrtoint (i8* blockaddress(@main, %return) to i32), i32 ptrtoint (i8* blockaddress(@main, %L) to i32))) to i8*), [label %return, label %L]
+ indirectbr ptr inttoptr (i32 add (i32 ptrtoint (ptr blockaddress(@main, %L) to i32), i32 sub (i32 ptrtoint (ptr blockaddress(@main, %return) to i32), i32 ptrtoint (ptr blockaddress(@main, %L) to i32))) to ptr), [label %return, label %L]
return: ; preds = %L
ret i32 15
br label %__here
__here: ; preds = %entry
- ret i64 ptrtoint (i8* blockaddress(@foo, %__here) to i64)
+ ret i64 ptrtoint (ptr blockaddress(@foo, %__here) to i64)
}
; CHECK-PIC32: lwz {{r[0-9]+}}, .LC0-.LTOC(r30)
; PWR9-NEXT: li 3, 55
; PWR9-NEXT: blr
entry:
- %0 = atomicrmw xchg i8* getelementptr inbounds ({ i8 }, { i8 }* @value8, i64 0, i32 0), i8 %val seq_cst, align 1
+ %0 = atomicrmw xchg ptr @value8, i8 %val seq_cst, align 1
%conv = zext i8 %0 to i32
- store i32 %conv, i32* @global_int, align 4
+ store i32 %conv, ptr @global_int, align 4
ret i32 55
}
; PWR9-NEXT: li 3, 55
; PWR9-NEXT: blr
entry:
- %0 = atomicrmw xchg i16* getelementptr inbounds ({ i16 }, { i16 }* @value16, i64 0, i32 0), i16 %val seq_cst, align 2
+ %0 = atomicrmw xchg ptr @value16, i16 %val seq_cst, align 2
%conv = zext i16 %0 to i32
- store i32 %conv, i32* @global_int, align 4
+ store i32 %conv, ptr @global_int, align 4
ret i32 55
}
; CHECK-NEXT: stw 31, 28(1)
; CHECK: mr 31, 1
entry:
- %a_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32 ; <i32*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
+ %a_addr = alloca i32 ; <ptr> [#uses=2]
+ %retval = alloca i32 ; <ptr> [#uses=2]
+ %0 = alloca i32 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %a, i32* %a_addr
- %1 = call i32 @_Z3barPi(i32* %a_addr) ; <i32> [#uses=1]
- store i32 %1, i32* %0, align 4
- %2 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %2, i32* %retval, align 4
+ store i32 %a, ptr %a_addr
+ %1 = call i32 @_Z3barPi(ptr %a_addr) ; <i32> [#uses=1]
+ store i32 %1, ptr %0, align 4
+ %2 = load i32, ptr %0, align 4 ; <i32> [#uses=1]
+ store i32 %2, ptr %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, ptr %retval ; <i32> [#uses=1]
ret i32 %retval1
}
-declare i32 @_Z3barPi(i32*)
+declare i32 @_Z3barPi(ptr)
br i1 %tmp2, label %true, label %false
true:
- store i32 %a, i32* %tmp, align 4
- %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
+ store i32 %a, ptr %tmp, align 4
+ %tmp4 = call i32 @doSomething(i32 0, ptr %tmp)
br label %false
false:
}
; Function Attrs: optsize
-declare i32 @doSomething(i32, i32*)
+declare i32 @doSomething(i32, ptr)
; Check that we do not perform the restore inside the loop whereas the save
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
%sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
- %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+ %call = tail call i32 @something()
%add = add nsw i32 %call, %sum.04
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 10
for.body: ; preds = %for.body, %entry
%i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
%sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
- %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+ %call = tail call i32 @something()
%add = add nsw i32 %call, %sum.03
%inc = add nuw nsw i32 %i.04, 1
%exitcond = icmp eq i32 %inc, 10
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
%sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
- %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+ %call = tail call i32 @something()
%add = add nsw i32 %call, %sum.04
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 10
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- tail call void bitcast (void (...)* @somethingElse to void ()*)()
+ tail call void @somethingElse()
%shl = shl i32 %add, 3
br label %if.end
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- tail call void bitcast (void (...)* @somethingElse to void ()*)()
+ tail call void @somethingElse()
br label %for.body
for.body: ; preds = %for.body, %if.then
%i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
%sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
- %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+ %call = tail call i32 @something()
%add = add nsw i32 %call, %sum.04
%inc = add nuw nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 10
for.body: ; preds = %for.body, %entry
%sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
- %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+ %call = tail call i32 @something()
%add = add nsw i32 %call, %sum.03
- store i32 %add, i32* %ptr
+ store i32 %add, ptr %ptr
br label %for.body
if.end:
%sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2]
%call = tail call i32 asm "mftb $0, 268", "=r,~{r14}"()
%add = add nsw i32 %call, %sum.03
- store i32 %add, i32* %ptr
+ store i32 %add, ptr %ptr
br i1 undef, label %body1, label %body2
body1:
br i1 undef, label %loop2a, label %end
loop1: ; preds = %loop2a, %loop2b
- %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
- %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
- %0 = icmp eq i32* %var, null
- %next.load = load i32*, i32** undef
+ %var.phi = phi ptr [ %next.phi, %loop2b ], [ %var, %loop2a ]
+ %next.phi = phi ptr [ %next.load, %loop2b ], [ %next.var, %loop2a ]
+ %0 = icmp eq ptr %var, null
+ %next.load = load ptr, ptr undef
br i1 %0, label %loop2a, label %loop2b
loop2a: ; preds = %loop1, %body, %entry
- %var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
- %next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
+ %var = phi ptr [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
+ %next.var = phi ptr [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
br label %loop1
loop2b: ; preds = %loop1
- %gep1 = bitcast i32* %var.phi to i32*
- %next.ptr = bitcast i32* %gep1 to i32**
- store i32* %next.phi, i32** %next.ptr
+ store ptr %next.phi, ptr %var.phi
br label %loop1
end:
@lock = common global i32 0, align 4
@htindex = common global i32 0, align 4
@stride = common global i32 0, align 4
-@ht = common global i32* null, align 8
-@he = common global i8* null, align 8
+@ht = common global ptr null, align 8
+@he = common global ptr null, align 8
; Test for a bug that was caused when save point was equal to restore point.
; Function Attrs: nounwind
; CHECK: blr
define signext i32 @transpose() {
entry:
- %0 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 1), align 4
+ %0 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 1), align 4
%shl.i = shl i32 %0, 7
- %1 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 2), align 4
+ %1 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 2), align 4
%or.i = or i32 %shl.i, %1
%shl1.i = shl i32 %or.i, 7
- %2 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 3), align 4
+ %2 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 3), align 4
%or2.i = or i32 %shl1.i, %2
- %3 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 7), align 4
+ %3 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 7), align 4
%shl3.i = shl i32 %3, 7
- %4 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 6), align 4
+ %4 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 6), align 4
%or4.i = or i32 %shl3.i, %4
%shl5.i = shl i32 %or4.i, 7
- %5 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 5), align 4
+ %5 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 5), align 4
%or6.i = or i32 %shl5.i, %5
%cmp.i = icmp ugt i32 %or2.i, %or6.i
br i1 %cmp.i, label %cond.true.i, label %cond.false.i
cond.true.i:
%shl7.i = shl i32 %or2.i, 7
- %6 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
+ %6 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 4), align 4
%or8.i = or i32 %6, %shl7.i
%conv.i = zext i32 %or8.i to i64
%shl9.i = shl nuw nsw i64 %conv.i, 21
cond.false.i:
%shl12.i = shl i32 %or6.i, 7
- %7 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
+ %7 = load i32, ptr getelementptr inbounds ([0 x i32], ptr @columns, i64 0, i64 4), align 4
%or13.i = or i32 %7, %shl12.i
%conv14.i = zext i32 %or13.i to i64
%shl15.i = shl nuw nsw i64 %conv14.i, 21
%cond.i = phi i64 [ %or11.i, %cond.true.i ], [ %or17.i, %cond.false.i ]
%shr.29.i = lshr i64 %cond.i, 17
%conv18.i = trunc i64 %shr.29.i to i32
- store i32 %conv18.i, i32* @lock, align 4
+ store i32 %conv18.i, ptr @lock, align 4
%rem.i = srem i64 %cond.i, 1050011
%conv19.i = trunc i64 %rem.i to i32
- store i32 %conv19.i, i32* @htindex, align 4
+ store i32 %conv19.i, ptr @htindex, align 4
%rem20.i = urem i32 %conv18.i, 179
%add.i = or i32 %rem20.i, 131072
- store i32 %add.i, i32* @stride, align 4
- %8 = load i32*, i32** @ht, align 8
- %arrayidx = getelementptr inbounds i32, i32* %8, i64 %rem.i
- %9 = load i32, i32* %arrayidx, align 4
+ store i32 %add.i, ptr @stride, align 4
+ %8 = load ptr, ptr @ht, align 8
+ %arrayidx = getelementptr inbounds i32, ptr %8, i64 %rem.i
+ %9 = load i32, ptr %arrayidx, align 4
%cmp1 = icmp eq i32 %9, %conv18.i
br i1 %cmp1, label %if.then, label %if.end
if.then:
%idxprom.lcssa = phi i64 [ %rem.i, %hash.exit ], [ %idxprom.1, %if.end ], [ %idxprom.2, %if.end.1 ], [ %idxprom.3, %if.end.2 ], [ %idxprom.4, %if.end.3 ], [ %idxprom.5, %if.end.4 ], [ %idxprom.6, %if.end.5 ], [ %idxprom.7, %if.end.6 ]
- %10 = load i8*, i8** @he, align 8
- %arrayidx3 = getelementptr inbounds i8, i8* %10, i64 %idxprom.lcssa
- %11 = load i8, i8* %arrayidx3, align 1
+ %10 = load ptr, ptr @he, align 8
+ %arrayidx3 = getelementptr inbounds i8, ptr %10, i64 %idxprom.lcssa
+ %11 = load i8, ptr %arrayidx3, align 1
%conv = sext i8 %11 to i32
br label %cleanup
%sub = add nsw i32 %add, -1050011
%sub.add = select i1 %cmp4, i32 %sub, i32 %add
%idxprom.1 = sext i32 %sub.add to i64
- %arrayidx.1 = getelementptr inbounds i32, i32* %8, i64 %idxprom.1
- %12 = load i32, i32* %arrayidx.1, align 4
+ %arrayidx.1 = getelementptr inbounds i32, ptr %8, i64 %idxprom.1
+ %12 = load i32, ptr %arrayidx.1, align 4
%cmp1.1 = icmp eq i32 %12, %conv18.i
br i1 %cmp1.1, label %if.then, label %if.end.1
%sub.1 = add nsw i32 %add.1, -1050011
%sub.add.1 = select i1 %cmp4.1, i32 %sub.1, i32 %add.1
%idxprom.2 = sext i32 %sub.add.1 to i64
- %arrayidx.2 = getelementptr inbounds i32, i32* %8, i64 %idxprom.2
- %13 = load i32, i32* %arrayidx.2, align 4
+ %arrayidx.2 = getelementptr inbounds i32, ptr %8, i64 %idxprom.2
+ %13 = load i32, ptr %arrayidx.2, align 4
%cmp1.2 = icmp eq i32 %13, %conv18.i
br i1 %cmp1.2, label %if.then, label %if.end.2
%sub.2 = add nsw i32 %add.2, -1050011
%sub.add.2 = select i1 %cmp4.2, i32 %sub.2, i32 %add.2
%idxprom.3 = sext i32 %sub.add.2 to i64
- %arrayidx.3 = getelementptr inbounds i32, i32* %8, i64 %idxprom.3
- %14 = load i32, i32* %arrayidx.3, align 4
+ %arrayidx.3 = getelementptr inbounds i32, ptr %8, i64 %idxprom.3
+ %14 = load i32, ptr %arrayidx.3, align 4
%cmp1.3 = icmp eq i32 %14, %conv18.i
br i1 %cmp1.3, label %if.then, label %if.end.3
%sub.3 = add nsw i32 %add.3, -1050011
%sub.add.3 = select i1 %cmp4.3, i32 %sub.3, i32 %add.3
%idxprom.4 = sext i32 %sub.add.3 to i64
- %arrayidx.4 = getelementptr inbounds i32, i32* %8, i64 %idxprom.4
- %15 = load i32, i32* %arrayidx.4, align 4
+ %arrayidx.4 = getelementptr inbounds i32, ptr %8, i64 %idxprom.4
+ %15 = load i32, ptr %arrayidx.4, align 4
%cmp1.4 = icmp eq i32 %15, %conv18.i
br i1 %cmp1.4, label %if.then, label %if.end.4
%sub.4 = add nsw i32 %add.4, -1050011
%sub.add.4 = select i1 %cmp4.4, i32 %sub.4, i32 %add.4
%idxprom.5 = sext i32 %sub.add.4 to i64
- %arrayidx.5 = getelementptr inbounds i32, i32* %8, i64 %idxprom.5
- %16 = load i32, i32* %arrayidx.5, align 4
+ %arrayidx.5 = getelementptr inbounds i32, ptr %8, i64 %idxprom.5
+ %16 = load i32, ptr %arrayidx.5, align 4
%cmp1.5 = icmp eq i32 %16, %conv18.i
br i1 %cmp1.5, label %if.then, label %if.end.5
%sub.5 = add nsw i32 %add.5, -1050011
%sub.add.5 = select i1 %cmp4.5, i32 %sub.5, i32 %add.5
%idxprom.6 = sext i32 %sub.add.5 to i64
- %arrayidx.6 = getelementptr inbounds i32, i32* %8, i64 %idxprom.6
- %17 = load i32, i32* %arrayidx.6, align 4
+ %arrayidx.6 = getelementptr inbounds i32, ptr %8, i64 %idxprom.6
+ %17 = load i32, ptr %arrayidx.6, align 4
%cmp1.6 = icmp eq i32 %17, %conv18.i
br i1 %cmp1.6, label %if.then, label %if.end.6
%sub.6 = add nsw i32 %add.6, -1050011
%sub.add.6 = select i1 %cmp4.6, i32 %sub.6, i32 %add.6
%idxprom.7 = sext i32 %sub.add.6 to i64
- %arrayidx.7 = getelementptr inbounds i32, i32* %8, i64 %idxprom.7
- %18 = load i32, i32* %arrayidx.7, align 4
+ %arrayidx.7 = getelementptr inbounds i32, ptr %8, i64 %idxprom.7
+ %18 = load i32, ptr %arrayidx.7, align 4
%cmp1.7 = icmp eq i32 %18, %conv18.i
br i1 %cmp1.7, label %if.then, label %cleanup
}
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc-montavista-linux-gnuspe"
-%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105 = type { i8, i8, i16, i8*, i8* }
+%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105 = type { i8, i8, i16, ptr, ptr }
-define fastcc void @test1(%struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105* %args) {
+define fastcc void @test1(ptr %args) {
entry:
br i1 undef, label %repeat, label %maxlen_reached
unreachable
sw.bb323: ; preds = %repeat
- %0 = va_arg %struct.__va_list_tag.0.9.18.23.32.41.48.55.62.67.72.77.82.87.90.93.96.101.105* %args, i32
+ %0 = va_arg ptr %args, i32
unreachable
sw.bb326: ; preds = %repeat
define void @foo() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @x, align 16
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), ppc_fp128 %0)
+ %0 = load ppc_fp128, ptr @x, align 16
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 %0)
ret void
}
; Do not skip register r4 because of register alignment in soft float mode. Instead skipping
; put in r4 part of first argument for printf function (long double).
; CHECK: lwzu 4, x@l({{[0-9]+}})
-declare i32 @printf(i8* nocapture readonly, ...) #0
+declare i32 @printf(ptr nocapture readonly, ...) #0
attributes #0 = { "use-soft-float"="true" }
define i32 @main() #0 {
entry:
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), ppc_fp128 0xM3FF00000000000000000000000000000)
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 0xM3FF00000000000000000000000000000)
ret i32 0
}
; CHECK: li 5, 0
; CHECK: li 7, 0
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
attributes #0 = { "use-soft-float"="true" }
; RUN: llc -verify-machineinstrs < %s -mcpu=ppc32 | FileCheck %s
target triple = "powerpc-unknown-linux-gnu"
-declare void @printf(i8*, ...)
+declare void @printf(ptr, ...)
define void @main() {
- call void (i8*, ...) @printf(i8* undef, i1 false)
+ call void (ptr, ...) @printf(ptr undef, i1 false)
ret void
}
define void @foo() local_unnamed_addr {
entry:
- %0 = load i64, i64* @ll
+ %0 = load i64, ptr @ll
%conv = sitofp i64 %0 to float
- store float %conv, float* getelementptr inbounds (%struct.A, %struct.A* @a, i32 0, i32 0)
+ store float %conv, ptr @a
ret void
}
; Function Attrs: nounwind uwtable
define i32 @fn1() #0 {
entry:
- %.promoted = load i72, i72* inttoptr (i32 1 to i72*), align 4
+ %.promoted = load i72, ptr inttoptr (i32 1 to ptr), align 4
br label %while.cond
while.cond: ; preds = %while.cond, %entry
while.end: ; preds = %while.cond
%bf.set.lcssa = phi i72 [ %bf.set, %while.cond ]
- store i72 %bf.set.lcssa, i72* inttoptr (i32 1 to i72*), align 4
+ store i72 %bf.set.lcssa, ptr inttoptr (i32 1 to ptr), align 4
ret i32 undef
}
; Tests that the 'nest' parameter attribute causes the relevant parameter to be
; passed in the right register (r11 for PPC).
-define i8* @nest_receiver(i8* nest %arg) nounwind {
+define ptr @nest_receiver(ptr nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
; CHECK: # %bb.0:
; CHECK-NEXT: mr 3, 11
; CHECK-NEXT: blr
- ret i8* %arg
+ ret ptr %arg
}
-define i8* @nest_caller(i8* %arg) nounwind {
+define ptr @nest_caller(ptr %arg) nounwind {
; CHECK-LABEL: nest_caller:
; CHECK: mr 11, 3
; CHECK-NEXT: bl nest_receiver
; CHECK: blr
- %result = call i8* @nest_receiver(i8* nest %arg)
- ret i8* %result
+ %result = call ptr @nest_receiver(ptr nest %arg)
+ ret ptr %result
}
define i32 @foo() {
entry:
- %0 = load i32, i32* @bar, align 4
+ %0 = load i32, ptr @bar, align 4
%call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
ret i32 %0
}
define i32 @load() {
entry:
- %0 = load i32, i32* @bar1
- %1 = load i32, i32* @bar2
+ %0 = load i32, ptr @bar1
+ %1 = load i32, ptr @bar2
%2 = add i32 %0, %1
ret i32 %2
}
define i32 @foo() {
entry:
- %0 = load i32, i32* @bar, align 4
+ %0 = load i32, ptr @bar, align 4
%call = call i32 (i32, ...) @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
ret i32 0
}
@a = thread_local local_unnamed_addr global i32 6, align 4
define i32 @main() local_unnamed_addr #0 {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
@a = thread_local local_unnamed_addr global i32 6, align 4
define i32 @main() local_unnamed_addr #0 {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
define void @foo() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @x, align 16
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), ppc_fp128 %0, ppc_fp128 %0)
+ %0 = load ppc_fp128, ptr @x, align 16
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, ppc_fp128 %0, ppc_fp128 %0)
ret void
}
; Do not put second argument of function in r8 register, because there is no enough registers
; CHECK: stw 5, 12(1)
; CHECK: stw 4, 8(1)
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
attributes #0 = { "use-soft-float"="true" }
; RUN: llc -verify-machineinstrs -mtriple="powerpc-unknown-linux-gnu" -mcpu=ppc64 < %s | FileCheck %s
; PR15286
-%va_list = type {i8, i8, i16, i8*, i8*}
-declare void @llvm.va_copy(i8*, i8*)
+%va_list = type {i8, i8, i16, ptr, ptr}
+declare void @llvm.va_copy(ptr, ptr)
define void @test_vacopy() nounwind {
entry:
%0 = alloca %va_list
%1 = alloca %va_list
- %2 = bitcast %va_list* %0 to i8*
- %3 = bitcast %va_list* %1 to i8*
- call void @llvm.va_copy(i8* %3, i8* %2)
+ call void @llvm.va_copy(ptr %1, ptr %0)
ret void
}
%0 = type { double, double }
-define void @maybe_an_fma(%0* sret(%0) %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind {
+define void @maybe_an_fma(ptr sret(%0) %agg.result, ptr byval(%0) %a, ptr byval(%0) %b, ptr byval(%0) %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
- %a.real = load double, double* %a.realp
- %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
- %a.imag = load double, double* %a.imagp
- %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
- %b.real = load double, double* %b.realp
- %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
- %b.imag = load double, double* %b.imagp
+ %a.real = load double, ptr %a
+ %a.imagp = getelementptr inbounds %0, ptr %a, i32 0, i32 1
+ %a.imag = load double, ptr %a.imagp
+ %b.real = load double, ptr %b
+ %b.imagp = getelementptr inbounds %0, ptr %b, i32 0, i32 1
+ %b.imag = load double, ptr %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.r = fsub double %mul.rl, %mul.rr
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
- %c.real = load double, double* %c.realp
- %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
- %c.imag = load double, double* %c.imagp
+ %c.real = load double, ptr %c
+ %c.imagp = getelementptr inbounds %0, ptr %c, i32 0, i32 1
+ %c.imag = load double, ptr %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
- store double %add.r, double* %real
- store double %add.i, double* %imag
+ %imag = getelementptr inbounds %0, ptr %agg.result, i32 0, i32 1
+ store double %add.r, ptr %agg.result
+ store double %add.i, ptr %imag
ret void
; CHECK: fmadd
}
define void @modulo_sw(i32 signext %a, i32 signext %b) local_unnamed_addr {
entry:
%rem = srem i32 %a, %b
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
ret void
; CHECK-LABEL: modulo_sw
; CHECK: modsw {{[0-9]+}}, 3, 4
define void @modulo_ud(i64 %a, i64 %b) local_unnamed_addr {
entry:
%rem = urem i64 %a, %b
- store i64 %rem, i64* @mod_resultud, align 8
+ store i64 %rem, ptr @mod_resultud, align 8
ret void
; CHECK-LABEL: modulo_ud
; CHECK: modud {{[0-9]+}}, 3, 4
define void @modulo_div_sw(i32 signext %a, i32 signext %b) local_unnamed_addr {
entry:
%rem = srem i32 %a, %b
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
%div = sdiv i32 %a, %b
- store i32 %div, i32* @div_resultsw, align 4
+ store i32 %div, ptr @div_resultsw, align 4
ret void
; CHECK-LABEL: modulo_div_sw
; CHECK: modsw {{[0-9]+}}, 3, 4
define void @modulo_div_abc_sw(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr {
entry:
%rem = srem i32 %a, %c
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
%div = sdiv i32 %b, %c
- store i32 %div, i32* @div_resultsw, align 4
+ store i32 %div, ptr @div_resultsw, align 4
ret void
; CHECK-LABEL: modulo_div_abc_sw
; CHECK: modsw {{[0-9]+}}, 3, 5
define void @modulo_div_uw(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr {
entry:
%rem = urem i32 %a, %b
- store i32 %rem, i32* @mod_resultuw, align 4
+ store i32 %rem, ptr @mod_resultuw, align 4
%div = udiv i32 %a, %b
- store i32 %div, i32* @div_resultuw, align 4
+ store i32 %div, ptr @div_resultuw, align 4
ret void
; CHECK-LABEL: modulo_div_uw
; CHECK: moduw {{[0-9]+}}, 3, 4
define void @modulo_div_swuw(i32 signext %a, i32 signext %b) local_unnamed_addr {
entry:
%rem = srem i32 %a, %b
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
%div = udiv i32 %a, %b
- store i32 %div, i32* @div_resultsw, align 4
+ store i32 %div, ptr @div_resultsw, align 4
ret void
; CHECK-LABEL: modulo_div_swuw
; CHECK: modsw {{[0-9]+}}, 3, 4
define void @modulo_div_udsd(i64 %a, i64 %b) local_unnamed_addr {
entry:
%rem = urem i64 %a, %b
- store i64 %rem, i64* @mod_resultud, align 8
+ store i64 %rem, ptr @mod_resultud, align 8
%div = sdiv i64 %a, %b
- store i64 %div, i64* @div_resultsd, align 8
+ store i64 %div, ptr @div_resultsd, align 8
ret void
; CHECK-LABEL: modulo_div_udsd
; CHECK: modud {{[0-9]+}}, 3, 4
define void @modulo_const32_sw(i32 signext %a) local_unnamed_addr {
entry:
%rem = srem i32 %a, 32
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
ret void
; CHECK-LABEL: modulo_const32_sw
; CHECK-NOT: modsw
define void @blocks_modulo_div_sw(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr {
entry:
%div = sdiv i32 %a, %b
- store i32 %div, i32* @div_resultsw, align 4
+ store i32 %div, ptr @div_resultsw, align 4
%cmp = icmp sgt i32 %c, 0
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
%rem = srem i32 %a, %b
- store i32 %rem, i32* @mod_resultsw, align 4
+ store i32 %rem, ptr @mod_resultsw, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
}
; Verify this case doesn't crash
-define void @setbn4(i128 %0, i32* %sel.out) {
+define void @setbn4(i128 %0, ptr %sel.out) {
; CHECK-LABEL: setbn4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r6, 1
%c2 = icmp ugt i128 %0, 5192296858534827628530496329220096
%ext = zext i1 %c2 to i32
%sel = select i1 %c1, i32 -1, i32 %ext
- store i32 %sel, i32* %sel.out, align 4
+ store i32 %sel, ptr %sel.out, align 4
ret void
}
define void @pass_arg_si() nounwind {
entry:
- %0 = load i32, i32* @si, align 4
+ %0 = load i32, ptr @si, align 4
tail call void @arg_si(i32 signext %0) nounwind
ret void
}
define void @pass_arg_ui() nounwind {
entry:
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
tail call void @arg_ui(i32 zeroext %0) nounwind
ret void
}
define signext i32 @pass_ret_si() nounwind readonly {
entry:
- %0 = load i32, i32* @si, align 4
+ %0 = load i32, ptr @si, align 4
ret i32 %0
}
; CHECK: @pass_ret_si
define zeroext i32 @pass_ret_ui() nounwind readonly {
entry:
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
ret i32 %0
}
; CHECK: @pass_ret_ui
%0 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> zeroinitializer, <16 x i8> undef, <16 x i8> undef, <16 x i8> zeroinitializer)
%1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %0)
%2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
- store <16 x i8> %2, <16 x i8>* null, align 1
+ store <16 x i8> %2, ptr null, align 1
unreachable
}
%0 = type <{ double }>
%1 = type <{ double }>
-define void @acc_regalloc(i32* %arg, [0 x %0]* %arg1, [0 x %1]* %arg2) local_unnamed_addr {
+define void @acc_regalloc(ptr %arg, ptr %arg1, ptr %arg2) local_unnamed_addr {
; CHECK-LABEL: acc_regalloc:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: lwz r3, 0(r3)
; TRACKLIVE-NEXT: stxv vs12, 48(0)
; TRACKLIVE-NEXT: b .LBB0_1
bb:
- %i = load i32, i32* %arg, align 4
+ %i = load i32, ptr %arg, align 4
%i3 = sext i32 %i to i64
%i4 = shl nsw i64 %i3, 3
- %i5 = bitcast [0 x %0]* %arg1 to i8*
- %i6 = getelementptr i8, i8* %i5, i64 undef
- %i7 = getelementptr [0 x %1], [0 x %1]* %arg2, i64 0, i64 -8
- %i8 = getelementptr i8, i8* %i6, i64 undef
+ %i6 = getelementptr i8, ptr %arg1, i64 undef
+ %i7 = getelementptr [0 x %1], ptr %arg2, i64 0, i64 -8
+ %i8 = getelementptr i8, ptr %i6, i64 undef
br label %bb9
bb9: ; preds = %bb95, %bb
%i10 = phi i64 [ 1, %bb ], [ 0, %bb95 ]
- %i11 = getelementptr %1, %1* null, i64 2
- %i12 = bitcast %1* %i11 to <2 x double>*
- %i13 = load <2 x double>, <2 x double>* %i12, align 1
+ %i11 = getelementptr %1, ptr null, i64 2
+ %i13 = load <2 x double>, ptr %i11, align 1
%i14 = add nuw nsw i64 %i10, 2
- %i15 = getelementptr inbounds %1, %1* %i7, i64 undef
- %i16 = bitcast %1* %i15 to <2 x double>*
- %i17 = load <2 x double>, <2 x double>* %i16, align 1
- %i18 = load <2 x double>, <2 x double>* null, align 1
- %i19 = getelementptr %1, %1* %i15, i64 6
- %i20 = bitcast %1* %i19 to <2 x double>*
- %i21 = load <2 x double>, <2 x double>* %i20, align 1
- %i22 = load i64, i64* undef, align 8
+ %i15 = getelementptr inbounds %1, ptr %i7, i64 undef
+ %i17 = load <2 x double>, ptr %i15, align 1
+ %i18 = load <2 x double>, ptr null, align 1
+ %i19 = getelementptr %1, ptr %i15, i64 6
+ %i21 = load <2 x double>, ptr %i19, align 1
+ %i22 = load i64, ptr undef, align 8
%i23 = insertelement <2 x i64> poison, i64 %i22, i32 0
%i24 = bitcast <2 x i64> %i23 to <2 x double>
%i25 = shufflevector <2 x double> %i24, <2 x double> undef, <2 x i32> zeroinitializer
%i26 = mul i64 %i14, %i4
- %i27 = getelementptr i8, i8* null, i64 %i26
- %i28 = getelementptr inbounds i8, i8* %i27, i64 0
- %i29 = getelementptr i8, i8* %i28, i64 16
- %i30 = bitcast i8* %i29 to i64*
- %i31 = load i64, i64* %i30, align 8
+ %i27 = getelementptr i8, ptr null, i64 %i26
+ %i29 = getelementptr i8, ptr %i27, i64 16
+ %i31 = load i64, ptr %i29, align 8
%i32 = insertelement <2 x i64> poison, i64 %i31, i32 0
%i33 = bitcast <2 x i64> %i32 to <2 x double>
%i34 = shufflevector <2 x double> %i33, <2 x double> undef, <2 x i32> zeroinitializer
%i101 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i100, 2
%i102 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %i94)
%i103 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %i102, 3
- %i104 = getelementptr inbounds i8, i8* %i8, i64 undef
- %i105 = bitcast i8* %i104 to <16 x i8>*
- store <16 x i8> %i97, <16 x i8>* %i105, align 1
- %i106 = getelementptr i8, i8* %i104, i64 32
- %i107 = bitcast i8* %i106 to <16 x i8>*
- store <16 x i8> %i101, <16 x i8>* %i107, align 1
- %i108 = getelementptr i8, i8* null, i64 16
- %i109 = bitcast i8* %i108 to <16 x i8>*
- store <16 x i8> %i99, <16 x i8>* %i109, align 1
- %i110 = getelementptr i8, i8* null, i64 48
- %i111 = bitcast i8* %i110 to <16 x i8>*
- store <16 x i8> %i103, <16 x i8>* %i111, align 1
+ %i104 = getelementptr inbounds i8, ptr %i8, i64 undef
+ store <16 x i8> %i97, ptr %i104, align 1
+ %i106 = getelementptr i8, ptr %i104, i64 32
+ store <16 x i8> %i101, ptr %i106, align 1
+ %i108 = getelementptr i8, ptr null, i64 16
+ store <16 x i8> %i99, ptr %i108, align 1
+ %i110 = getelementptr i8, ptr null, i64 48
+ store <16 x i8> %i103, ptr %i110, align 1
br label %bb9
}
; value. Since the target does bitcast through memory and we no longer
; remember the address we need to do the store in a fresh local
; address.
-define ppc_fp128 @test(%struct.S* byval(%struct.S) %x) nounwind {
+define ppc_fp128 @test(ptr byval(%struct.S) %x) nounwind {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 5, -16(1)
; CHECK-P9-NEXT: std 4, 56(1)
; CHECK-P9-NEXT: blr
entry:
- %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1
- %0 = load ppc_fp128, ppc_fp128* %b, align 16
+ %b = getelementptr inbounds %struct.S, ptr %x, i32 0, i32 1
+ %0 = load ppc_fp128, ptr %b, align 16
ret ppc_fp128 %0
}
%class.T = type { [2 x i8] }
-define void @e_callee(%class.T* %this, i8* %c) { ret void }
-define void @e_caller(%class.T* %this, i8* %c) {
- call void @e_callee(%class.T* %this, i8* %c)
+define void @e_callee(ptr %this, ptr %c) { ret void }
+define void @e_caller(ptr %this, ptr %c) {
+ call void @e_callee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: e_caller:
; CHECK-FS-NEXT: nop
}
-define void @e_scallee(%class.T* %this, i8* %c) section "different" { ret void }
-define void @e_scaller(%class.T* %this, i8* %c) {
- call void @e_scallee(%class.T* %this, i8* %c)
+define void @e_scallee(ptr %this, ptr %c) section "different" { ret void }
+define void @e_scaller(ptr %this, ptr %c) {
+ call void @e_scallee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: e_scaller:
; CHECK-NEXT: nop
}
-define void @e_s2callee(%class.T* %this, i8* %c) { ret void }
-define void @e_s2caller(%class.T* %this, i8* %c) section "different" {
- call void @e_s2callee(%class.T* %this, i8* %c)
+define void @e_s2callee(ptr %this, ptr %c) { ret void }
+define void @e_s2caller(ptr %this, ptr %c) section "different" {
+ call void @e_s2callee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: e_s2caller:
$cd1 = comdat any
$cd2 = comdat any
-define void @e_ccallee(%class.T* %this, i8* %c) comdat($cd1) { ret void }
-define void @e_ccaller(%class.T* %this, i8* %c) comdat($cd2) {
- call void @e_ccallee(%class.T* %this, i8* %c)
+define void @e_ccallee(ptr %this, ptr %c) comdat($cd1) { ret void }
+define void @e_ccaller(ptr %this, ptr %c) comdat($cd2) {
+ call void @e_ccallee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: e_ccaller:
$cd = comdat any
-define void @e_c1callee(%class.T* %this, i8* %c) comdat($cd) { ret void }
-define void @e_c1caller(%class.T* %this, i8* %c) comdat($cd) {
- call void @e_c1callee(%class.T* %this, i8* %c)
+define void @e_c1callee(ptr %this, ptr %c) comdat($cd) { ret void }
+define void @e_c1caller(ptr %this, ptr %c) comdat($cd) {
+ call void @e_c1callee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: e_c1caller:
; CHECK-NEXT: nop
}
-define weak_odr hidden void @wo_hcallee(%class.T* %this, i8* %c) { ret void }
-define void @wo_hcaller(%class.T* %this, i8* %c) {
- call void @wo_hcallee(%class.T* %this, i8* %c)
+define weak_odr hidden void @wo_hcallee(ptr %this, ptr %c) { ret void }
+define void @wo_hcaller(ptr %this, ptr %c) {
+ call void @wo_hcallee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: wo_hcaller:
; SCM-NEXT: nop
}
-define weak_odr protected void @wo_pcallee(%class.T* %this, i8* %c) { ret void }
-define void @wo_pcaller(%class.T* %this, i8* %c) {
- call void @wo_pcallee(%class.T* %this, i8* %c)
+define weak_odr protected void @wo_pcallee(ptr %this, ptr %c) { ret void }
+define void @wo_pcaller(ptr %this, ptr %c) {
+ call void @wo_pcallee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: wo_pcaller:
; SCM-NEXT: nop
}
-define weak_odr void @wo_callee(%class.T* %this, i8* %c) { ret void }
-define void @wo_caller(%class.T* %this, i8* %c) {
- call void @wo_callee(%class.T* %this, i8* %c)
+define weak_odr void @wo_callee(ptr %this, ptr %c) { ret void }
+define void @wo_caller(ptr %this, ptr %c) {
+ call void @wo_callee(ptr %this, ptr %c)
ret void
; CHECK-LABEL: wo_caller:
; CHECK-NEXT: nop
}
-define weak protected void @w_pcallee(i8* %ptr) { ret void }
-define void @w_pcaller(i8* %ptr) {
- call void @w_pcallee(i8* %ptr)
+define weak protected void @w_pcallee(ptr %ptr) { ret void }
+define void @w_pcaller(ptr %ptr) {
+ call void @w_pcallee(ptr %ptr)
ret void
; CHECK-LABEL: w_pcaller:
; SCM-NEXT: nop
}
-define weak hidden void @w_hcallee(i8* %ptr) { ret void }
-define void @w_hcaller(i8* %ptr) {
- call void @w_hcallee(i8* %ptr)
+define weak hidden void @w_hcallee(ptr %ptr) { ret void }
+define void @w_hcaller(ptr %ptr) {
+ call void @w_hcallee(ptr %ptr)
ret void
; CHECK-LABEL: w_hcaller:
; SCM-NEXT: nop
}
-define weak void @w_callee(i8* %ptr) { ret void }
-define void @w_caller(i8* %ptr) {
- call void @w_callee(i8* %ptr)
+define weak void @w_callee(ptr %ptr) { ret void }
+define void @w_caller(ptr %ptr) {
+ call void @w_callee(ptr %ptr)
ret void
; CHECK-LABEL: w_caller:
@gt = common global %struct.test zeroinitializer, align 16
@gp = common global %struct.pad zeroinitializer, align 8
-define signext i32 @callee1(i32 signext %x, %struct.test* byval(%struct.test) align 16 nocapture readnone %y, i32 signext %z) {
+define signext i32 @callee1(i32 signext %x, ptr byval(%struct.test) align 16 nocapture readnone %y, i32 signext %z) {
entry:
ret i32 %z
}
; CHECK: mr 3, 7
; CHECK: blr
-declare signext i32 @test1(i32 signext, %struct.test* byval(%struct.test) align 16, i32 signext)
+declare signext i32 @test1(i32 signext, ptr byval(%struct.test) align 16, i32 signext)
define void @caller1(i32 signext %z) {
entry:
- %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval(%struct.test) align 16 @gt, i32 signext %z)
+ %call = tail call signext i32 @test1(i32 signext 0, ptr byval(%struct.test) align 16 @gt, i32 signext %z)
ret void
}
; CHECK-LABEL: @caller1
; CHECK: mr 7, 3
; CHECK: bl test1
-define i64 @callee2(%struct.pad* byval(%struct.pad) nocapture readnone %x, i32 signext %y, %struct.test* byval(%struct.test) align 16 nocapture readonly %z) {
+define i64 @callee2(ptr byval(%struct.pad) nocapture readnone %x, i32 signext %y, ptr byval(%struct.test) align 16 nocapture readonly %z) {
entry:
- %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
- %0 = load i64, i64* %x1, align 16
+ %0 = load i64, ptr %z, align 16
ret i64 %0
}
; CHECK-LABEL: @callee2
; CHECK: ld {{[0-9]+}}, 128(1)
; CHECK: blr
-declare i64 @test2(%struct.pad* byval(%struct.pad), i32 signext, %struct.test* byval(%struct.test) align 16)
+declare i64 @test2(ptr byval(%struct.pad), i32 signext, ptr byval(%struct.test) align 16)
define void @caller2(i64 %z) {
entry:
%tmp = alloca %struct.test, align 16
- %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0
- store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
- %call = call i64 @test2(%struct.pad* byval(%struct.pad) @gp, i32 signext 0, %struct.test* byval(%struct.test) align 16 %tmp)
+ store i64 %z, ptr %tmp, align 16
+ %call = call i64 @test2(ptr byval(%struct.pad) @gp, i32 signext 0, ptr byval(%struct.test) align 16 %tmp)
ret void
}
; CHECK-LABEL: @caller2
; RUN: llc -verify-machineinstrs --mtriple powerpc64-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s --check-prefix=P10BE
-define signext i8 @caller_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) #0 {
+define signext i8 @caller_9(ptr nocapture readonly byval([9 x i8]) %data) #0 {
; P8LE-LABEL: caller_9:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [9 x i8], align 1
- %.elt0 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.temp.0.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_9_callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) #0 {
+define signext i8 @caller_9_callee_9(ptr nocapture readonly byval([9 x i8]) %data) #0 {
; P8LE-LABEL: caller_9_callee_9:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [9 x i8], align 1
- %.elt0 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [9 x i8], [9 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.temp.0.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [9 x i8], [9 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- call void @callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [9 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [9 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ call void @callee_9(ptr nocapture readonly byval([9 x i8]) %data)
ret i8 0
}
-define signext i8 @caller_10([10 x i8]* nocapture readonly byval([10 x i8]) %data) #0 {
+define signext i8 @caller_10(ptr nocapture readonly byval([10 x i8]) %data) #0 {
; P8LE-LABEL: caller_10:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [10 x i8], align 1
- %.elt0 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.elt9 = getelementptr inbounds [10 x i8], [10 x i8]* %data, i64 0, i64 9
- %.unpack9 = load i8, i8* %.elt9, align 1
- %.temp.0.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- %.temp.9.gep = getelementptr inbounds [10 x i8], [10 x i8]* %_param_data, i64 0, i64 9
- store i8 %.unpack9, i8* %.temp.9.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ %.elt9 = getelementptr inbounds [10 x i8], ptr %data, i64 0, i64 9
+ %.unpack9 = load i8, ptr %.elt9, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ %.temp.9.gep = getelementptr inbounds [10 x i8], ptr %_param_data, i64 0, i64 9
+ store i8 %.unpack9, ptr %.temp.9.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_12([12 x i8]* nocapture readonly byval([12 x i8]) %data) #0 {
+define signext i8 @caller_12(ptr nocapture readonly byval([12 x i8]) %data) #0 {
; P8LE-LABEL: caller_12:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [12 x i8], align 1
- %.elt0 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.elt9 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 9
- %.unpack9 = load i8, i8* %.elt9, align 1
- %.elt10 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 10
- %.unpack10 = load i8, i8* %.elt10, align 1
- %.elt11 = getelementptr inbounds [12 x i8], [12 x i8]* %data, i64 0, i64 11
- %.unpack11 = load i8, i8* %.elt11, align 1
- %.temp.0.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- %.temp.9.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 9
- store i8 %.unpack9, i8* %.temp.9.gep, align 1
- %.temp.10.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 10
- store i8 %.unpack10, i8* %.temp.10.gep, align 1
- %.temp.11.gep = getelementptr inbounds [12 x i8], [12 x i8]* %_param_data, i64 0, i64 11
- store i8 %.unpack11, i8* %.temp.11.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ %.elt9 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 9
+ %.unpack9 = load i8, ptr %.elt9, align 1
+ %.elt10 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 10
+ %.unpack10 = load i8, ptr %.elt10, align 1
+ %.elt11 = getelementptr inbounds [12 x i8], ptr %data, i64 0, i64 11
+ %.unpack11 = load i8, ptr %.elt11, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ %.temp.9.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 9
+ store i8 %.unpack9, ptr %.temp.9.gep, align 1
+ %.temp.10.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 10
+ store i8 %.unpack10, ptr %.temp.10.gep, align 1
+ %.temp.11.gep = getelementptr inbounds [12 x i8], ptr %_param_data, i64 0, i64 11
+ store i8 %.unpack11, ptr %.temp.11.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_14([14 x i8]* nocapture readonly byval([14 x i8]) %data) #0 {
+define signext i8 @caller_14(ptr nocapture readonly byval([14 x i8]) %data) #0 {
; P8LE-LABEL: caller_14:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [14 x i8], align 1
- %.elt0 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.elt9 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 9
- %.unpack9 = load i8, i8* %.elt9, align 1
- %.elt10 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 10
- %.unpack10 = load i8, i8* %.elt10, align 1
- %.elt11 = getelementptr inbounds [14 x i8], [14 x i8]* %data, i64 0, i64 11
- %.unpack11 = load i8, i8* %.elt11, align 1
- %.temp.0.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- %.temp.9.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 9
- store i8 %.unpack9, i8* %.temp.9.gep, align 1
- %.temp.10.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 10
- store i8 %.unpack10, i8* %.temp.10.gep, align 1
- %.temp.11.gep = getelementptr inbounds [14 x i8], [14 x i8]* %_param_data, i64 0, i64 11
- store i8 %.unpack11, i8* %.temp.11.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ %.elt9 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 9
+ %.unpack9 = load i8, ptr %.elt9, align 1
+ %.elt10 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 10
+ %.unpack10 = load i8, ptr %.elt10, align 1
+ %.elt11 = getelementptr inbounds [14 x i8], ptr %data, i64 0, i64 11
+ %.unpack11 = load i8, ptr %.elt11, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ %.temp.9.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 9
+ store i8 %.unpack9, ptr %.temp.9.gep, align 1
+ %.temp.10.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 10
+ store i8 %.unpack10, ptr %.temp.10.gep, align 1
+ %.temp.11.gep = getelementptr inbounds [14 x i8], ptr %_param_data, i64 0, i64 11
+ store i8 %.unpack11, ptr %.temp.11.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_16([16 x i8]* nocapture readonly byval([16 x i8]) %data) #0 {
+define signext i8 @caller_16(ptr nocapture readonly byval([16 x i8]) %data) #0 {
; P8LE-LABEL: caller_16:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [16 x i8], align 1
- %.elt0 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.elt9 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 9
- %.unpack9 = load i8, i8* %.elt9, align 1
- %.elt10 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 10
- %.unpack10 = load i8, i8* %.elt10, align 1
- %.elt11 = getelementptr inbounds [16 x i8], [16 x i8]* %data, i64 0, i64 11
- %.unpack11 = load i8, i8* %.elt11, align 1
- %.temp.0.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- %.temp.9.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 9
- store i8 %.unpack9, i8* %.temp.9.gep, align 1
- %.temp.10.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 10
- store i8 %.unpack10, i8* %.temp.10.gep, align 1
- %.temp.11.gep = getelementptr inbounds [16 x i8], [16 x i8]* %_param_data, i64 0, i64 11
- store i8 %.unpack11, i8* %.temp.11.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ %.elt9 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 9
+ %.unpack9 = load i8, ptr %.elt9, align 1
+ %.elt10 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 10
+ %.unpack10 = load i8, ptr %.elt10, align 1
+ %.elt11 = getelementptr inbounds [16 x i8], ptr %data, i64 0, i64 11
+ %.unpack11 = load i8, ptr %.elt11, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ %.temp.9.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 9
+ store i8 %.unpack9, ptr %.temp.9.gep, align 1
+ %.temp.10.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 10
+ store i8 %.unpack10, ptr %.temp.10.gep, align 1
+ %.temp.11.gep = getelementptr inbounds [16 x i8], ptr %_param_data, i64 0, i64 11
+ store i8 %.unpack11, ptr %.temp.11.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_18([18 x i8]* nocapture readonly byval([18 x i8]) %data) #0 {
+define signext i8 @caller_18(ptr nocapture readonly byval([18 x i8]) %data) #0 {
; P8LE-LABEL: caller_18:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [18 x i8], align 1
- %.elt0 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 0
- %.unpack0 = load i8, i8* %.elt0, align 1
- %.elt1 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 1
- %.unpack1 = load i8, i8* %.elt1, align 1
- %.elt2 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 2
- %.unpack2 = load i8, i8* %.elt2, align 1
- %.elt3 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 3
- %.unpack3 = load i8, i8* %.elt3, align 1
- %.elt4 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 4
- %.unpack4 = load i8, i8* %.elt4, align 1
- %.elt5 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 5
- %.unpack5 = load i8, i8* %.elt5, align 1
- %.elt6 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 6
- %.unpack6 = load i8, i8* %.elt6, align 1
- %.elt7 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 7
- %.unpack7 = load i8, i8* %.elt7, align 1
- %.elt8 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 8
- %.unpack8 = load i8, i8* %.elt8, align 1
- %.elt9 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 9
- %.unpack9 = load i8, i8* %.elt9, align 1
- %.elt10 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 10
- %.unpack10 = load i8, i8* %.elt10, align 1
- %.elt11 = getelementptr inbounds [18 x i8], [18 x i8]* %data, i64 0, i64 11
- %.unpack11 = load i8, i8* %.elt11, align 1
- %.temp.0.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack0, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack1, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack2, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack3, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack4, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack5, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack6, i8* %.temp.6.gep, align 1
- %.temp.7.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 7
- store i8 %.unpack7, i8* %.temp.7.gep, align 1
- %.temp.8.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 8
- store i8 %.unpack8, i8* %.temp.8.gep, align 1
- %.temp.9.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 9
- store i8 %.unpack9, i8* %.temp.9.gep, align 1
- %.temp.10.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 10
- store i8 %.unpack10, i8* %.temp.10.gep, align 1
- %.temp.11.gep = getelementptr inbounds [18 x i8], [18 x i8]* %_param_data, i64 0, i64 11
- store i8 %.unpack11, i8* %.temp.11.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack0 = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 1
+ %.unpack1 = load i8, ptr %.elt1, align 1
+ %.elt2 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 2
+ %.unpack2 = load i8, ptr %.elt2, align 1
+ %.elt3 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 3
+ %.unpack3 = load i8, ptr %.elt3, align 1
+ %.elt4 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 4
+ %.unpack4 = load i8, ptr %.elt4, align 1
+ %.elt5 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 5
+ %.unpack5 = load i8, ptr %.elt5, align 1
+ %.elt6 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 6
+ %.unpack6 = load i8, ptr %.elt6, align 1
+ %.elt7 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 7
+ %.unpack7 = load i8, ptr %.elt7, align 1
+ %.elt8 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 8
+ %.unpack8 = load i8, ptr %.elt8, align 1
+ %.elt9 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 9
+ %.unpack9 = load i8, ptr %.elt9, align 1
+ %.elt10 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 10
+ %.unpack10 = load i8, ptr %.elt10, align 1
+ %.elt11 = getelementptr inbounds [18 x i8], ptr %data, i64 0, i64 11
+ %.unpack11 = load i8, ptr %.elt11, align 1
+ store i8 %.unpack0, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack1, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack2, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack3, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack4, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack5, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack6, ptr %.temp.6.gep, align 1
+ %.temp.7.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 7
+ store i8 %.unpack7, ptr %.temp.7.gep, align 1
+ %.temp.8.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 8
+ store i8 %.unpack8, ptr %.temp.8.gep, align 1
+ %.temp.9.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 9
+ store i8 %.unpack9, ptr %.temp.9.gep, align 1
+ %.temp.10.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 10
+ store i8 %.unpack10, ptr %.temp.10.gep, align 1
+ %.temp.11.gep = getelementptr inbounds [18 x i8], ptr %_param_data, i64 0, i64 11
+ store i8 %.unpack11, ptr %.temp.11.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-declare void @callee(i8*) local_unnamed_addr #0
-declare void @callee_9([9 x i8]* nocapture readonly byval([9 x i8]) %data) local_unnamed_addr #0
+declare void @callee(ptr) local_unnamed_addr #0
+declare void @callee_9(ptr nocapture readonly byval([9 x i8]) %data) local_unnamed_addr #0
attributes #0 = { nounwind }
; RUN: llc -verify-machineinstrs --mtriple powerpc64-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names < %s | FileCheck %s --check-prefix=P10BE
-define signext i8 @caller_1([1 x i8]* nocapture readonly byval([1 x i8]) %data) #0 {
+define signext i8 @caller_1(ptr nocapture readonly byval([1 x i8]) %data) #0 {
; P8LE-LABEL: caller_1:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [1 x i8], align 1
- %.elt = getelementptr inbounds [1 x i8], [1 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.temp.0.gep = getelementptr inbounds [1 x i8], [1 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_2([2 x i8]* nocapture readonly byval([2 x i8]) %data) #0 {
+define signext i8 @caller_2(ptr nocapture readonly byval([2 x i8]) %data) #0 {
; P8LE-LABEL: caller_2:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [2 x i8], align 1
- %.elt = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.temp.0.gep = getelementptr inbounds [2 x i8], [2 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [2 x i8], [2 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [2 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [2 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_3([3 x i8]* nocapture readonly byval([3 x i8]) %data) #0 {
+define signext i8 @caller_3(ptr nocapture readonly byval([3 x i8]) %data) #0 {
; P8LE-LABEL: caller_3:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [3 x i8], align 1
- %.elt = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.elt3 = getelementptr inbounds [3 x i8], [3 x i8]* %data, i64 0, i64 2
- %.unpack4 = load i8, i8* %.elt3, align 1
- %.temp.0.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [3 x i8], [3 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack4, i8* %.temp.2.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [3 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ %.elt3 = getelementptr inbounds [3 x i8], ptr %data, i64 0, i64 2
+ %.unpack4 = load i8, ptr %.elt3, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [3 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [3 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack4, ptr %.temp.2.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_4([4 x i8]* nocapture readonly byval([4 x i8]) %data) #0 {
+define signext i8 @caller_4(ptr nocapture readonly byval([4 x i8]) %data) #0 {
; P8LE-LABEL: caller_4:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [4 x i8], align 1
- %.elt = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.elt3 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 2
- %.unpack4 = load i8, i8* %.elt3, align 1
- %.elt5 = getelementptr inbounds [4 x i8], [4 x i8]* %data, i64 0, i64 3
- %.unpack6 = load i8, i8* %.elt5, align 1
- %.temp.0.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack4, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [4 x i8], [4 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack6, i8* %.temp.3.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ %.elt3 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 2
+ %.unpack4 = load i8, ptr %.elt3, align 1
+ %.elt5 = getelementptr inbounds [4 x i8], ptr %data, i64 0, i64 3
+ %.unpack6 = load i8, ptr %.elt5, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack4, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [4 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack6, ptr %.temp.3.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_5([5 x i8]* nocapture readonly byval([5 x i8]) %data) #0 {
+define signext i8 @caller_5(ptr nocapture readonly byval([5 x i8]) %data) #0 {
; P8LE-LABEL: caller_5:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [5 x i8], align 1
- %.elt = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.elt3 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 2
- %.unpack4 = load i8, i8* %.elt3, align 1
- %.elt5 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 3
- %.unpack6 = load i8, i8* %.elt5, align 1
- %.elt7 = getelementptr inbounds [5 x i8], [5 x i8]* %data, i64 0, i64 4
- %.unpack8 = load i8, i8* %.elt7, align 1
- %.temp.0.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack4, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack6, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [5 x i8], [5 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack8, i8* %.temp.4.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ %.elt3 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 2
+ %.unpack4 = load i8, ptr %.elt3, align 1
+ %.elt5 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 3
+ %.unpack6 = load i8, ptr %.elt5, align 1
+ %.elt7 = getelementptr inbounds [5 x i8], ptr %data, i64 0, i64 4
+ %.unpack8 = load i8, ptr %.elt7, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack4, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack6, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [5 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack8, ptr %.temp.4.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_6([6 x i8]* nocapture readonly byval([6 x i8]) %data) #0 {
+define signext i8 @caller_6(ptr nocapture readonly byval([6 x i8]) %data) #0 {
; P8LE-LABEL: caller_6:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [6 x i8], align 1
- %.elt = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.elt3 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 2
- %.unpack4 = load i8, i8* %.elt3, align 1
- %.elt5 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 3
- %.unpack6 = load i8, i8* %.elt5, align 1
- %.elt7 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 4
- %.unpack8 = load i8, i8* %.elt7, align 1
- %.elt9 = getelementptr inbounds [6 x i8], [6 x i8]* %data, i64 0, i64 5
- %.unpack10 = load i8, i8* %.elt9, align 1
- %.temp.0.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack4, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack6, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack8, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [6 x i8], [6 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack10, i8* %.temp.5.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ %.elt3 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 2
+ %.unpack4 = load i8, ptr %.elt3, align 1
+ %.elt5 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 3
+ %.unpack6 = load i8, ptr %.elt5, align 1
+ %.elt7 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 4
+ %.unpack8 = load i8, ptr %.elt7, align 1
+ %.elt9 = getelementptr inbounds [6 x i8], ptr %data, i64 0, i64 5
+ %.unpack10 = load i8, ptr %.elt9, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack4, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack6, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack8, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [6 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack10, ptr %.temp.5.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-define signext i8 @caller_7([7 x i8]* nocapture readonly byval([7 x i8]) %data) #0 {
+define signext i8 @caller_7(ptr nocapture readonly byval([7 x i8]) %data) #0 {
; P8LE-LABEL: caller_7:
; P8LE: # %bb.0: # %entry
; P8LE-NEXT: mflr r0
; P10BE-NEXT: blr
entry:
%_param_data = alloca [7 x i8], align 1
- %.elt = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 0
- %.unpack = load i8, i8* %.elt, align 1
- %.elt1 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 1
- %.unpack2 = load i8, i8* %.elt1, align 1
- %.elt3 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 2
- %.unpack4 = load i8, i8* %.elt3, align 1
- %.elt5 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 3
- %.unpack6 = load i8, i8* %.elt5, align 1
- %.elt7 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 4
- %.unpack8 = load i8, i8* %.elt7, align 1
- %.elt9 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 5
- %.unpack10 = load i8, i8* %.elt9, align 1
- %.elt11 = getelementptr inbounds [7 x i8], [7 x i8]* %data, i64 0, i64 6
- %.unpack12 = load i8, i8* %.elt11, align 1
- %.temp.0.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 0
- store i8 %.unpack, i8* %.temp.0.gep, align 1
- %.temp.1.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 1
- store i8 %.unpack2, i8* %.temp.1.gep, align 1
- %.temp.2.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 2
- store i8 %.unpack4, i8* %.temp.2.gep, align 1
- %.temp.3.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 3
- store i8 %.unpack6, i8* %.temp.3.gep, align 1
- %.temp.4.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 4
- store i8 %.unpack8, i8* %.temp.4.gep, align 1
- %.temp.5.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 5
- store i8 %.unpack10, i8* %.temp.5.gep, align 1
- %.temp.6.gep = getelementptr inbounds [7 x i8], [7 x i8]* %_param_data, i64 0, i64 6
- store i8 %.unpack12, i8* %.temp.6.gep, align 1
- call void @callee(i8* nonnull %.temp.0.gep)
+ %.unpack = load i8, ptr %data, align 1
+ %.elt1 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 1
+ %.unpack2 = load i8, ptr %.elt1, align 1
+ %.elt3 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 2
+ %.unpack4 = load i8, ptr %.elt3, align 1
+ %.elt5 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 3
+ %.unpack6 = load i8, ptr %.elt5, align 1
+ %.elt7 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 4
+ %.unpack8 = load i8, ptr %.elt7, align 1
+ %.elt9 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 5
+ %.unpack10 = load i8, ptr %.elt9, align 1
+ %.elt11 = getelementptr inbounds [7 x i8], ptr %data, i64 0, i64 6
+ %.unpack12 = load i8, ptr %.elt11, align 1
+ store i8 %.unpack, ptr %_param_data, align 1
+ %.temp.1.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 1
+ store i8 %.unpack2, ptr %.temp.1.gep, align 1
+ %.temp.2.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 2
+ store i8 %.unpack4, ptr %.temp.2.gep, align 1
+ %.temp.3.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 3
+ store i8 %.unpack6, ptr %.temp.3.gep, align 1
+ %.temp.4.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 4
+ store i8 %.unpack8, ptr %.temp.4.gep, align 1
+ %.temp.5.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 5
+ store i8 %.unpack10, ptr %.temp.5.gep, align 1
+ %.temp.6.gep = getelementptr inbounds [7 x i8], ptr %_param_data, i64 0, i64 6
+ store i8 %.unpack12, ptr %.temp.6.gep, align 1
+ call void @callee(ptr nonnull %_param_data)
ret i8 0
}
-declare void @callee(i8*) local_unnamed_addr #0
+declare void @callee(ptr) local_unnamed_addr #0
attributes #0 = { nounwind }
}
; Indirect calls requires a full stub creation
-define dso_local void @test_indirect(void ()* nocapture %fp) nounwind {
+define dso_local void @test_indirect(ptr nocapture %fp) nounwind {
; CHECK-LABEL: test_indirect:
tail call void %fp() nounwind
; CHECK: ld [[FP:[0-9]+]], 0(3)
; used on 64-bit SVR4 (as e.g. on Darwin).
define dso_local void @test_abs() nounwind {
; CHECK-LABEL: test_abs:
- tail call void inttoptr (i64 1024 to void ()*)() nounwind
+ tail call void inttoptr (i64 1024 to ptr)() nounwind
; CHECK: ld [[FP:[0-9]+]], 1024(0)
; CHECK: ld 11, 1040(0)
; CHECK: ld 2, 1032(0)
; The 'ld 2, 40(1)' really must always come directly after the bctrl to make
; the unwinding code in libgcc happy.
-@g = external global void ()*
+@g = external global ptr
declare void @h(i64)
define dso_local void @test_indir_toc_reload(i64 %x) {
- %1 = load void ()*, void ()** @g
+ %1 = load ptr, ptr @g
call void %1()
call void @h(i64 %x)
ret void
%struct.pos_T = type { i64 }
; check that we're not copying stuff between R and X registers
-define internal void @serialize_pos(%struct.pos_T* byval(%struct.pos_T) %pos, %struct.__sFILE* %fp) nounwind {
+define internal void @serialize_pos(ptr byval(%struct.pos_T) %pos, ptr %fp) nounwind {
entry:
ret void
}
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @bar(void (...)* nocapture %x) #0 {
+define void @bar(ptr nocapture %x) #0 {
entry:
- %callee.knr.cast = bitcast void (...)* %x to void ()*
br label %for.body
; INVFUNCDESC-LABEL: @bar
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- tail call void %callee.knr.cast() #0
+ tail call void %x() #0
%inc = add nuw nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 1600000000
br i1 %exitcond, label %for.end, label %for.body
; Function Attrs: nounwind
define signext i32 @check_cache_line() local_unnamed_addr {
entry:
- %call = tail call i32* bitcast (i32* (...)* @magici to i32* ()*)()
- %call115 = tail call signext i32 bitcast (i32 (...)* @iter to i32 ()*)()
+ %call = tail call ptr @magici()
+ %call115 = tail call signext i32 @iter()
%cmp16 = icmp sgt i32 %call115, 0
br i1 %cmp16, label %for.body, label %for.cond.cleanup
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%res.017 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %call, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %call, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %res.017
%1 = add nuw nsw i64 %indvars.iv, 16
- %arrayidx4 = getelementptr inbounds i32, i32* %call, i64 %1
- %2 = load i32, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %call, i64 %1
+ %2 = load i32, ptr %arrayidx4, align 4
%add5 = add nsw i32 %add, %2
%indvars.iv.next = add nuw i64 %indvars.iv, 1
- %call1 = tail call signext i32 bitcast (i32 (...)* @iter to i32 ()*)()
+ %call1 = tail call signext i32 @iter()
%3 = sext i32 %call1 to i64
%cmp = icmp slt i64 %indvars.iv.next, %3
br i1 %cmp, label %for.body, label %for.cond.cleanup
; CHECK-DCBT: blr
}
-declare i32* @magici(...) local_unnamed_addr
+declare ptr @magici(...) local_unnamed_addr
declare signext i32 @iter(...) local_unnamed_addr
; callee. See comments for individual functions above for details on registers
; used for parameters.
define <1 x i128> @call_v1i128_increment_by_one() nounwind {
- %tmp = load <1 x i128>, <1 x i128>* @x, align 16
+ %tmp = load <1 x i128>, ptr @x, align 16
%ret = call <1 x i128> @v1i128_increment_by_one(<1 x i128> %tmp)
ret <1 x i128> %ret
}
define <1 x i128> @call_v1i128_increment_by_val() nounwind {
- %tmp = load <1 x i128>, <1 x i128>* @x, align 16
- %tmp2 = load <1 x i128>, <1 x i128>* @y, align 16
+ %tmp = load <1 x i128>, ptr @x, align 16
+ %tmp2 = load <1 x i128>, ptr @y, align 16
%ret = call <1 x i128> @v1i128_increment_by_val(<1 x i128> %tmp, <1 x i128> %tmp2)
ret <1 x i128> %ret
}
define i128 @call_i128_increment_by_one() nounwind {
- %tmp = load i128, i128* @a, align 16
+ %tmp = load i128, ptr @a, align 16
%ret = call i128 @i128_increment_by_one(i128 %tmp)
ret i128 %ret
; %ret4 = call i128 @i128_increment_by_val(i128 %tmp2, i128 %tmp2)
}
define i128 @call_i128_increment_by_val() nounwind {
- %tmp = load i128, i128* @a, align 16
- %tmp2 = load i128, i128* @b, align 16
+ %tmp = load i128, ptr @a, align 16
+ %tmp2 = load i128, ptr @b, align 16
%ret = call i128 @i128_increment_by_val(i128 %tmp, i128 %tmp2)
ret i128 %ret
; CHECK-LE-LABEL: @call_i128_increment_by_val
define i128 @i128_split() {
entry:
- %0 = load i128, i128* @a, align 16
- %1 = load i128, i128* @b, align 16
+ %0 = load i128, ptr @a, align 16
+ %1 = load i128, ptr @b, align 16
%call = tail call i128 @callee_i128_split(i32 1, i128 %0, i32 4, i32 5,
i32 6, i32 7, i128 %1, i32 8, i128 9)
ret i128 %call
; Based on the ppc64-prefetch.ll test
; RUN: not --crash llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s 2>&1 | FileCheck %s
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
-define void @test(i8* %a, ...) nounwind {
+define void @test(ptr %a, ...) nounwind {
entry:
- call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+ call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
ret void
; FIXME: Crashing is not really the correct behavior here, we really should just emit nothing
; Copied from the ppc64-prefetch.ll test
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
-define void @test(i8* %a, ...) nounwind {
+define void @test(ptr %a, ...) nounwind {
entry:
- call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+ call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
ret void
; CHECK-LABEL: @test
; Tests that the 'nest' parameter attribute causes the relevant parameter to be
; passed in the right register (r11 for PPC).
-define i8* @nest_receiver(i8* nest %arg) nounwind {
+define ptr @nest_receiver(ptr nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
; CHECK: # %bb.0:
; CHECK-NEXT: mr 3, 11
; CHECK-NEXT: blr
- ret i8* %arg
+ ret ptr %arg
}
-define i8* @nest_caller(i8* %arg) nounwind {
+define ptr @nest_caller(ptr %arg) nounwind {
; CHECK-LABEL: nest_caller:
; CHECK: mr 11, 3
; CHECK-NEXT: bl nest_receiver
; CHECK: blr
- %result = call i8* @nest_receiver(i8* nest %arg)
- ret i8* %result
+ %result = call ptr @nest_receiver(ptr nest %arg)
+ ret ptr %result
}
-define void @test_indirect(i32 ()* nocapture %f, i8* %p) {
+define void @test_indirect(ptr nocapture %f, ptr %p) {
entry:
; CHECK-LABEL: test_indirect
; CHECK: bctrl
; CHECK: blr
- %callee.knr.cast = bitcast i32 ()* %f to i32 (i8*)*
- %call = tail call signext i32 %callee.knr.cast(i8* nest %p)
+ %call = tail call signext i32 %f(ptr nest %p)
ret void
}
; Function Attrs: nounwind
define dso_local void @foo() #0 {
entry:
- tail call void bitcast ([33 x i8]* @something to void ()*)() #0
+ tail call void @something() #0
ret void
; CHECK-LABEL: @foo
; Function Attrs: nounwind
define dso_local void @bar() #0 {
entry:
- tail call void bitcast (%struct.cd* @tls_something to void ()*)() #0
+ tail call void @tls_something() #0
ret void
; CHECK-LABEL: @bar
; Function Attrs: nounwind
define dso_local void @ext() #0 {
entry:
- tail call void bitcast (%struct.cd* @extern_something to void ()*)() #0
+ tail call void @extern_something() #0
ret void
; CHECK-LABEL: @ext
while.cond:
%l.0 = phi i64 [ 0, %entry ], [ %inc, %while.cond ]
- %arrayidx = getelementptr inbounds [100 x i64], [100 x i64]* @perm, i64 0, i64 %l.0
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds [100 x i64], ptr @perm, i64 0, i64 %l.0
+ %0 = load i64, ptr %arrayidx, align 8
%cmp = icmp sgt i64 %0, 0
%inc = add nuw nsw i64 %l.0, 1
br i1 %cmp, label %while.cond, label %while.end
while.end:
- store i64 0, i64* %arrayidx, align 8
+ store i64 0, ptr %arrayidx, align 8
ret void
; CHECK-LABEL: sort_basket
; CHECK: addi {{[0-9]+}}, {{[0-9]+}}, -8
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc -verify-machineinstrs -mcpu=a2 < %s | FileCheck %s
-define void @test1(i8* %a, ...) nounwind {
+define void @test1(ptr %a, ...) nounwind {
entry:
- call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 1)
+ call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 1)
ret void
; CHECK-LABEL: @test1
; CHECK: dcbt
}
-declare void @llvm.prefetch(i8*, i32, i32, i32)
+declare void @llvm.prefetch(ptr, i32, i32, i32)
-define void @test2(i8* %a, ...) nounwind {
+define void @test2(ptr %a, ...) nounwind {
entry:
- call void @llvm.prefetch(i8* %a, i32 1, i32 3, i32 1)
+ call void @llvm.prefetch(ptr %a, i32 1, i32 3, i32 1)
ret void
; CHECK-LABEL: @test2
; CHECK: dcbtst
}
-define void @test3(i8* %a, ...) nounwind {
+define void @test3(ptr %a, ...) nounwind {
entry:
- call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+ call void @llvm.prefetch(ptr %a, i32 0, i32 3, i32 0)
ret void
; CHECK-LABEL: @test3
;; outside of the initial 288 byte volatile program storage region in the
;; Protected Zone. However, this restriction will be removed in an upcoming
;; revision of the ABI.
-define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @spill(ptr nocapture readonly %in) #0 {
; BE-P10-LABEL: spill:
; BE-P10: # %bb.0: # %entry
; BE-P10-NEXT: mflr r0
; BE-32BIT-P8-PRIV-NEXT: blr
entry:
%local = alloca i32, align 4
- %0 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %local, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %local, align 4
tail call void asm sideeffect "nop", "~{cr2},~{cr3},~{cr4},~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
- %call = call zeroext i32 @callee2(i32* nonnull %local)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
- %2 = load i32, i32* %arrayidx1, align 4
- %add = add i32 %2, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ %call = call zeroext i32 @callee2(ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add i32 %1, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
ret i32 %add
}
-define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
+define dso_local zeroext i32 @shrinkwrap(ptr readonly %in) #0 {
; BE-P10-LABEL: shrinkwrap:
; BE-P10: # %bb.0: # %entry
; BE-P10-NEXT: cmpldi r3, 0
; BE-32BIT-P8-PRIV-NEXT: blr
entry:
%local = alloca i32, align 4
- %tobool.not = icmp eq i32* %in, null
+ %tobool.not = icmp eq ptr %in, null
br i1 %tobool.not, label %return, label %if.end
if.end: ; preds = %entry
- %0 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %local, align 4
- %call = call zeroext i32 @callee2(i32* nonnull %local)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
- %2 = load i32, i32* %arrayidx1, align 4
- %add = add i32 %2, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %local, align 4
+ %call = call zeroext i32 @callee2(ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add i32 %1, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
br label %return
return: ; preds = %entry, %if.end
ret i32 %retval.0
}
-define dso_local zeroext i32 @aligned(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @aligned(ptr nocapture readonly %in) #0 {
; BE-P10-LABEL: aligned:
; BE-P10: # %bb.0: # %entry
; BE-P10-NEXT: mflr r0
%beforeLocal = alloca i32, align 4
%local = alloca i32, align 32768
%afterLocal = alloca i32, align 4
- %0 = bitcast i32* %beforeLocal to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 1
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %beforeLocal, align 4
- %2 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 3
- %3 = load i32, i32* %arrayidx1, align 4
- store i32 %3, i32* %local, align 32768
- %4 = bitcast i32* %afterLocal to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
- %arrayidx2 = getelementptr inbounds i32, i32* %in, i64 5
- %5 = load i32, i32* %arrayidx2, align 4
- store i32 %5, i32* %afterLocal, align 4
- %call = call zeroext i32 @callee3(i32* nonnull %local, i32* nonnull %beforeLocal, i32* nonnull %afterLocal)
- %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 4
- %6 = load i32, i32* %arrayidx3, align 4
- %add = add i32 %6, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %4)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %beforeLocal)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %beforeLocal, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 3
+ %1 = load i32, ptr %arrayidx1, align 4
+ store i32 %1, ptr %local, align 32768
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %afterLocal)
+ %arrayidx2 = getelementptr inbounds i32, ptr %in, i64 5
+ %2 = load i32, ptr %arrayidx2, align 4
+ store i32 %2, ptr %afterLocal, align 4
+ %call = call zeroext i32 @callee3(ptr nonnull %local, ptr nonnull %beforeLocal, ptr nonnull %afterLocal)
+ %arrayidx3 = getelementptr inbounds i32, ptr %in, i64 4
+ %3 = load i32, ptr %arrayidx3, align 4
+ %add = add i32 %3, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %afterLocal)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %beforeLocal)
ret i32 %add
}
declare zeroext i32 @callee(i32 zeroext) local_unnamed_addr
-declare zeroext i32 @callee2(i32*) local_unnamed_addr
-declare zeroext i32 @callee3(i32*, i32*, i32*) local_unnamed_addr
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare zeroext i32 @callee2(ptr) local_unnamed_addr
+declare zeroext i32 @callee3(ptr, ptr, ptr) local_unnamed_addr
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
attributes #0 = { nounwind }
;; outside of the initial 288 byte volatile program storage region in the
;; Protected Zone. However, this restriction will be removed in an upcoming
;; revision of the ABI.
-define dso_local zeroext i32 @spill(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @spill(ptr nocapture readonly %in) #0 {
; LE-P10-LABEL: spill:
; LE-P10: # %bb.0: # %entry
; LE-P10-NEXT: mflr r0
; BE-P8-PRIV-NEXT: blr
entry:
%local = alloca i32, align 4
- %0 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %local, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %local, align 4
tail call void asm sideeffect "nop", "~{cr2},~{cr3},~{cr4},~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
- %call = call zeroext i32 @callee2(i32* nonnull %local)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
- %2 = load i32, i32* %arrayidx1, align 4
- %add = add i32 %2, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ %call = call zeroext i32 @callee2(ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add i32 %1, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
ret i32 %add
}
-define dso_local zeroext i32 @shrinkwrap(i32* readonly %in) #0 {
+define dso_local zeroext i32 @shrinkwrap(ptr readonly %in) #0 {
; LE-P10-LABEL: shrinkwrap:
; LE-P10: # %bb.0: # %entry
; LE-P10-NEXT: cmpldi r3, 0
; BE-P8-PRIV-NEXT: blr
entry:
%local = alloca i32, align 4
- %tobool.not = icmp eq i32* %in, null
+ %tobool.not = icmp eq ptr %in, null
br i1 %tobool.not, label %return, label %if.end
if.end: ; preds = %entry
- %0 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 3
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %local, align 4
- %call = call zeroext i32 @callee2(i32* nonnull %local)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 4
- %2 = load i32, i32* %arrayidx1, align 4
- %add = add i32 %2, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 3
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %local, align 4
+ %call = call zeroext i32 @callee2(ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 4
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add i32 %1, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
br label %return
return: ; preds = %entry, %if.end
ret i32 %retval.0
}
-define dso_local zeroext i32 @aligned(i32* nocapture readonly %in) #0 {
+define dso_local zeroext i32 @aligned(ptr nocapture readonly %in) #0 {
; LE-P10-LABEL: aligned:
; LE-P10: # %bb.0: # %entry
; LE-P10-NEXT: mflr r0
%beforeLocal = alloca i32, align 4
%local = alloca i32, align 32768
%afterLocal = alloca i32, align 4
- %0 = bitcast i32* %beforeLocal to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
- %arrayidx = getelementptr inbounds i32, i32* %in, i64 1
- %1 = load i32, i32* %arrayidx, align 4
- store i32 %1, i32* %beforeLocal, align 4
- %2 = bitcast i32* %local to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
- %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 3
- %3 = load i32, i32* %arrayidx1, align 4
- store i32 %3, i32* %local, align 32768
- %4 = bitcast i32* %afterLocal to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4)
- %arrayidx2 = getelementptr inbounds i32, i32* %in, i64 5
- %5 = load i32, i32* %arrayidx2, align 4
- store i32 %5, i32* %afterLocal, align 4
- %call = call zeroext i32 @callee3(i32* nonnull %local, i32* nonnull %beforeLocal, i32* nonnull %afterLocal)
- %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 4
- %6 = load i32, i32* %arrayidx3, align 4
- %add = add i32 %6, %call
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %4)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %beforeLocal)
+ %arrayidx = getelementptr inbounds i32, ptr %in, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
+ store i32 %0, ptr %beforeLocal, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %local)
+ %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 3
+ %1 = load i32, ptr %arrayidx1, align 4
+ store i32 %1, ptr %local, align 32768
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %afterLocal)
+ %arrayidx2 = getelementptr inbounds i32, ptr %in, i64 5
+ %2 = load i32, ptr %arrayidx2, align 4
+ store i32 %2, ptr %afterLocal, align 4
+ %call = call zeroext i32 @callee3(ptr nonnull %local, ptr nonnull %beforeLocal, ptr nonnull %afterLocal)
+ %arrayidx3 = getelementptr inbounds i32, ptr %in, i64 4
+ %3 = load i32, ptr %arrayidx3, align 4
+ %add = add i32 %3, %call
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %afterLocal)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %local)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %beforeLocal)
ret i32 %add
}
declare zeroext i32 @callee(i32 zeroext) local_unnamed_addr
-declare zeroext i32 @callee2(i32*) local_unnamed_addr
-declare zeroext i32 @callee3(i32*, i32*, i32*) local_unnamed_addr
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare zeroext i32 @callee2(ptr) local_unnamed_addr
+declare zeroext i32 @callee3(ptr, ptr, ptr) local_unnamed_addr
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
attributes #0 = { nounwind }
declare void @__assert_fail();
define dso_local i8 @_ZNK5clang9NamedDecl23getLinkageAndVisibilityEv(
- %"class.clang::NamedDecl"* %this) {
+ ptr %this) {
entry:
- %tobool = icmp eq %"class.clang::NamedDecl"* %this, null
+ %tobool = icmp eq ptr %this, null
br i1 %tobool, label %cond.false, label %exit
cond.false:
unreachable
exit:
- %DeclKind = getelementptr inbounds
- %"class.clang::NamedDecl",
- %"class.clang::NamedDecl"* %this, i64 0, i32 0
- %bf.load = load i32, i32* %DeclKind, align 4
+ %bf.load = load i32, ptr %this, align 4
%call.i = tail call i8 @LVComputationKind(
- %"class.clang::NamedDecl"* %this,
+ ptr %this,
i32 %bf.load)
ret i8 %call.i
}
define dso_local fastcc i8 @LVComputationKind(
- %"class.clang::NamedDecl"* %D,
+ ptr %D,
i32 %computation) {
ret i8 0
}
%S_32 = type { [7 x i32], i32 }
; Function Attrs: noinline nounwind
-define dso_local void @callee_56_copy([7 x i64] %a, %S_56* %b) #0 { ret void }
-define dso_local void @callee_64_copy([8 x i64] %a, %S_64* %b) #0 { ret void }
+define dso_local void @callee_56_copy([7 x i64] %a, ptr %b) #0 { ret void }
+define dso_local void @callee_64_copy([8 x i64] %a, ptr %b) #0 { ret void }
; Function Attrs: nounwind
-define dso_local void @caller_56_reorder_copy(%S_56* %b, [7 x i64] %a) #1 {
- tail call void @callee_56_copy([7 x i64] %a, %S_56* %b)
+define dso_local void @caller_56_reorder_copy(ptr %b, [7 x i64] %a) #1 {
+ tail call void @callee_56_copy([7 x i64] %a, ptr %b)
ret void
; CHECK-SCO-LABEL: caller_56_reorder_copy:
; CHECK-SCO: TC_RETURNd8 callee_56_copy
}
-define dso_local void @caller_64_reorder_copy(%S_64* %b, [8 x i64] %a) #1 {
- tail call void @callee_64_copy([8 x i64] %a, %S_64* %b)
+define dso_local void @caller_64_reorder_copy(ptr %b, [8 x i64] %a) #1 {
+ tail call void @callee_64_copy([8 x i64] %a, ptr %b)
ret void
; CHECK-SCO-LABEL: caller_64_reorder_copy:
}
define dso_local void @arg8_callee(
- float %a, i32 signext %b, float %c, i32* %d,
- i8 zeroext %e, float %f, i32* %g, i32 signext %h)
+ float %a, i32 signext %b, float %c, ptr %d,
+ i8 zeroext %e, float %f, ptr %g, i32 signext %h)
{
ret void
}
-define dso_local void @arg8_caller(float %a, i32 signext %b, i8 zeroext %c, i32* %d) {
+define dso_local void @arg8_caller(float %a, i32 signext %b, i8 zeroext %c, ptr %d) {
entry:
tail call void @arg8_callee(float undef, i32 signext undef, float undef,
- i32* %d, i8 zeroext undef, float undef,
- i32* undef, i32 signext undef)
+ ptr %d, i8 zeroext undef, float undef,
+ ptr undef, i32 signext undef)
ret void
; CHECK-SCO-LABEL: arg8_caller:
; Struct return test
; Function Attrs: noinline nounwind
-define dso_local void @callee_sret_56(%S_56* noalias sret(%S_56) %agg.result) #0 { ret void }
-define dso_local void @callee_sret_32(%S_32* noalias sret(%S_32) %agg.result) #0 { ret void }
+define dso_local void @callee_sret_56(ptr noalias sret(%S_56) %agg.result) #0 { ret void }
+define dso_local void @callee_sret_32(ptr noalias sret(%S_32) %agg.result) #0 { ret void }
; Function Attrs: nounwind
-define dso_local void @caller_do_something_sret_32(%S_32* noalias sret(%S_32) %agg.result) #1 {
+define dso_local void @caller_do_something_sret_32(ptr noalias sret(%S_32) %agg.result) #1 {
%1 = alloca %S_56, align 4
- %2 = bitcast %S_56* %1 to i8*
- call void @callee_sret_56(%S_56* nonnull sret(%S_56) %1)
- tail call void @callee_sret_32(%S_32* sret(%S_32) %agg.result)
+ call void @callee_sret_56(ptr nonnull sret(%S_56) %1)
+ tail call void @callee_sret_32(ptr sret(%S_32) %agg.result)
ret void
; CHECK-SCO-LABEL: caller_do_something_sret_32:
; CHECK-SCO: TC_RETURNd8 callee_sret_32
}
-define dso_local void @caller_local_sret_32(%S_32* %a) #1 {
+define dso_local void @caller_local_sret_32(ptr %a) #1 {
%tmp = alloca %S_32, align 4
- tail call void @callee_sret_32(%S_32* nonnull sret(%S_32) %tmp)
+ tail call void @callee_sret_32(ptr nonnull sret(%S_32) %tmp)
ret void
; CHECK-SCO-LABEL: caller_local_sret_32:
attributes #0 = { noinline nounwind }
attributes #1 = { nounwind }
-define dso_local void @f128_callee(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b) { ret void }
-define dso_local void @f128_caller(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b) {
- tail call void @f128_callee(i32* %ptr, ppc_fp128 %a, ppc_fp128 %b)
+define dso_local void @f128_callee(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b) { ret void }
+define dso_local void @f128_caller(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b) {
+ tail call void @f128_callee(ptr %ptr, ppc_fp128 %a, ppc_fp128 %b)
ret void
; CHECK-SCO-LABEL: f128_caller:
; weak linkage test
%class.T = type { [2 x i8] }
-define weak_odr hidden void @wo_hcallee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_hcaller(%class.T* %this, i8* %c) {
- tail call void @wo_hcallee(%class.T* %this, i8* %c)
+define weak_odr hidden void @wo_hcallee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_hcaller(ptr %this, ptr %c) {
+ tail call void @wo_hcallee(ptr %this, ptr %c)
ret void
; CHECK-SCO-LABEL: wo_hcaller:
; SCM: bl wo_hcallee
}
-define weak_odr protected void @wo_pcallee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_pcaller(%class.T* %this, i8* %c) {
- tail call void @wo_pcallee(%class.T* %this, i8* %c)
+define weak_odr protected void @wo_pcallee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_pcaller(ptr %this, ptr %c) {
+ tail call void @wo_pcallee(ptr %this, ptr %c)
ret void
; CHECK-SCO-LABEL: wo_pcaller:
; SCM: bl wo_pcallee
}
-define weak_odr void @wo_callee(%class.T* %this, i8* %c) { ret void }
-define dso_local void @wo_caller(%class.T* %this, i8* %c) {
- tail call void @wo_callee(%class.T* %this, i8* %c)
+define weak_odr void @wo_callee(ptr %this, ptr %c) { ret void }
+define dso_local void @wo_caller(ptr %this, ptr %c) {
+ tail call void @wo_callee(ptr %this, ptr %c)
ret void
; CHECK-SCO-LABEL: wo_caller:
; SCM: bl wo_callee
}
-define weak protected void @w_pcallee(i8* %ptr) { ret void }
-define dso_local void @w_pcaller(i8* %ptr) {
- tail call void @w_pcallee(i8* %ptr)
+define weak protected void @w_pcallee(ptr %ptr) { ret void }
+define dso_local void @w_pcaller(ptr %ptr) {
+ tail call void @w_pcallee(ptr %ptr)
ret void
; CHECK-SCO-LABEL: w_pcaller:
; SCM: bl w_pcallee
}
-define weak hidden void @w_hcallee(i8* %ptr) { ret void }
-define dso_local void @w_hcaller(i8* %ptr) {
- tail call void @w_hcallee(i8* %ptr)
+define weak hidden void @w_hcallee(ptr %ptr) { ret void }
+define dso_local void @w_hcaller(ptr %ptr) {
+ tail call void @w_hcallee(ptr %ptr)
ret void
; CHECK-SCO-LABEL: w_hcaller:
; SCM: bl w_hcallee
}
-define weak void @w_callee(i8* %ptr) { ret void }
-define dso_local void @w_caller(i8* %ptr) {
- tail call void @w_callee(i8* %ptr)
+define weak void @w_callee(ptr %ptr) { ret void }
+define dso_local void @w_caller(ptr %ptr) {
+ tail call void @w_callee(ptr %ptr)
ret void
; CHECK-SCO-LABEL: w_caller:
%struct.byvalTest = type { [8 x i8] }
@byval = common global %struct.byvalTest zeroinitializer
-define dso_local void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) %ptr) { ret void }
+define dso_local void @byval_callee(ptr byval(%struct.byvalTest) %ptr) { ret void }
define dso_local void @byval_caller() {
- tail call void @byval_callee(%struct.byvalTest* byval(%struct.byvalTest) @byval)
+ tail call void @byval_callee(ptr byval(%struct.byvalTest) @byval)
ret void
; CHECK-SCO-LABEL: bl byval_callee
@gs = common global %struct.small_arg zeroinitializer, align 2
@gf = common global float 0.000000e+00, align 4
-define void @callee1(%struct.small_arg* noalias nocapture sret(%struct.small_arg) %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
+define void @callee1(ptr noalias nocapture sret(%struct.small_arg) %agg.result, ptr byval(%struct.large_arg) nocapture readnone %pad, ptr byval(%struct.small_arg) nocapture readonly %x) {
entry:
- %0 = bitcast %struct.small_arg* %x to i32*
- %1 = bitcast %struct.small_arg* %agg.result to i32*
- %2 = load i32, i32* %0, align 2
- store i32 %2, i32* %1, align 2
+ %0 = load i32, ptr %x, align 2
+ store i32 %0, ptr %agg.result, align 2
ret void
}
; CHECK: @callee1
define void @caller1() {
entry:
%tmp = alloca %struct.small_arg, align 2
- call void @test1(%struct.small_arg* sret(%struct.small_arg) %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
+ call void @test1(ptr sret(%struct.small_arg) %tmp, ptr byval(%struct.large_arg) @gl, ptr byval(%struct.small_arg) @gs)
ret void
}
; CHECK: @caller1
; CHECK: stw {{[0-9]+}}, 124(1)
; CHECK: bl test1
-declare void @test1(%struct.small_arg* sret(%struct.small_arg), %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
+declare void @test1(ptr sret(%struct.small_arg), ptr byval(%struct.large_arg), ptr byval(%struct.small_arg))
define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
entry:
define void @caller2() {
entry:
- %0 = load float, float* @gf, align 4
+ %0 = load float, ptr @gf, align 4
%call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
ret void
}
; CHECK-NEXT: .quad 0
; CHECK-NEXT: .text
; CHECK-NEXT: .L[[BEGIN]]:
- %0 = load i64, i64* @number64, align 8
+ %0 = load i64, ptr @number64, align 8
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
entry:
; CHECK-LABEL: internal_static_var:
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
- %0 = load i64, i64* @internal_static_var.x, align 8
+ %0 = load i64, ptr @internal_static_var.x, align 8
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
entry:
; CHECK-LABEL: access_double_array:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds [32 x double], [32 x double]* @double_array, i64 0, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds [32 x double], ptr @double_array, i64 0, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
%cmp = fcmp oeq double %0, %a
%conv = zext i1 %cmp to i32
define i32 @intvaarg(i32 %a, ...) nounwind {
entry:
- %va = alloca i8*, align 8
- %va1 = bitcast i8** %va to i8*
- call void @llvm.va_start(i8* %va1)
- %0 = va_arg i8** %va, i32
+ %va = alloca ptr, align 8
+ call void @llvm.va_start(ptr %va)
+ %0 = va_arg ptr %va, i32
%sub = sub nsw i32 %a, %0
ret i32 %sub
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
; CHECK: @intvaarg
; Make sure that the va pointer is incremented by 8 (not 4).
; LE-NEXT: std r4, -8(r1)
; LE-NEXT: blr
entry:
- %va = alloca i8*, align 8
- %va.cast = bitcast i8** %va to i8*
- call void @llvm.va_start(i8* %va.cast)
+ %va = alloca ptr, align 8
+ call void @llvm.va_start(ptr %va)
ret i32 0
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
define dso_local void @test_xxsplti32dx() local_unnamed_addr {
entry:
- %i1 = load double, double* undef, align 8
+ %i1 = load double, ptr undef, align 8
br label %for.body124
for.body124:
br i1 undef, label %for.body919.preheader, label %for.end1072
for.body919.preheader:
- %i4 = load double, double* null, align 8
- %i5 = load double, double* null, align 8
+ %i4 = load double, ptr null, align 8
+ %i5 = load double, ptr null, align 8
%i15 = insertelement <2 x double> poison, double %i5, i32 0
%i23 = insertelement <2 x double> undef, double %i4, i32 1
%i24 = insertelement <2 x double> %i15, double 0x3FC5555555555555, i32 1
%E1 = phi double [ %E0, %for.body124 ], [ %sub994, %for.body919.preheader ]
%i28 = phi <2 x double> [ zeroinitializer, %for.body124 ], [ %i15, %for.body919.preheader ]
tail call void @callee()
- store <2 x double> %i28, <2 x double>* undef, align 8
+ store <2 x double> %i28, ptr undef, align 8
br label %for.body124
}
define void @caller2() {
entry:
- %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
- %2 = load [2 x float], [2 x float]* getelementptr inbounds (%struct.float2, %struct.float2* @g2, i64 0, i32 0), align 4
+ %0 = load [8 x float], ptr @g8, align 4
+ %1 = load [5 x float], ptr @g5, align 4
+ %2 = load [2 x float], ptr @g2, align 4
tail call void @test2([8 x float] %0, [5 x float] %1, [2 x float] %2)
ret void
}
define void @caller3(double %d) {
entry:
- %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
+ %0 = load [8 x float], ptr @g8, align 4
+ %1 = load [5 x float], ptr @g5, align 4
tail call void @test3([8 x float] %0, [5 x float] %1, double %d)
ret void
}
define void @caller4(float %f) {
entry:
- %0 = load [8 x float], [8 x float]* getelementptr inbounds (%struct.float8, %struct.float8* @g8, i64 0, i32 0), align 4
- %1 = load [5 x float], [5 x float]* getelementptr inbounds (%struct.float5, %struct.float5* @g5, i64 0, i32 0), align 4
+ %0 = load [8 x float], ptr @g8, align 4
+ %1 = load [5 x float], ptr @g5, align 4
tail call void @test4([8 x float] %0, [5 x float] %1, float %f)
ret void
}
target triple = "powerpc64le-unknown-linux-gnu"
; Indirect calls requires a full stub creation
-define void @test_indirect(void ()* nocapture %fp) {
+define void @test_indirect(ptr nocapture %fp) {
; CHECK-LABEL: @test_indirect
tail call void %fp()
; CHECK-DAG: std 2, 24(1)
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-@_ZTIi = external constant i8*
-declare i8* @__cxa_allocate_exception(i64)
-declare void @__cxa_throw(i8*, i8*, i8*)
+@_ZTIi = external constant ptr
+declare ptr @__cxa_allocate_exception(i64)
+declare void @__cxa_throw(ptr, ptr, ptr)
define void @crsave() {
entry:
call void asm sideeffect "", "~{cr3}"()
call void asm sideeffect "", "~{cr4}"()
- %exception = call i8* @__cxa_allocate_exception(i64 4)
- %0 = bitcast i8* %exception to i32*
- store i32 0, i32* %0
- call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+ %exception = call ptr @__cxa_allocate_exception(i64 4)
+ store i32 0, ptr %exception
+ call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
unreachable
return: ; No predecessors!
; CHECK-NEXT: .Lfunc_lep[[FN]]:
; CHECK-NEXT: .localentry use_toc, .Lfunc_lep[[FN]]-.Lfunc_gep[[FN]]
; CHECK-NEXT: %entry
- %0 = load i64, i64* @number64, align 8
+ %0 = load i64, ptr @number64, align 8
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
; CHECK-NEXT: .Lfunc_lep[[FN]]:
; CHECK-NEXT: .localentry use_toc, .Lfunc_lep[[FN]]-.Lfunc_gep[[FN]]
; CHECK-NEXT: %entry
- %0 = load i64, i64* @number64, align 8
+ %0 = load i64, ptr @number64, align 8
%cmp = icmp eq i64 %0, %a
%conv1 = zext i1 %cmp to i64
ret i64 %conv1
@gs = common global %struct.small_arg zeroinitializer, align 2
@gf = common global float 0.000000e+00, align 4
; callee1: copies the first i32 (4 bytes, align 2) of the byval %struct.small_arg
; argument %x into the sret return slot %agg.result; the byval %pad argument is
; unused (readnone). The -/+ pairs below show the typed-pointer IR being migrated
; to opaque 'ptr'; with opaque pointers the two bitcasts become unnecessary.
-define void @callee1(%struct.small_arg* noalias nocapture sret(%struct.small_arg) %agg.result, %struct.large_arg* byval(%struct.large_arg) nocapture readnone %pad, %struct.small_arg* byval(%struct.small_arg) nocapture readonly %x) {
+define void @callee1(ptr noalias nocapture sret(%struct.small_arg) %agg.result, ptr byval(%struct.large_arg) nocapture readnone %pad, ptr byval(%struct.small_arg) nocapture readonly %x) {
entry:
- %0 = bitcast %struct.small_arg* %x to i32*
- %1 = bitcast %struct.small_arg* %agg.result to i32*
- %2 = load i32, i32* %0, align 2
- store i32 %2, i32* %1, align 2
+ %0 = load i32, ptr %x, align 2
+ store i32 %0, ptr %agg.result, align 2
ret void
}
; CHECK: @callee1
; caller1: calls @test1 with a local sret temporary plus the globals @gl/@gs
; passed byval, exercising mixed sret + large/small byval argument lowering
; (the stw/bl sequence is checked by the CHECK lines that follow this block).
define void @caller1() {
entry:
%tmp = alloca %struct.small_arg, align 2
- call void @test1(%struct.small_arg* sret(%struct.small_arg) %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs)
+ call void @test1(ptr sret(%struct.small_arg) %tmp, ptr byval(%struct.large_arg) @gl, ptr byval(%struct.small_arg) @gs)
ret void
}
; CHECK: @caller1
; CHECK: stw {{[0-9]+}}, 104(1)
; CHECK: bl test1
-declare void @test1(%struct.small_arg* sret(%struct.small_arg), %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg))
+declare void @test1(ptr sret(%struct.small_arg), ptr byval(%struct.large_arg), ptr byval(%struct.small_arg))
define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
entry:
; caller2: loads a float from global @gf and passes it as the 14th float
; argument to @test2 (after 13 zero pads), exercising FP argument passing
; once the 13 FP argument registers are exhausted.
define void @caller2() {
entry:
- %0 = load float, float* @gf, align 4
+ %0 = load float, ptr @gf, align 4
%call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
ret void
}
; main: stores a specific ppc_fp128 bit pattern (low double all-ones) into a
; 16-byte-aligned stack slot and passes the slot's address to @foo; returns
; undef. Exercises passing a ppc_fp128 indirectly by pointer.
define i32 @main() local_unnamed_addr {
_main_entry:
%e3 = alloca ppc_fp128, align 16
- store ppc_fp128 0xM0000000000000000FFFFFFFFFFFFFFFF, ppc_fp128* %e3, align 16
- %0 = call i64 @foo( ppc_fp128* nonnull %e3)
+ store ppc_fp128 0xM0000000000000000FFFFFFFFFFFFFFFF, ptr %e3, align 16
+ %0 = call i64 @foo( ptr nonnull %e3)
ret i32 undef
}
-declare i64 @foo(ppc_fp128 *)
+declare i64 @foo(ptr)
; plus: unoptimized (front-end style) ppc_fp128 addition — spills %x and %y to
; stack slots, reloads them, fadds, then round-trips the result through %tmp
; and %retval before returning. The -/+ pairs migrate typed pointers to 'ptr';
; only pointer spellings change, not the instruction sequence.
define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
+ %x_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %y_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %retval = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
+ %tmp = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %x, ptr %x_addr
+ store ppc_fp128 %y, ptr %y_addr
+ %tmp1 = load ppc_fp128, ptr %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ptr %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+ store ppc_fp128 %tmp3, ptr %tmp, align 16
+ %tmp4 = load ppc_fp128, ptr %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %tmp4, ptr %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ptr %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
; minus: same spill/reload skeleton as @plus but performs fsub; exercises the
; ppc_fp128 subtraction lowering (__gcc_qsub-style soft-float path). Only the
; pointer spellings change in the -/+ migration below.
define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
+ %x_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %y_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %retval = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
+ %tmp = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %x, ptr %x_addr
+ store ppc_fp128 %y, ptr %y_addr
+ %tmp1 = load ppc_fp128, ptr %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ptr %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+ store ppc_fp128 %tmp3, ptr %tmp, align 16
+ %tmp4 = load ppc_fp128, ptr %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %tmp4, ptr %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ptr %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
; times: same spill/reload skeleton as @plus but performs fmul on the two
; ppc_fp128 operands. Only pointer spellings change in the -/+ migration.
define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
+ %x_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %y_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %retval = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
+ %tmp = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %x, ptr %x_addr
+ store ppc_fp128 %y, ptr %y_addr
+ %tmp1 = load ppc_fp128, ptr %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ptr %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+ store ppc_fp128 %tmp3, ptr %tmp, align 16
+ %tmp4 = load ppc_fp128, ptr %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %tmp4, ptr %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ptr %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
; divide: same spill/reload skeleton as @plus but performs fdiv on the two
; ppc_fp128 operands. Only pointer spellings change in the -/+ migration.
define ppc_fp128 @divide(ppc_fp128 %x, ppc_fp128 %y) {
entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
+ %x_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %y_addr = alloca ppc_fp128 ; <ptr> [#uses=2]
+ %retval = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
+ %tmp = alloca ppc_fp128, align 16 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128, ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128, ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %x, ptr %x_addr
+ store ppc_fp128 %y, ptr %y_addr
+ %tmp1 = load ppc_fp128, ptr %x_addr, align 16 ; <ppc_fp128> [#uses=1]
+ %tmp2 = load ppc_fp128, ptr %y_addr, align 16 ; <ppc_fp128> [#uses=1]
%tmp3 = fdiv ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128, ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
+ store ppc_fp128 %tmp3, ptr %tmp, align 16
+ %tmp4 = load ppc_fp128, ptr %tmp, align 16 ; <ppc_fp128> [#uses=1]
+ store ppc_fp128 %tmp4, ptr %retval, align 16
br label %return
return: ; preds = %entry
- %retval5 = load ppc_fp128, ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
+ %retval5 = load ppc_fp128, ptr %retval ; <ppc_fp128> [#uses=1]
ret ppc_fp128 %retval5
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32--
%struct.stp_sequence = type { double, double }
; stp_sequence_set_short_data: converts a constant i16 0 to ppc_fp128 via
; sitofp and hands it to the varargs @__inline_isfinite; the pointer arguments
; are unused. Tests that small-int -> ppc_fp128 sitofp compiles on ppc32.
-define i32 @stp_sequence_set_short_data(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data(ptr %sequence, i32 %count, ptr %data) {
entry:
%tmp1112 = sitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
; stp_sequence_set_short_data2: same as the variant above but exercises
; i8 -> ppc_fp128 sitofp.
-define i32 @stp_sequence_set_short_data2(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data2(ptr %sequence, i32 %count, ptr %data) {
entry:
%tmp1112 = sitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
; stp_sequence_set_short_data3: same skeleton, exercising the unsigned
; i16 -> ppc_fp128 uitofp conversion.
-define i32 @stp_sequence_set_short_data3(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data3(ptr %sequence, i32 %count, ptr %data) {
entry:
%tmp1112 = uitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
ret i32 0
}
-define i32 @stp_sequence_set_short_data4(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
+define i32 @stp_sequence_set_short_data4(ptr %sequence, i32 %count, ptr %data) {
entry:
%tmp1112 = uitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
%tmp13 = call i32 (...) @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
; Test that resultant libcalls retain order even when their non-strict FLOP form could be
; trivially optimized into differing sequences.
-define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %result) #0 {
+define void @test_constrained_libcall_multichain(ptr %firstptr, ptr %result) #0 {
; PC64LE-LABEL: test_constrained_libcall_multichain:
; PC64LE: # %bb.0:
; PC64LE-NEXT: mflr 0
; PC64-NEXT: ld 0, 16(1)
; PC64-NEXT: mtlr 0
; PC64-NEXT: blr
- %load = load float, float* %firstptr
+ %load = load float, ptr %firstptr
%first = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
float %load,
metadata !"fpexcept.strict") #1
- store ppc_fp128 %first, ppc_fp128* %result
+ store ppc_fp128 %first, ptr %result
; For unconstrained FLOPs, these next two FP instructions would necessarily
; be executed in series with one another.
ppc_fp128 %first,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
- %stridx1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 1
- store ppc_fp128 %fadd, ppc_fp128* %stridx1
+ %stridx1 = getelementptr ppc_fp128, ptr %result, i32 1
+ store ppc_fp128 %fadd, ptr %stridx1
%fmul = call ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128(
ppc_fp128 %fadd,
ppc_fp128 %fadd,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
- %stridx2 = getelementptr ppc_fp128, ppc_fp128* %stridx1, i32 1
- store ppc_fp128 %fadd, ppc_fp128* %stridx2
+ %stridx2 = getelementptr ppc_fp128, ptr %stridx1, i32 1
+ store ppc_fp128 %fadd, ptr %stridx2
; For unconstrained FLOPs, these next two FP instructions could be reordered
; or even executed in parallel with respect to the previous two instructions.
ppc_fp128 %powi,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
- store float %tinypow, float* %firstptr
- %stridxn1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 -1
- store ppc_fp128 %powi, ppc_fp128* %stridxn1
+ store float %tinypow, ptr %firstptr
+ %stridxn1 = getelementptr ppc_fp128, ptr %result, i32 -1
+ store ppc_fp128 %powi, ptr %stridxn1
ret void
}
; CHECK-NEXT: blr
entry:
%x.addr = alloca ppc_fp128, align 16
- store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
- %0 = load ppc_fp128, ppc_fp128* %x.addr, align 16
- store ppc_fp128 %0, ppc_fp128* @g, align 16
+ store ppc_fp128 %x, ptr %x.addr, align 16
+ %0 = load ppc_fp128, ptr %x.addr, align 16
+ store ppc_fp128 %0, ptr @g, align 16
ret void
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %0 = load ppc_fp128, ppc_fp128* @g, align 16
+ %0 = load ppc_fp128, ptr @g, align 16
call void @test(ppc_fp128 %0)
ret void
}
; CHECK-NEXT: lfd 2, 8(3)
; CHECK-NEXT: blr
entry:
- %0 = load ppc_fp128, ppc_fp128* @g, align 16
+ %0 = load ppc_fp128, ptr @g, align 16
ret ppc_fp128 %0
}
; CHECK-NEXT: blr
entry:
%call = tail call ppc_fp128 @test_result() #3
- store ppc_fp128 %call, ppc_fp128* @g, align 16
+ store ppc_fp128 %call, ptr @g, align 16
ret void
}
ret double %conv
}
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
; vararg: fetches a ppc_fp128 from the va_list via va_arg and truncates it to
; double. The -/+ migration drops the i8**->i8* bitcast that typed pointers
; required before calling llvm.va_start. (CHECK lines here are partial; the
; diff elides the unchanged ones.)
define double @vararg(i32 %a, ...) {
; CHECK-LABEL: vararg:
; CHECK-NEXT: std 3, -8(1)
; CHECK-NEXT: blr
entry:
- %va = alloca i8*, align 8
- %va1 = bitcast i8** %va to i8*
- call void @llvm.va_start(i8* %va1)
- %arg = va_arg i8** %va, ppc_fp128
+ %va = alloca ptr, align 8
+ call void @llvm.va_start(ptr %va)
+ %arg = va_arg ptr %va, ppc_fp128
%conv = fptrunc ppc_fp128 %arg to double
ret double %conv
}
define void @foo() #0 {
entry:
%c = alloca ppc_fp128, align 16
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%add = fadd ppc_fp128 %0, %1
- store volatile ppc_fp128 %add, ppc_fp128* %c, align 16
- %2 = load ppc_fp128, ppc_fp128* @ld, align 16
- %3 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ store volatile ppc_fp128 %add, ptr %c, align 16
+ %2 = load ppc_fp128, ptr @ld, align 16
+ %3 = load ppc_fp128, ptr @ld2, align 16
%sub = fsub ppc_fp128 %2, %3
- store volatile ppc_fp128 %sub, ppc_fp128* %c, align 16
- %4 = load ppc_fp128, ppc_fp128* @ld, align 16
- %5 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ store volatile ppc_fp128 %sub, ptr %c, align 16
+ %4 = load ppc_fp128, ptr @ld, align 16
+ %5 = load ppc_fp128, ptr @ld2, align 16
%mul = fmul ppc_fp128 %4, %5
- store volatile ppc_fp128 %mul, ppc_fp128* %c, align 16
- %6 = load ppc_fp128, ppc_fp128* @ld, align 16
- %7 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ store volatile ppc_fp128 %mul, ptr %c, align 16
+ %6 = load ppc_fp128, ptr @ld, align 16
+ %7 = load ppc_fp128, ptr @ld2, align 16
%div = fdiv ppc_fp128 %6, %7
- store volatile ppc_fp128 %div, ppc_fp128* %c, align 16
+ store volatile ppc_fp128 %div, ptr %c, align 16
ret void
; CHECK-LABEL: __gcc_qadd
define void @foo1() #0 {
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fpext double %0 to ppc_fp128
- store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+ store ppc_fp128 %conv, ptr @ld, align 16
ret void
; CHECK-LABEL: __gcc_dtoq
define void @foo2() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
%conv = fptrunc ppc_fp128 %0 to double
- store double %conv, double* @d, align 8
+ store double %conv, ptr @d, align 8
ret void
; CHECK-LABEL: __gcc_qtod
define void @foo3() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
%conv = fptrunc ppc_fp128 %0 to float
- store float %conv, float* @f, align 4
+ store float %conv, ptr @f, align 4
ret void
; CHECK-LABEL: __gcc_qtos
define void @foo4() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = sitofp i32 %0 to ppc_fp128
- store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+ store ppc_fp128 %conv, ptr @ld, align 16
ret void
; CHECK-LABEL: __gcc_itoq
define void @foo5() #0 {
entry:
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = uitofp i32 %0 to ppc_fp128
- store ppc_fp128 %conv, ppc_fp128* @ld, align 16
+ store ppc_fp128 %conv, ptr @ld, align 16
ret void
; CHECK-LABEL: __gcc_utoq
define void @foo6() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp oeq ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qeq
define void @foo7() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp une ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qne
define void @foo8() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp ogt ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qgt
define void @foo9() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp olt ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qlt
define void @foo10() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp ole ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qle
define void @foo11() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld, align 16
%cmp = fcmp une ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qunord
define void @foo12() #0 {
entry:
- %0 = load ppc_fp128, ppc_fp128* @ld, align 16
- %1 = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %0 = load ppc_fp128, ptr @ld, align 16
+ %1 = load ppc_fp128, ptr @ld2, align 16
%cmp = fcmp oge ppc_fp128 %0, %1
%conv = zext i1 %cmp to i32
%conv1 = trunc i32 %conv to i8
- store i8 %conv1, i8* @var, align 1
+ store i8 %conv1, ptr @var, align 1
ret void
; CHECK-LABEL: __gcc_qge
entry:
%a = alloca double, align 8
%b = alloca double, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ %0 = load double, ptr %a, align 8
+ %1 = load double, ptr %b, align 8
%add = fadd double %0, %1
ret double %add
entry:
%a = alloca double, align 8
%b = alloca double, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ %0 = load double, ptr %a, align 8
+ %1 = load double, ptr %b, align 8
%mul = fmul double %0, %1
ret double %mul
entry:
%a = alloca double, align 8
%b = alloca double, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ %0 = load double, ptr %a, align 8
+ %1 = load double, ptr %b, align 8
%sub = fsub double %0, %1
ret double %sub
entry:
%a = alloca double, align 8
%b = alloca double, align 8
- %0 = load double, double* %a, align 8
- %1 = load double, double* %b, align 8
+ %0 = load double, ptr %a, align 8
+ %1 = load double, ptr %b, align 8
%div = fdiv double %0, %1
ret double %div
%struct.foo = type { i8, i8 }
-define void @_Z5check3foos(%struct.foo* nocapture byval(%struct.foo) %f, i16 signext %i) noinline {
+define void @_Z5check3foos(ptr nocapture byval(%struct.foo) %f, i16 signext %i) noinline {
; CHECK-LABEL: _Z5check3foos:
; CHECK: sth 3, {{[0-9]+}}(1)
; CHECK: lbz {{[0-9]+}}, {{[0-9]+}}(1)
entry:
- %0 = bitcast %struct.foo* %f to i16*
- %1 = load i16, i16* %0, align 2
- %bf.val.sext = ashr i16 %1, 8
+ %0 = load i16, ptr %f, align 2
+ %bf.val.sext = ashr i16 %0, 8
%cmp = icmp eq i16 %bf.val.sext, %i
br i1 %cmp, label %if.end, label %if.then
; Big-endian ppc64 target plus struct types mirroring LLVM's own C++ class
; layouts (MachineMemOperand, Value, Type, Use, ...). In the -/+ pairs every
; typed pointer member collapses to the opaque 'ptr' type; field counts and
; non-pointer members are unchanged.
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-%"class.llvm::MachineMemOperand" = type { %"struct.llvm::MachinePointerInfo", i64, i32, %"class.llvm::MDNode"*, %"class.llvm::MDNode"* }
-%"struct.llvm::MachinePointerInfo" = type { %"class.llvm::Value"*, i64 }
-%"class.llvm::Value" = type { i32 (...)**, i8, i8, i16, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"* }
-%"class.llvm::Type" = type { %"class.llvm::LLVMContext"*, i32, i32, %"class.llvm::Type"** }
-%"class.llvm::LLVMContext" = type { %"class.llvm::LLVMContextImpl"* }
+%"class.llvm::MachineMemOperand" = type { %"struct.llvm::MachinePointerInfo", i64, i32, ptr, ptr }
+%"struct.llvm::MachinePointerInfo" = type { ptr, i64 }
+%"class.llvm::Value" = type { ptr, i8, i8, i16, ptr, ptr, ptr }
+%"class.llvm::Type" = type { ptr, i32, i32, ptr }
+%"class.llvm::LLVMContext" = type { ptr }
%"class.llvm::LLVMContextImpl" = type opaque
-%"class.llvm::Use" = type { %"class.llvm::Value"*, %"class.llvm::Use"*, %"class.llvm::PointerIntPair" }
+%"class.llvm::Use" = type { ptr, ptr, %"class.llvm::PointerIntPair" }
%"class.llvm::PointerIntPair" = type { i64 }
%"class.llvm::StringMapEntry" = type opaque
%"class.llvm::MDNode" = type { %"class.llvm::Value", %"class.llvm::FoldingSetImpl::Node", i32, i32 }
-%"class.llvm::FoldingSetImpl::Node" = type { i8* }
-%"class.llvm::MachineInstr" = type { %"class.llvm::ilist_node", %"class.llvm::MCInstrDesc"*, %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineOperand"*, i32, %"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity", i8, i8, i8, %"class.llvm::MachineMemOperand"**, %"class.llvm::DebugLoc" }
-%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", %"class.llvm::MachineInstr"* }
-%"class.llvm::ilist_half_node" = type { %"class.llvm::MachineInstr"* }
-%"class.llvm::MCInstrDesc" = type { i16, i16, i16, i16, i16, i32, i64, i16*, i16*, %"class.llvm::MCOperandInfo"* }
+%"class.llvm::FoldingSetImpl::Node" = type { ptr }
+%"class.llvm::MachineInstr" = type { %"class.llvm::ilist_node", ptr, ptr, ptr, i32, %"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity", i8, i8, i8, ptr, %"class.llvm::DebugLoc" }
+%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", ptr }
+%"class.llvm::ilist_half_node" = type { ptr }
+%"class.llvm::MCInstrDesc" = type { i16, i16, i16, i16, i16, i32, i64, ptr, ptr, ptr }
%"class.llvm::MCOperandInfo" = type { i16, i8, i8, i32 }
-%"class.llvm::MachineBasicBlock" = type { %"class.llvm::ilist_node.0", %"struct.llvm::ilist", %"class.llvm::BasicBlock"*, i32, %"class.llvm::MachineFunction"*, %"class.std::vector.163", %"class.std::vector.163", %"class.std::vector.123", %"class.std::vector.123", i32, i8, i8 }
-%"class.llvm::ilist_node.0" = type { %"class.llvm::ilist_half_node.1", %"class.llvm::MachineBasicBlock"* }
-%"class.llvm::ilist_half_node.1" = type { %"class.llvm::MachineBasicBlock"* }
+%"class.llvm::MachineBasicBlock" = type { %"class.llvm::ilist_node.0", %"struct.llvm::ilist", ptr, i32, ptr, %"class.std::vector.163", %"class.std::vector.163", %"class.std::vector.123", %"class.std::vector.123", i32, i8, i8 }
+%"class.llvm::ilist_node.0" = type { %"class.llvm::ilist_half_node.1", ptr }
+%"class.llvm::ilist_half_node.1" = type { ptr }
%"struct.llvm::ilist" = type { %"class.llvm::iplist" }
-%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", %"class.llvm::MachineInstr"* }
-%"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_half_node", %"class.llvm::MachineBasicBlock"* }
-%"class.llvm::BasicBlock" = type { %"class.llvm::Value", %"class.llvm::ilist_node.2", %"class.llvm::iplist.4", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.2" = type { %"class.llvm::ilist_half_node.3", %"class.llvm::BasicBlock"* }
-%"class.llvm::ilist_half_node.3" = type { %"class.llvm::BasicBlock"* }
-%"class.llvm::iplist.4" = type { %"struct.llvm::ilist_traits.5", %"class.llvm::Instruction"* }
+%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", ptr }
+%"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_half_node", ptr }
+%"class.llvm::BasicBlock" = type { %"class.llvm::Value", %"class.llvm::ilist_node.2", %"class.llvm::iplist.4", ptr }
+%"class.llvm::ilist_node.2" = type { %"class.llvm::ilist_half_node.3", ptr }
+%"class.llvm::ilist_half_node.3" = type { ptr }
+%"class.llvm::iplist.4" = type { %"struct.llvm::ilist_traits.5", ptr }
%"struct.llvm::ilist_traits.5" = type { %"class.llvm::ilist_half_node.10" }
-%"class.llvm::ilist_half_node.10" = type { %"class.llvm::Instruction"* }
-%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.193", %"class.llvm::BasicBlock"*, %"class.llvm::DebugLoc" }
-%"class.llvm::User" = type { %"class.llvm::Value", %"class.llvm::Use"*, i32 }
-%"class.llvm::ilist_node.193" = type { %"class.llvm::ilist_half_node.10", %"class.llvm::Instruction"* }
+%"class.llvm::ilist_half_node.10" = type { ptr }
+%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.193", ptr, %"class.llvm::DebugLoc" }
+%"class.llvm::User" = type { %"class.llvm::Value", ptr, i32 }
+%"class.llvm::ilist_node.193" = type { %"class.llvm::ilist_half_node.10", ptr }
%"class.llvm::DebugLoc" = type { i32, i32 }
-%"class.llvm::Function" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.27", %"class.llvm::iplist.47", %"class.llvm::iplist.54", %"class.llvm::ValueSymbolTable"*, %"class.llvm::AttributeSet" }
-%"class.llvm::GlobalValue" = type { [52 x i8], [4 x i8], %"class.llvm::Module"*, %"class.std::basic_string" }
-%"class.llvm::Module" = type { %"class.llvm::LLVMContext"*, %"class.llvm::iplist.11", %"class.llvm::iplist.20", %"class.llvm::iplist.29", %"struct.llvm::ilist.38", %"class.std::basic_string", %"class.llvm::ValueSymbolTable"*, %"class.llvm::OwningPtr", %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", i8* }
-%"class.llvm::iplist.11" = type { %"struct.llvm::ilist_traits.12", %"class.llvm::GlobalVariable"* }
+%"class.llvm::Function" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.27", %"class.llvm::iplist.47", %"class.llvm::iplist.54", ptr, %"class.llvm::AttributeSet" }
+%"class.llvm::GlobalValue" = type { [52 x i8], [4 x i8], ptr, %"class.std::basic_string" }
+%"class.llvm::Module" = type { ptr, %"class.llvm::iplist.11", %"class.llvm::iplist.20", %"class.llvm::iplist.29", %"struct.llvm::ilist.38", %"class.std::basic_string", ptr, %"class.llvm::OwningPtr", %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", ptr }
+%"class.llvm::iplist.11" = type { %"struct.llvm::ilist_traits.12", ptr }
%"struct.llvm::ilist_traits.12" = type { %"class.llvm::ilist_node.18" }
-%"class.llvm::ilist_node.18" = type { %"class.llvm::ilist_half_node.19", %"class.llvm::GlobalVariable"* }
-%"class.llvm::ilist_half_node.19" = type { %"class.llvm::GlobalVariable"* }
+%"class.llvm::ilist_node.18" = type { %"class.llvm::ilist_half_node.19", ptr }
+%"class.llvm::ilist_half_node.19" = type { ptr }
%"class.llvm::GlobalVariable" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.18", i8 }
-%"class.llvm::iplist.20" = type { %"struct.llvm::ilist_traits.21", %"class.llvm::Function"* }
+%"class.llvm::iplist.20" = type { %"struct.llvm::ilist_traits.21", ptr }
%"struct.llvm::ilist_traits.21" = type { %"class.llvm::ilist_node.27" }
-%"class.llvm::ilist_node.27" = type { %"class.llvm::ilist_half_node.28", %"class.llvm::Function"* }
-%"class.llvm::ilist_half_node.28" = type { %"class.llvm::Function"* }
-%"class.llvm::iplist.29" = type { %"struct.llvm::ilist_traits.30", %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_node.27" = type { %"class.llvm::ilist_half_node.28", ptr }
+%"class.llvm::ilist_half_node.28" = type { ptr }
+%"class.llvm::iplist.29" = type { %"struct.llvm::ilist_traits.30", ptr }
%"struct.llvm::ilist_traits.30" = type { %"class.llvm::ilist_node.36" }
-%"class.llvm::ilist_node.36" = type { %"class.llvm::ilist_half_node.37", %"class.llvm::GlobalAlias"* }
-%"class.llvm::ilist_half_node.37" = type { %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_node.36" = type { %"class.llvm::ilist_half_node.37", ptr }
+%"class.llvm::ilist_half_node.37" = type { ptr }
%"class.llvm::GlobalAlias" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.36" }
%"struct.llvm::ilist.38" = type { %"class.llvm::iplist.39" }
-%"class.llvm::iplist.39" = type { %"struct.llvm::ilist_traits.40", %"class.llvm::NamedMDNode"* }
+%"class.llvm::iplist.39" = type { %"struct.llvm::ilist_traits.40", ptr }
%"struct.llvm::ilist_traits.40" = type { %"class.llvm::ilist_node.45" }
-%"class.llvm::ilist_node.45" = type { %"class.llvm::ilist_half_node.46", %"class.llvm::NamedMDNode"* }
-%"class.llvm::ilist_half_node.46" = type { %"class.llvm::NamedMDNode"* }
-%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.45", %"class.std::basic_string", %"class.llvm::Module"*, i8* }
+%"class.llvm::ilist_node.45" = type { %"class.llvm::ilist_half_node.46", ptr }
+%"class.llvm::ilist_half_node.46" = type { ptr }
+%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.45", %"class.std::basic_string", ptr, ptr }
%"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
-%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { ptr }
%"class.llvm::ValueSymbolTable" = type opaque
-%"class.llvm::OwningPtr" = type { %"class.llvm::GVMaterializer"* }
+%"class.llvm::OwningPtr" = type { ptr }
%"class.llvm::GVMaterializer" = type opaque
-%"class.llvm::iplist.47" = type { %"struct.llvm::ilist_traits.48", %"class.llvm::BasicBlock"* }
+%"class.llvm::iplist.47" = type { %"struct.llvm::ilist_traits.48", ptr }
%"struct.llvm::ilist_traits.48" = type { %"class.llvm::ilist_half_node.3" }
-%"class.llvm::iplist.54" = type { %"struct.llvm::ilist_traits.55", %"class.llvm::Argument"* }
+%"class.llvm::iplist.54" = type { %"struct.llvm::ilist_traits.55", ptr }
%"struct.llvm::ilist_traits.55" = type { %"class.llvm::ilist_half_node.61" }
-%"class.llvm::ilist_half_node.61" = type { %"class.llvm::Argument"* }
-%"class.llvm::Argument" = type { %"class.llvm::Value", %"class.llvm::ilist_node.192", %"class.llvm::Function"* }
-%"class.llvm::ilist_node.192" = type { %"class.llvm::ilist_half_node.61", %"class.llvm::Argument"* }
-%"class.llvm::AttributeSet" = type { %"class.llvm::AttributeSetImpl"* }
+%"class.llvm::ilist_half_node.61" = type { ptr }
+%"class.llvm::Argument" = type { %"class.llvm::Value", %"class.llvm::ilist_node.192", ptr }
+%"class.llvm::ilist_node.192" = type { %"class.llvm::ilist_half_node.61", ptr }
+%"class.llvm::AttributeSet" = type { ptr }
%"class.llvm::AttributeSetImpl" = type opaque
-%"class.llvm::MachineFunction" = type { %"class.llvm::Function"*, %"class.llvm::TargetMachine"*, %"class.llvm::MCContext"*, %"class.llvm::MachineModuleInfo"*, %"class.llvm::GCModuleInfo"*, %"class.llvm::MachineRegisterInfo"*, %"struct.llvm::MachineFunctionInfo"*, %"class.llvm::MachineFrameInfo"*, %"class.llvm::MachineConstantPool"*, %"class.llvm::MachineJumpTableInfo"*, %"class.std::vector.163", %"class.llvm::BumpPtrAllocator", %"class.llvm::Recycler", %"class.llvm::ArrayRecycler", %"class.llvm::Recycler.180", %"struct.llvm::ilist.181", i32, i32, i8 }
-%"class.llvm::TargetMachine" = type { i32 (...)**, %"class.llvm::Target"*, %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", %"class.llvm::MCCodeGenInfo"*, %"class.llvm::MCAsmInfo"*, i8, %"class.llvm::TargetOptions" }
+%"class.llvm::MachineFunction" = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, %"class.std::vector.163", %"class.llvm::BumpPtrAllocator", %"class.llvm::Recycler", %"class.llvm::ArrayRecycler", %"class.llvm::Recycler.180", %"struct.llvm::ilist.181", i32, i32, i8 }
+%"class.llvm::TargetMachine" = type { ptr, ptr, %"class.std::basic_string", %"class.std::basic_string", %"class.std::basic_string", ptr, ptr, i8, %"class.llvm::TargetOptions" }
%"class.llvm::Target" = type opaque
%"class.llvm::MCCodeGenInfo" = type opaque
%"class.llvm::MCAsmInfo" = type opaque
%"class.llvm::TargetOptions" = type { [2 x i8], i32, i8, i32, i8, %"class.std::basic_string", i32, i32 }
-%"class.llvm::MCContext" = type { %"class.llvm::SourceMgr"*, %"class.llvm::MCAsmInfo"*, %"class.llvm::MCRegisterInfo"*, %"class.llvm::MCObjectFileInfo"*, %"class.llvm::BumpPtrAllocator", %"class.llvm::StringMap", %"class.llvm::StringMap.62", i32, %"class.llvm::DenseMap.63", i8*, %"class.llvm::raw_ostream"*, i8, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector", %"class.std::vector.70", %"class.llvm::MCDwarfLoc", i8, i8, i32, %"class.llvm::MCSection"*, %"class.llvm::MCSymbol"*, %"class.llvm::MCSymbol"*, %"class.std::vector.75", %"class.llvm::StringRef", %"class.llvm::StringRef", i8, %"class.llvm::DenseMap.80", %"class.std::vector.84", i8*, i8*, i8*, i8 }
+%"class.llvm::MCContext" = type { ptr, ptr, ptr, ptr, %"class.llvm::BumpPtrAllocator", %"class.llvm::StringMap", %"class.llvm::StringMap.62", i32, %"class.llvm::DenseMap.63", ptr, ptr, i8, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector", %"class.std::vector.70", %"class.llvm::MCDwarfLoc", i8, i8, i32, ptr, ptr, ptr, %"class.std::vector.75", %"class.llvm::StringRef", %"class.llvm::StringRef", i8, %"class.llvm::DenseMap.80", %"class.std::vector.84", ptr, ptr, ptr, i8 }
%"class.llvm::SourceMgr" = type opaque
-%"class.llvm::MCRegisterInfo" = type { %"struct.llvm::MCRegisterDesc"*, i32, i32, i32, %"class.llvm::MCRegisterClass"*, i32, i32, [2 x i16]*, i16*, i8*, i16*, i32, i16*, i32, i32, i32, i32, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair"*, %"class.llvm::DenseMap" }
+%"class.llvm::MCRegisterInfo" = type { ptr, i32, i32, i32, ptr, i32, i32, ptr, ptr, ptr, ptr, i32, ptr, i32, i32, i32, i32, ptr, ptr, ptr, ptr, %"class.llvm::DenseMap" }
%"struct.llvm::MCRegisterDesc" = type { i32, i32, i32, i32, i32, i32 }
-%"class.llvm::MCRegisterClass" = type { i8*, i16*, i8*, i16, i16, i16, i16, i16, i8, i8 }
+%"class.llvm::MCRegisterClass" = type { ptr, ptr, ptr, i16, i16, i16, i16, i16, i8, i8 }
%"struct.llvm::MCRegisterInfo::DwarfLLVMRegPair" = type { i32, i32 }
-%"class.llvm::DenseMap" = type { %"struct.std::pair"*, i32, i32, i32 }
+%"class.llvm::DenseMap" = type { ptr, i32, i32, i32 }
%"struct.std::pair" = type { i32, i32 }
%"class.llvm::MCObjectFileInfo" = type opaque
-%"class.llvm::BumpPtrAllocator" = type { i64, i64, %"class.llvm::SlabAllocator"*, %"class.llvm::MemSlab"*, i8*, i8*, i64 }
-%"class.llvm::SlabAllocator" = type { i32 (...)** }
-%"class.llvm::MemSlab" = type { i64, %"class.llvm::MemSlab"* }
-%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocator"* }
-%"class.llvm::StringMapImpl" = type { %"class.llvm::StringMapEntryBase"**, i32, i32, i32, i32 }
+%"class.llvm::BumpPtrAllocator" = type { i64, i64, ptr, ptr, ptr, ptr, i64 }
+%"class.llvm::SlabAllocator" = type { ptr }
+%"class.llvm::MemSlab" = type { i64, ptr }
+%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", ptr }
+%"class.llvm::StringMapImpl" = type { ptr, i32, i32, i32, i32 }
%"class.llvm::StringMapEntryBase" = type { i32 }
-%"class.llvm::StringMap.62" = type { %"class.llvm::StringMapImpl", %"class.llvm::BumpPtrAllocator"* }
-%"class.llvm::DenseMap.63" = type { %"struct.std::pair.66"*, i32, i32, i32 }
+%"class.llvm::StringMap.62" = type { %"class.llvm::StringMapImpl", ptr }
+%"class.llvm::DenseMap.63" = type { ptr, i32, i32, i32 }
%"struct.std::pair.66" = type opaque
-%"class.llvm::raw_ostream" = type { i32 (...)**, i8*, i8*, i8*, i32 }
+%"class.llvm::raw_ostream" = type { ptr, ptr, ptr, ptr, i32 }
%"class.std::vector" = type { %"struct.std::_Vector_base" }
%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" = type { %"class.llvm::MCDwarfFile"**, %"class.llvm::MCDwarfFile"**, %"class.llvm::MCDwarfFile"** }
+%"struct.std::_Vector_base<llvm::MCDwarfFile *, std::allocator<llvm::MCDwarfFile *> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.llvm::MCDwarfFile" = type { %"class.llvm::StringRef", i32 }
-%"class.llvm::StringRef" = type { i8*, i64 }
+%"class.llvm::StringRef" = type { ptr, i64 }
%"class.std::vector.70" = type { %"struct.std::_Vector_base.71" }
%"struct.std::_Vector_base.71" = type { %"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" = type { %"class.llvm::StringRef"*, %"class.llvm::StringRef"*, %"class.llvm::StringRef"* }
+%"struct.std::_Vector_base<llvm::StringRef, std::allocator<llvm::StringRef> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.llvm::MCDwarfLoc" = type { i32, i32, i32, i32, i32, i32 }
%"class.llvm::MCSection" = type opaque
-%"class.llvm::MCSymbol" = type { %"class.llvm::StringRef", %"class.llvm::MCSection"*, %"class.llvm::MCExpr"*, i8 }
+%"class.llvm::MCSymbol" = type { %"class.llvm::StringRef", ptr, ptr, i8 }
%"class.llvm::MCExpr" = type opaque
%"class.std::vector.75" = type { %"struct.std::_Vector_base.76" }
%"struct.std::_Vector_base.76" = type { %"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" = type { %"class.llvm::MCGenDwarfLabelEntry"**, %"class.llvm::MCGenDwarfLabelEntry"**, %"class.llvm::MCGenDwarfLabelEntry"** }
-%"class.llvm::MCGenDwarfLabelEntry" = type { %"class.llvm::StringRef", i32, i32, %"class.llvm::MCSymbol"* }
-%"class.llvm::DenseMap.80" = type { %"struct.std::pair.83"*, i32, i32, i32 }
-%"struct.std::pair.83" = type { %"class.llvm::MCSection"*, %"class.llvm::MCLineSection"* }
+%"struct.std::_Vector_base<const llvm::MCGenDwarfLabelEntry *, std::allocator<const llvm::MCGenDwarfLabelEntry *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MCGenDwarfLabelEntry" = type { %"class.llvm::StringRef", i32, i32, ptr }
+%"class.llvm::DenseMap.80" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.83" = type { ptr, ptr }
%"class.llvm::MCLineSection" = type { %"class.std::vector.215" }
%"class.std::vector.215" = type { %"struct.std::_Vector_base.216" }
%"struct.std::_Vector_base.216" = type { %"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" = type { %"class.llvm::MCLineEntry"*, %"class.llvm::MCLineEntry"*, %"class.llvm::MCLineEntry"* }
-%"class.llvm::MCLineEntry" = type { %"class.llvm::MCDwarfLoc", %"class.llvm::MCSymbol"* }
+%"struct.std::_Vector_base<llvm::MCLineEntry, std::allocator<llvm::MCLineEntry> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MCLineEntry" = type { %"class.llvm::MCDwarfLoc", ptr }
%"class.std::vector.84" = type { %"struct.std::_Vector_base.85" }
%"struct.std::_Vector_base.85" = type { %"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" = type { %"class.llvm::MCSection"**, %"class.llvm::MCSection"**, %"class.llvm::MCSection"** }
-%"class.llvm::MachineModuleInfo" = type { %"class.llvm::ImmutablePass", %"class.llvm::MCContext", %"class.llvm::Module"*, %"class.llvm::MachineModuleInfoImpl"*, %"class.std::vector.95", i32, %"class.std::vector.100", %"class.llvm::DenseMap.110", %"class.llvm::DenseMap.114", i32, %"class.std::vector.118", %"class.std::vector.123", %"class.std::vector.123", %"class.std::vector.128", %"class.llvm::SmallPtrSet", %"class.llvm::MMIAddrLabelMap"*, i8, i8, i8, i8, %"class.llvm::SmallVector.133" }
+%"struct.std::_Vector_base<const llvm::MCSection *, std::allocator<const llvm::MCSection *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MachineModuleInfo" = type { %"class.llvm::ImmutablePass", %"class.llvm::MCContext", ptr, ptr, %"class.std::vector.95", i32, %"class.std::vector.100", %"class.llvm::DenseMap.110", %"class.llvm::DenseMap.114", i32, %"class.std::vector.118", %"class.std::vector.123", %"class.std::vector.123", %"class.std::vector.128", %"class.llvm::SmallPtrSet", ptr, i8, i8, i8, i8, %"class.llvm::SmallVector.133" }
%"class.llvm::ImmutablePass" = type { %"class.llvm::ModulePass" }
%"class.llvm::ModulePass" = type { %"class.llvm::Pass" }
-%"class.llvm::Pass" = type { i32 (...)**, %"class.llvm::AnalysisResolver"*, i8*, i32 }
-%"class.llvm::AnalysisResolver" = type { %"class.std::vector.89", %"class.llvm::PMDataManager"* }
+%"class.llvm::Pass" = type { ptr, ptr, ptr, i32 }
+%"class.llvm::AnalysisResolver" = type { %"class.std::vector.89", ptr }
%"class.std::vector.89" = type { %"struct.std::_Vector_base.90" }
-%"struct.std::_Vector_base.90" = type { %"struct.std::_Vector_base<std::pair<const void *, llvm::Pass *>, std::allocator<std::pair<const void *, llvm::Pass *> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<const void *, llvm::Pass *>, std::allocator<std::pair<const void *, llvm::Pass *> > >::_Vector_impl" = type { %"struct.std::pair.94"*, %"struct.std::pair.94"*, %"struct.std::pair.94"* }
-%"struct.std::pair.94" = type { i8*, %"class.llvm::Pass"* }
+%"struct.std::_Vector_base.90" = type { %"struct.std::_Vector_base<std::pair<const ptr, llvm::Pass *>, std::allocator<std::pair<const ptr, llvm::Pass *> > >::_Vector_impl" }
+%"struct.std::_Vector_base<std::pair<const ptr, llvm::Pass *>, std::allocator<std::pair<const ptr, llvm::Pass *> > >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.std::pair.94" = type { ptr, ptr }
%"class.llvm::PMDataManager" = type opaque
-%"class.llvm::MachineModuleInfoImpl" = type { i32 (...)** }
+%"class.llvm::MachineModuleInfoImpl" = type { ptr }
%"class.std::vector.95" = type { %"struct.std::_Vector_base.96" }
%"struct.std::_Vector_base.96" = type { %"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" = type { %"class.llvm::MachineMove"*, %"class.llvm::MachineMove"*, %"class.llvm::MachineMove"* }
-%"class.llvm::MachineMove" = type { %"class.llvm::MCSymbol"*, %"class.llvm::MachineLocation", %"class.llvm::MachineLocation" }
+%"struct.std::_Vector_base<llvm::MachineMove, std::allocator<llvm::MachineMove> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::MachineMove" = type { ptr, %"class.llvm::MachineLocation", %"class.llvm::MachineLocation" }
%"class.llvm::MachineLocation" = type { i8, i32, i32 }
%"class.std::vector.100" = type { %"struct.std::_Vector_base.101" }
%"struct.std::_Vector_base.101" = type { %"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" = type { %"struct.llvm::LandingPadInfo"*, %"struct.llvm::LandingPadInfo"*, %"struct.llvm::LandingPadInfo"* }
-%"struct.llvm::LandingPadInfo" = type { %"class.llvm::MachineBasicBlock"*, %"class.llvm::SmallVector", %"class.llvm::SmallVector", %"class.llvm::MCSymbol"*, %"class.llvm::Function"*, %"class.std::vector.105" }
+%"struct.std::_Vector_base<llvm::LandingPadInfo, std::allocator<llvm::LandingPadInfo> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.llvm::LandingPadInfo" = type { ptr, %"class.llvm::SmallVector", %"class.llvm::SmallVector", ptr, ptr, %"class.std::vector.105" }
%"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl", %"struct.llvm::SmallVectorStorage" }
%"class.llvm::SmallVectorImpl" = type { %"class.llvm::SmallVectorTemplateBase" }
%"class.llvm::SmallVectorTemplateBase" = type { %"class.llvm::SmallVectorTemplateCommon" }
%"class.llvm::SmallVectorTemplateCommon" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }
-%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
+%"class.llvm::SmallVectorBase" = type { ptr, ptr, ptr }
%"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
%"struct.llvm::AlignedCharArray" = type { [8 x i8] }
%"struct.llvm::SmallVectorStorage" = type { i8 }
%"class.std::vector.105" = type { %"struct.std::_Vector_base.106" }
%"struct.std::_Vector_base.106" = type { %"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" }
-%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
-%"class.llvm::DenseMap.110" = type { %"struct.std::pair.113"*, i32, i32, i32 }
-%"struct.std::pair.113" = type { %"class.llvm::MCSymbol"*, %"class.llvm::SmallVector.206" }
+%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::DenseMap.110" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.113" = type { ptr, %"class.llvm::SmallVector.206" }
%"class.llvm::SmallVector.206" = type { [28 x i8], %"struct.llvm::SmallVectorStorage.207" }
%"struct.llvm::SmallVectorStorage.207" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.198"] }
%"struct.llvm::AlignedCharArrayUnion.198" = type { %"struct.llvm::AlignedCharArray.199" }
%"struct.llvm::AlignedCharArray.199" = type { [4 x i8] }
-%"class.llvm::DenseMap.114" = type { %"struct.std::pair.117"*, i32, i32, i32 }
-%"struct.std::pair.117" = type { %"class.llvm::MCSymbol"*, i32 }
+%"class.llvm::DenseMap.114" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.117" = type { ptr, i32 }
%"class.std::vector.118" = type { %"struct.std::_Vector_base.119" }
%"struct.std::_Vector_base.119" = type { %"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" = type { %"class.llvm::GlobalVariable"**, %"class.llvm::GlobalVariable"**, %"class.llvm::GlobalVariable"** }
+%"struct.std::_Vector_base<const llvm::GlobalVariable *, std::allocator<const llvm::GlobalVariable *> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.std::vector.123" = type { %"struct.std::_Vector_base.124" }
%"struct.std::_Vector_base.124" = type { %"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" }
-%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.std::vector.128" = type { %"struct.std::_Vector_base.129" }
%"struct.std::_Vector_base.129" = type { %"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" }
-%"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" = type { %"class.llvm::Function"**, %"class.llvm::Function"**, %"class.llvm::Function"** }
-%"class.llvm::SmallPtrSet" = type { %"class.llvm::SmallPtrSetImpl", [33 x i8*] }
-%"class.llvm::SmallPtrSetImpl" = type { i8**, i8**, i32, i32, i32 }
+%"struct.std::_Vector_base<const llvm::Function *, std::allocator<const llvm::Function *> >::_Vector_impl" = type { ptr, ptr, ptr }
+%"class.llvm::SmallPtrSet" = type { %"class.llvm::SmallPtrSetImpl", [33 x ptr] }
+%"class.llvm::SmallPtrSetImpl" = type { ptr, ptr, i32, i32, i32 }
%"class.llvm::MMIAddrLabelMap" = type opaque
%"class.llvm::SmallVector.133" = type { %"class.llvm::SmallVectorImpl.134", %"struct.llvm::SmallVectorStorage.139" }
%"class.llvm::SmallVectorImpl.134" = type { %"class.llvm::SmallVectorTemplateBase.135" }
%"struct.llvm::AlignedCharArray.138" = type { [40 x i8] }
%"struct.llvm::SmallVectorStorage.139" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.137"] }
%"class.llvm::GCModuleInfo" = type opaque
-%"class.llvm::MachineRegisterInfo" = type { %"class.llvm::TargetRegisterInfo"*, i8, i8, %"class.llvm::IndexedMap", %"class.llvm::IndexedMap.146", %"class.llvm::MachineOperand"**, %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.std::vector.147", %"class.std::vector.123" }
-%"class.llvm::TargetRegisterInfo" = type { i32 (...)**, %"class.llvm::MCRegisterInfo", %"struct.llvm::TargetRegisterInfoDesc"*, i8**, i32*, %"class.llvm::TargetRegisterClass"**, %"class.llvm::TargetRegisterClass"** }
+%"class.llvm::MachineRegisterInfo" = type { ptr, i8, i8, %"class.llvm::IndexedMap", %"class.llvm::IndexedMap.146", ptr, %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.llvm::BitVector", %"class.std::vector.147", %"class.std::vector.123" }
+%"class.llvm::TargetRegisterInfo" = type { ptr, %"class.llvm::MCRegisterInfo", ptr, ptr, ptr, ptr, ptr }
%"struct.llvm::TargetRegisterInfoDesc" = type { i32, i8 }
-%"class.llvm::TargetRegisterClass" = type { %"class.llvm::MCRegisterClass"*, i32*, i32*, i16*, %"class.llvm::TargetRegisterClass"**, void (%"class.llvm::ArrayRef"*, %"class.llvm::MachineFunction"*)* }
-%"class.llvm::ArrayRef" = type { i16*, i64 }
+%"class.llvm::TargetRegisterClass" = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%"class.llvm::ArrayRef" = type { ptr, i64 }
%"class.llvm::IndexedMap" = type { %"class.std::vector.140", %"struct.std::pair.145", %"struct.llvm::VirtReg2IndexFunctor" }
%"class.std::vector.140" = type { %"struct.std::_Vector_base.141" }
%"struct.std::_Vector_base.141" = type { %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" = type { %"struct.std::pair.145"*, %"struct.std::pair.145"*, %"struct.std::pair.145"* }
-%"struct.std::pair.145" = type { %"class.llvm::TargetRegisterClass"*, %"class.llvm::MachineOperand"* }
-%"class.llvm::MachineOperand" = type { i8, [3 x i8], %union.anon, %"class.llvm::MachineInstr"*, %union.anon.188 }
+%"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *>, std::allocator<std::pair<const llvm::TargetRegisterClass *, llvm::MachineOperand *> > >::_Vector_impl" = type { ptr, ptr, ptr }
+%"struct.std::pair.145" = type { ptr, ptr }
+%"class.llvm::MachineOperand" = type { i8, [3 x i8], %union.anon, ptr, %union.anon.188 }
%union.anon = type { i32 }
%union.anon.188 = type { %struct.anon }
-%struct.anon = type { %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"* }
+%struct.anon = type { ptr, ptr }
%"struct.llvm::VirtReg2IndexFunctor" = type { i8 }
%"class.llvm::IndexedMap.146" = type { %"class.std::vector.147", %"struct.std::pair.152", %"struct.llvm::VirtReg2IndexFunctor" }
%"class.std::vector.147" = type { %"struct.std::_Vector_base.148" }
%"struct.std::_Vector_base.148" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { %"struct.std::pair.152"*, %"struct.std::pair.152"*, %"struct.std::pair.152"* }
+%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { ptr, ptr, ptr }
%"struct.std::pair.152" = type { i32, i32 }
-%"class.llvm::BitVector" = type { i64*, i32, i32 }
-%"struct.llvm::MachineFunctionInfo" = type { i32 (...)** }
+%"class.llvm::BitVector" = type { ptr, i32, i32 }
+%"struct.llvm::MachineFunctionInfo" = type { ptr }
%"class.llvm::MachineFrameInfo" = type opaque
-%"class.llvm::MachineConstantPool" = type { %"class.llvm::DataLayout"*, i32, %"class.std::vector.153", %"class.llvm::DenseSet" }
+%"class.llvm::MachineConstantPool" = type { ptr, i32, %"class.std::vector.153", %"class.llvm::DenseSet" }
%"class.llvm::DataLayout" = type opaque
%"class.std::vector.153" = type { %"struct.std::_Vector_base.154" }
%"struct.std::_Vector_base.154" = type { %"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" = type { %"class.llvm::MachineConstantPoolEntry"*, %"class.llvm::MachineConstantPoolEntry"*, %"class.llvm::MachineConstantPoolEntry"* }
+%"struct.std::_Vector_base<llvm::MachineConstantPoolEntry, std::allocator<llvm::MachineConstantPoolEntry> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.llvm::MachineConstantPoolEntry" = type { %union.anon.158, i32 }
-%union.anon.158 = type { %"class.llvm::Constant"* }
+%union.anon.158 = type { ptr }
%"class.llvm::Constant" = type { %"class.llvm::User" }
%"class.llvm::DenseSet" = type { %"class.llvm::DenseMap.159" }
-%"class.llvm::DenseMap.159" = type { %"struct.std::pair.162"*, i32, i32, i32 }
-%"struct.std::pair.162" = type { %"class.llvm::MachineConstantPoolValue"*, i8 }
-%"class.llvm::MachineConstantPoolValue" = type { i32 (...)**, %"class.llvm::Type"* }
+%"class.llvm::DenseMap.159" = type { ptr, i32, i32, i32 }
+%"struct.std::pair.162" = type { ptr, i8 }
+%"class.llvm::MachineConstantPoolValue" = type { ptr, ptr }
%"class.llvm::MachineJumpTableInfo" = type opaque
%"class.std::vector.163" = type { %"struct.std::_Vector_base.164" }
%"struct.std::_Vector_base.164" = type { %"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" = type { %"class.llvm::MachineBasicBlock"**, %"class.llvm::MachineBasicBlock"**, %"class.llvm::MachineBasicBlock"** }
+%"struct.std::_Vector_base<llvm::MachineBasicBlock *, std::allocator<llvm::MachineBasicBlock *> >::_Vector_impl" = type { ptr, ptr, ptr }
%"class.llvm::Recycler" = type { %"class.llvm::iplist.168" }
-%"class.llvm::iplist.168" = type { %"struct.llvm::ilist_traits.169", %"struct.llvm::RecyclerStruct"* }
+%"class.llvm::iplist.168" = type { %"struct.llvm::ilist_traits.169", ptr }
%"struct.llvm::ilist_traits.169" = type { %"struct.llvm::RecyclerStruct" }
-%"struct.llvm::RecyclerStruct" = type { %"struct.llvm::RecyclerStruct"*, %"struct.llvm::RecyclerStruct"* }
+%"struct.llvm::RecyclerStruct" = type { ptr, ptr }
%"class.llvm::ArrayRecycler" = type { %"class.llvm::SmallVector.174" }
%"class.llvm::SmallVector.174" = type { %"class.llvm::SmallVectorImpl.175", %"struct.llvm::SmallVectorStorage.179" }
%"class.llvm::SmallVectorImpl.175" = type { %"class.llvm::SmallVectorTemplateBase.176" }
%"struct.llvm::SmallVectorStorage.179" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.178"] }
%"class.llvm::Recycler.180" = type { %"class.llvm::iplist.168" }
%"struct.llvm::ilist.181" = type { %"class.llvm::iplist.182" }
-%"class.llvm::iplist.182" = type { %"struct.llvm::ilist_traits.183", %"class.llvm::MachineBasicBlock"* }
+%"class.llvm::iplist.182" = type { %"struct.llvm::ilist_traits.183", ptr }
%"struct.llvm::ilist_traits.183" = type { %"class.llvm::ilist_half_node.1" }
%"class.llvm::ArrayRecycler<llvm::MachineOperand, 8>::Capacity" = type { i8 }
%"class.llvm::ConstantInt" = type { %"class.llvm::Constant", %"class.llvm::APInt" }
%"class.llvm::APInt" = type { i32, %union.anon.189 }
%union.anon.189 = type { i64 }
%"class.llvm::ConstantFP" = type { %"class.llvm::Constant", %"class.llvm::APFloat" }
-%"class.llvm::APFloat" = type { %"struct.llvm::fltSemantics"*, %"union.llvm::APFloat::Significand", i16, i8 }
+%"class.llvm::APFloat" = type { ptr, %"union.llvm::APFloat::Significand", i16, i8 }
%"struct.llvm::fltSemantics" = type opaque
%"union.llvm::APFloat::Significand" = type { i64 }
%"class.llvm::BlockAddress" = type { %"class.llvm::Constant" }
%"class.llvm::hash_code" = type { i64 }
%"struct.llvm::hashing::detail::hash_combine_recursive_helper" = type { [64 x i8], %"struct.llvm::hashing::detail::hash_state", i64 }
%"struct.llvm::hashing::detail::hash_state" = type { i64, i64, i64, i64, i64, i64, i64, i64 }
-%"class.llvm::PrintReg" = type { %"class.llvm::TargetRegisterInfo"*, i32, i32 }
+%"class.llvm::PrintReg" = type { ptr, i32, i32 }
%"class.llvm::PseudoSourceValue" = type { %"class.llvm::Value" }
%"class.llvm::FoldingSetNodeID" = type { %"class.llvm::SmallVector.194" }
%"class.llvm::SmallVector.194" = type { [28 x i8], %"struct.llvm::SmallVectorStorage.200" }
%"struct.llvm::SmallVectorStorage.200" = type { [31 x %"struct.llvm::AlignedCharArrayUnion.198"] }
-%"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList" = type { %"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList"* }
-%"class.llvm::ilist_iterator.202" = type { %"class.llvm::MachineInstr"* }
-%"class.llvm::TargetInstrInfo" = type { i32 (...)**, [28 x i8], i32, i32 }
+%"struct.llvm::ArrayRecycler<llvm::MachineOperand, 8>::FreeList" = type { ptr }
+%"class.llvm::ilist_iterator.202" = type { ptr }
+%"class.llvm::TargetInstrInfo" = type { ptr, [28 x i8], i32, i32 }
%"struct.std::pair.203" = type { i8, i8 }
%"class.llvm::SmallVectorImpl.195" = type { %"class.llvm::SmallVectorTemplateBase.196" }
%"class.llvm::SmallVectorTemplateBase.196" = type { %"class.llvm::SmallVectorTemplateCommon.197" }
%"class.llvm::SmallVectorTemplateCommon.197" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion.198" }
-%"class.llvm::AliasAnalysis" = type { i32 (...)**, %"class.llvm::DataLayout"*, %"class.llvm::TargetLibraryInfo"*, %"class.llvm::AliasAnalysis"* }
+%"class.llvm::AliasAnalysis" = type { ptr, ptr, ptr, ptr }
%"class.llvm::TargetLibraryInfo" = type opaque
-%"struct.llvm::AliasAnalysis::Location" = type { %"class.llvm::Value"*, i64, %"class.llvm::MDNode"* }
+%"struct.llvm::AliasAnalysis::Location" = type { ptr, i64, ptr }
%"class.llvm::DIVariable" = type { %"class.llvm::DIDescriptor" }
-%"class.llvm::DIDescriptor" = type { %"class.llvm::MDNode"* }
+%"class.llvm::DIDescriptor" = type { ptr }
%"class.llvm::DIScope" = type { %"class.llvm::DIDescriptor" }
-%"class.llvm::ArrayRef.208" = type { i32*, i64 }
+%"class.llvm::ArrayRef.208" = type { ptr, i64 }
%"class.llvm::SmallVector.209" = type { %"class.llvm::SmallVectorImpl.210", %"struct.llvm::SmallVectorStorage.214" }
%"class.llvm::SmallVectorImpl.210" = type { %"class.llvm::SmallVectorTemplateBase.211" }
%"class.llvm::SmallVectorTemplateBase.211" = type { %"class.llvm::SmallVectorTemplateCommon.212" }
%"struct.llvm::AlignedCharArrayUnion.213" = type { %"struct.llvm::AlignedCharArray" }
%"struct.llvm::SmallVectorStorage.214" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.213"] }
%"class.llvm::Twine" = type { %"union.llvm::Twine::Child", %"union.llvm::Twine::Child", i8, i8 }
-%"union.llvm::Twine::Child" = type { %"class.llvm::Twine"* }
+%"union.llvm::Twine::Child" = type { ptr }
%"struct.std::random_access_iterator_tag" = type { i8 }
-declare void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineOperand"*)
+declare void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(ptr, ptr)
-declare void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineOperand"*)
+declare void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(ptr, ptr)
-declare zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"*, i32 zeroext, i32 zeroext)
+declare zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(ptr, i32 zeroext, i32 zeroext)
-define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(%"class.llvm::MachineOperand"* %this, i32 zeroext %Reg, %"class.llvm::TargetRegisterInfo"* %TRI) align 2 {
+define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(ptr %this, i32 zeroext %Reg, ptr %TRI) align 2 {
entry:
- %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 1
- %0 = bitcast [3 x i8]* %SubReg_TargetFlags.i to i24*
- %bf.load.i = load i24, i24* %0, align 1
+ %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 1
+ %bf.load.i = load i24, ptr %SubReg_TargetFlags.i, align 1
%bf.lshr.i = lshr i24 %bf.load.i, 12
%tobool = icmp eq i24 %bf.lshr.i, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
%bf.cast.i = zext i24 %bf.lshr.i to i32
- %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
- %call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"* %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
- %bf.load.i10 = load i24, i24* %0, align 1
+ %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", ptr %TRI, i64 0, i32 1
+ %call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(ptr %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
+ %bf.load.i10 = load i24, ptr %SubReg_TargetFlags.i, align 1
%bf.clear.i = and i24 %bf.load.i10, 4095
- store i24 %bf.clear.i, i24* %0, align 1
+ store i24 %bf.clear.i, ptr %SubReg_TargetFlags.i, align 1
br label %if.end
if.end: ; preds = %entry, %if.then
%Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
- %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
- %1 = load i32, i32* %RegNo.i.i, align 4
- %cmp.i = icmp eq i32 %1, %Reg.addr.0
+ %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 2, i32 0
+ %0 = load i32, ptr %RegNo.i.i, align 4
+ %cmp.i = icmp eq i32 %0, %Reg.addr.0
br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
if.end.i: ; preds = %if.end
- %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 3
- %2 = load %"class.llvm::MachineInstr"*, %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
- %tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
+ %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %this, i64 0, i32 3
+ %1 = load ptr, ptr %ParentMI.i.i, align 8
+ %tobool.i = icmp eq ptr %1, null
br i1 %tobool.i, label %if.end13.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
- %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", %"class.llvm::MachineInstr"* %2, i64 0, i32 2
- %3 = load %"class.llvm::MachineBasicBlock"*, %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
- %tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
+ %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", ptr %1, i64 0, i32 2
+ %2 = load ptr, ptr %Parent.i.i, align 8
+ %tobool5.i = icmp eq ptr %2, null
br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
if.then6.i: ; preds = %if.then3.i
- %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
- %4 = load %"class.llvm::MachineFunction"*, %"class.llvm::MachineFunction"** %xParent.i.i, align 8
- %tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
+ %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", ptr %2, i64 0, i32 4
+ %3 = load ptr, ptr %xParent.i.i, align 8
+ %tobool8.i = icmp eq ptr %3, null
br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
if.then9.i: ; preds = %if.then6.i
- %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", %"class.llvm::MachineFunction"* %4, i64 0, i32 5
- %5 = load %"class.llvm::MachineRegisterInfo"*, %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
- tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
- store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
- tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
+ %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", ptr %3, i64 0, i32 5
+ %4 = load ptr, ptr %RegInfo.i.i, align 8
+ tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(ptr %4, ptr %this)
+ store i32 %Reg.addr.0, ptr %RegNo.i.i, align 4
+ tail call void @_ZN4llvm19MachineRegisterInfo22addRegOperandToUseListEPNS_14MachineOperandE(ptr %4, ptr %this)
br label %_ZN4llvm14MachineOperand6setRegEj.exit
if.end13.i: ; preds = %if.then6.i, %if.then3.i, %if.end.i
- store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
+ store i32 %Reg.addr.0, ptr %RegNo.i.i, align 4
br label %_ZN4llvm14MachineOperand6setRegEj.exit
_ZN4llvm14MachineOperand6setRegEj.exit: ; preds = %if.end, %if.then9.i, %if.end13.i
define fastcc void @func() nounwind {
entry:
- store i32 42, i32* @nextIdx
+ store i32 42, ptr @nextIdx
ret void
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define weak_odr void @_D4core6atomic49__T11atomicStoreVE4core6atomic11MemoryOrder3ThThZ11atomicStoreFNaNbKOhhZv(i8* %val_arg, i8 zeroext %newval_arg) {
+define weak_odr void @_D4core6atomic49__T11atomicStoreVE4core6atomic11MemoryOrder3ThThZ11atomicStoreFNaNbKOhhZv(ptr %val_arg, i8 zeroext %newval_arg) {
entry:
%newval = alloca i8
%ordering = alloca i32, align 4
- store i8 %newval_arg, i8* %newval
- %tmp = load i8, i8* %newval
- store atomic volatile i8 %tmp, i8* %val_arg seq_cst, align 1
+ store i8 %newval_arg, ptr %newval
+ %tmp = load i8, ptr %newval
+ store atomic volatile i8 %tmp, ptr %val_arg seq_cst, align 1
ret void
}
define void @bug() {
entry:
- %x = load ppc_fp128, ppc_fp128* @ld2, align 16
+ %x = load ppc_fp128, ptr @ld2, align 16
%tmp70 = frem ppc_fp128 0xM00000000000000000000000000000000, %x
call void @other(ppc_fp128 %tmp70)
unreachable
@_D4core4time12TickDuration11ticksPerSecyl = global i64 0
@.str5 = internal unnamed_addr constant [40 x i8] c"..\5Cldc\5Cruntime\5Cdruntime\5Csrc\5Ccore\5Ctime.d\00"
@.str83 = internal constant [10 x i8] c"null this\00"
-@.modulefilename = internal constant { i32, i8* } { i32 39, i8* getelementptr inbounds ([40 x i8], [40 x i8]* @.str5, i32 0, i32 0) }
+@.modulefilename = internal constant { i32, ptr } { i32 39, ptr @.str5 }
-declare i8* @_d_assert_msg({ i32, i8* }, { i32, i8* }, i32)
+declare ptr @_d_assert_msg({ i32, ptr }, { i32, ptr }, i32)
-define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(%core.time.TickDuration* %.this_arg) {
+define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(ptr %.this_arg) {
entry:
%unitsPerSec = alloca i64, align 8
- %tmp = icmp ne %core.time.TickDuration* %.this_arg, null
+ %tmp = icmp ne ptr %.this_arg, null
br i1 %tmp, label %noassert, label %assert
assert: ; preds = %entry
- %tmp1 = load { i32, i8* }, { i32, i8* }* @.modulefilename
- %0 = call i8* @_d_assert_msg({ i32, i8* } { i32 9, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str83, i32 0, i32 0) }, { i32, i8* } %tmp1, i32 1586)
+ %tmp1 = load { i32, ptr }, ptr @.modulefilename
+ %0 = call ptr @_d_assert_msg({ i32, ptr } { i32 9, ptr @.str83 }, { i32, ptr } %tmp1, i32 1586)
unreachable
noassert: ; preds = %entry
- %tmp2 = getelementptr %core.time.TickDuration, %core.time.TickDuration* %.this_arg, i32 0, i32 0
- %tmp3 = load i64, i64* %tmp2
+ %tmp3 = load i64, ptr %.this_arg
%tmp4 = sitofp i64 %tmp3 to ppc_fp128
- %tmp5 = load i64, i64* @_D4core4time12TickDuration11ticksPerSecyl
+ %tmp5 = load i64, ptr @_D4core4time12TickDuration11ticksPerSecyl
%tmp6 = sitofp i64 %tmp5 to ppc_fp128
%tmp7 = fdiv ppc_fp128 %tmp6, 0xM80000000000000000000000000000000
%tmp8 = fdiv ppc_fp128 %tmp4, %tmp7
%core.time.TickDuration.37.125 = type { i64 }
-define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(%core.time.TickDuration.37.125* %.this_arg) {
+define weak_odr fastcc i64 @_D4core4time12TickDuration30__T2toVAyaa7_7365636f6e6473TlZ2toMxFNaNbNfZl(ptr %.this_arg) {
entry:
br i1 undef, label %noassert, label %assert
for.end1042: ; preds = %for.cond968.preheader, %for.cond964.preheader, %entry
%0 = phi i32 [ undef, %for.cond964.preheader ], [ undef, %for.cond968.preheader ], [ undef, %entry ]
- %1 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !285, !tbaa !286
+ %1 = load i32, ptr @grid_points, align 4, !dbg !285, !tbaa !286
tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !268, metadata !290), !dbg !291
%sub10454270 = add nsw i32 %0, -1, !dbg !291
%cmp10464271 = icmp sgt i32 %sub10454270, 1, !dbg !291
%struct.CS = type { i32 }
@_ZL3glb = internal global [1 x %struct.CS] zeroinitializer, align 4
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
; Static-initializer stub (placed in .text.startup, referenced by
; @llvm.global_ctors above): calls @_Z4funcv with an sret out-pointer to
; element 0 of the global array @_ZL3glb.  Under opaque pointers the
; zero-index getelementptr constant-expression collapses to the bare
; global (@_ZL3glb), which is what the + line records.
define internal void @__cxx_global_var_init() section ".text.startup" {
entry:
- call void @_Z4funcv(%struct.CS* sret(%struct.CS) getelementptr inbounds ([1 x %struct.CS], [1 x %struct.CS]* @_ZL3glb, i64 0, i64 0))
+ call void @_Z4funcv(ptr sret(%struct.CS) @_ZL3glb)
ret void
}
; CHECK-NEXT: nop
; Function Attrs: nounwind
; C++ `CS func()` returning via sret: zero-initializes the single i32 field
; of the result object.  The field-0 getelementptr on the - side is dropped
; entirely on the + side because with opaque pointers the struct's first
; field lives at the sret pointer itself; the store goes straight through
; %agg.result.  Interface (noalias sret argument) is unchanged.
-define void @_Z4funcv(%struct.CS* noalias sret(%struct.CS) %agg.result) #0 {
+define void @_Z4funcv(ptr noalias sret(%struct.CS) %agg.result) #0 {
entry:
- %a_ = getelementptr inbounds %struct.CS, %struct.CS* %agg.result, i32 0, i32 0
- store i32 0, i32* %a_, align 4
+ store i32 0, ptr %agg.result, align 4
ret void
}
%"class.std::__1::locale::id.1580.4307.4610.8491" = type { %"struct.std::__1::once_flag.1579.4306.4609.8490", i32 }
%"struct.std::__1::once_flag.1579.4306.4609.8490" = type { i64 }
%"class.Foam::IOerror.1581.4308.4611.8505" = type { %"class.Foam::error.1535.4262.4565.8504", %"class.Foam::string.1530.4257.4560.8499", i32, i32 }
-%"class.Foam::error.1535.4262.4565.8504" = type { %"class.std::exception.1523.4250.4553.8492", [36 x i8], %"class.Foam::string.1530.4257.4560.8499", %"class.Foam::string.1530.4257.4560.8499", i32, i8, i8, %"class.Foam::OStringStream.1534.4261.4564.8503"* }
-%"class.std::exception.1523.4250.4553.8492" = type { i32 (...)** }
+%"class.Foam::error.1535.4262.4565.8504" = type { %"class.std::exception.1523.4250.4553.8492", [36 x i8], %"class.Foam::string.1530.4257.4560.8499", %"class.Foam::string.1530.4257.4560.8499", i32, i8, i8, ptr }
+%"class.std::exception.1523.4250.4553.8492" = type { ptr }
%"class.Foam::OStringStream.1534.4261.4564.8503" = type { %"class.Foam::OSstream.1533.4260.4563.8502" }
-%"class.Foam::OSstream.1533.4260.4563.8502" = type { [50 x i8], %"class.Foam::fileName.1531.4258.4561.8500", %"class.std::__1::basic_ostream.1532.4259.4562.8501"* }
+%"class.Foam::OSstream.1533.4260.4563.8502" = type { [50 x i8], %"class.Foam::fileName.1531.4258.4561.8500", ptr }
%"class.Foam::fileName.1531.4258.4561.8500" = type { %"class.Foam::string.1530.4257.4560.8499" }
-%"class.std::__1::basic_ostream.1532.4259.4562.8501" = type { i32 (...)**, [148 x i8] }
+%"class.std::__1::basic_ostream.1532.4259.4562.8501" = type { ptr, [148 x i8] }
%"class.Foam::string.1530.4257.4560.8499" = type { %"class.std::__1::basic_string.1529.4256.4559.8498" }
%"class.std::__1::basic_string.1529.4256.4559.8498" = type { %"class.std::__1::__compressed_pair.1528.4255.4558.8497" }
%"class.std::__1::__compressed_pair.1528.4255.4558.8497" = type { %"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" }
%"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" }
%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" = type { %union.anon.1525.4252.4555.8494 }
%union.anon.1525.4252.4555.8494 = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" }
-%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" = type { i64, i64, i8* }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" = type { i64, i64, ptr }
@.str3 = external unnamed_addr constant [16 x i8], align 1
@_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id.1580.4307.4610.8491"
; External declaration only (no pointer-typed operands, so this line is
; identical on both sides of the opaque-pointer migration).
; Function Attrs: inlinehint
declare void @_ZN4Foam8fileName12stripInvalidEv() #2 align 2
-define void @_ZN4Foam3CSVINS_6VectorIdEEE4readEv() #0 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_ZN4Foam3CSVINS_6VectorIdEEE4readEv() #0 align 2 personality ptr @__gxx_personality_v0 {
entry:
invoke void @_ZN4Foam6string6expandEb()
to label %invoke.cont unwind label %lpad
to label %invoke.cont2 unwind label %lpad.i
lpad.i: ; preds = %_ZN4Foam6stringC2ERKS0_.exit.i
- %0 = landingpad { i8*, i32 }
+ %0 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
to label %if.end unwind label %lpad5
lpad: ; preds = %if.then.i.i.i.i176, %entry
- %1 = landingpad { i8*, i32 }
+ %1 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
lpad3: ; preds = %invoke.cont2
- %2 = landingpad { i8*, i32 }
+ %2 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
lpad5: ; preds = %memptr.end.i, %invoke.cont8, %if.then
- %3 = landingpad { i8*, i32 }
+ %3 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
unreachable
lpad.i.i.i: ; preds = %.noexc205
- %4 = landingpad { i8*, i32 }
+ %4 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
lpad19: ; preds = %for.body
- %5 = landingpad { i8*, i32 }
+ %5 = landingpad { ptr, i32 }
cleanup
br label %ehcleanup142
br label %vector.body
ehcleanup142: ; preds = %lpad19, %lpad.i.i.i, %lpad5, %lpad3, %lpad, %lpad.i
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
}
attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
%class.Point.1 = type { %class.Tensor.0 }
%class.Tensor.0 = type { [3 x double] }
%class.TriaObjectAccessor.57 = type { %class.TriaAccessor.56 }
-%class.TriaAccessor.56 = type { i32, i32, %class.Triangulation.55* }
-%class.Triangulation.55 = type { %class.Subscriptor, %"class.std::vector.46", %"class.std::vector", %"class.std::vector.3.8", [255 x %class.Boundary.50*], i32, %struct.TriaNumberCache.54 }
-%class.Subscriptor = type { i32 (...)**, i32, %"class.std::type_info.2"* }
-%"class.std::type_info.2" = type { i32 (...)**, i8* }
+%class.TriaAccessor.56 = type { i32, i32, ptr }
+%class.Triangulation.55 = type { %class.Subscriptor, %"class.std::vector.46", %"class.std::vector", %"class.std::vector.3.8", [255 x ptr], i32, %struct.TriaNumberCache.54 }
+%class.Subscriptor = type { ptr, i32, ptr }
+%"class.std::type_info.2" = type { ptr, ptr }
%"class.std::vector.46" = type { %"struct.std::_Vector_base.45" }
-%"struct.std::_Vector_base.45" = type { %"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" }
-%"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" = type { %class.TriangulationLevel.43**, %class.TriangulationLevel.43**, %class.TriangulationLevel.43** }
+%"struct.std::_Vector_base.45" = type { %"struct.std::_Vector_base<TriangulationLevelptr, std::allocator<TriangulationLevelptr> >::_Vector_impl.44" }
+%"struct.std::_Vector_base<TriangulationLevelptr, std::allocator<TriangulationLevelptr> >::_Vector_impl.44" = type { ptr, ptr, ptr }
%class.TriangulationLevel.43 = type { %class.TriangulationLevel.0.37, %"struct.TriangulationLevel<3>::HexesData.42" }
%class.TriangulationLevel.0.37 = type { %class.TriangulationLevel.1.31, %"struct.TriangulationLevel<2>::QuadsData.36" }
%class.TriangulationLevel.1.31 = type { %class.TriangulationLevel, %"struct.TriangulationLevel<1>::LinesData.30" }
%class.TriangulationLevel = type { %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.7.12", %"class.std::vector.12.15" }
%"class.std::vector.7.12" = type { %"struct.std::_Vector_base" }
%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" }
-%"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" = type { %"struct.std::pair.9"*, %"struct.std::pair.9"*, %"struct.std::pair.9"* }
+%"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" = type { ptr, ptr, ptr }
%"struct.std::pair.9" = type opaque
%"class.std::vector.12.15" = type { %"struct.std::_Vector_base.13.14" }
%"struct.std::_Vector_base.13.14" = type { %"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" }
-%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" = type { ptr, ptr, ptr }
%"struct.TriangulationLevel<1>::LinesData.30" = type { %"class.std::vector.17.20", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
%"class.std::vector.17.20" = type { %"struct.std::_Vector_base.18.19" }
%"struct.std::_Vector_base.18.19" = type { %"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" }
-%"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" = type { %class.Line.17*, %class.Line.17*, %class.Line.17* }
+%"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" = type { ptr, ptr, ptr }
%class.Line.17 = type { [2 x i32] }
%"class.std::vector.22.23" = type { %"struct.std::_Vector_base.23.22" }
%"struct.std::_Vector_base.23.22" = type { %"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" }
-%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" = type { ptr, ptr, ptr }
%"class.std::vector.27.26" = type { %"struct.std::_Vector_base.28.25" }
%"struct.std::_Vector_base.28.25" = type { %"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" }
-%"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" = type { i8*, i8*, i8* }
+%"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" = type { ptr, ptr, ptr }
%"class.std::vector.32.29" = type { %"struct.std::_Vector_base.33.28" }
-%"struct.std::_Vector_base.33.28" = type { %"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" }
-%"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" = type { i8**, i8**, i8** }
+%"struct.std::_Vector_base.33.28" = type { %"struct.std::_Vector_base<ptr, std::allocator<ptr> >::_Vector_impl.27" }
+%"struct.std::_Vector_base<ptr, std::allocator<ptr> >::_Vector_impl.27" = type { ptr, ptr, ptr }
%"struct.TriangulationLevel<2>::QuadsData.36" = type { %"class.std::vector.37.35", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
%"class.std::vector.37.35" = type { %"struct.std::_Vector_base.38.34" }
%"struct.std::_Vector_base.38.34" = type { %"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" }
-%"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" = type { %class.Quad.32*, %class.Quad.32*, %class.Quad.32* }
+%"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" = type { ptr, ptr, ptr }
%class.Quad.32 = type { [4 x i32] }
%"struct.TriangulationLevel<3>::HexesData.42" = type { %"class.std::vector.42.41", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29", %"class.std::vector.3.8" }
%"class.std::vector.42.41" = type { %"struct.std::_Vector_base.43.40" }
%"struct.std::_Vector_base.43.40" = type { %"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" }
-%"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" = type { %class.Hexahedron.38*, %class.Hexahedron.38*, %class.Hexahedron.38* }
+%"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" = type { ptr, ptr, ptr }
%class.Hexahedron.38= type { [6 x i32] }
%"class.std::vector" = type { %"struct.std::_Vector_base.48.48" }
%"struct.std::_Vector_base.48.48" = type { %"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" }
-%"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" = type { %class.Point.1*, %class.Point.1*, %class.Point.1* }
+%"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" = type { ptr, ptr, ptr }
%"class.std::vector.3.8" = type { %"struct.std::_Bvector_base.7" }
%"struct.std::_Bvector_base.7" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" }
-%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" = type { %"struct.std::_Bit_iterator.5", %"struct.std::_Bit_iterator.5", i64* }
+%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" = type { %"struct.std::_Bit_iterator.5", %"struct.std::_Bit_iterator.5", ptr }
%"struct.std::_Bit_iterator.5" = type { %"struct.std::_Bit_iterator_base.base.4", [4 x i8] }
-%"struct.std::_Bit_iterator_base.base.4" = type <{ i64*, i32 }>
+%"struct.std::_Bit_iterator_base.base.4" = type <{ ptr, i32 }>
%class.Boundary.50 = type opaque
%struct.TriaNumberCache.54 = type { %struct.TriaNumberCache.52.52, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
%struct.TriaNumberCache.52.52 = type { %struct.TriaNumberCache.53.51, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
%struct.TriaNumberCache.53.51 = type { i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
-define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(%class.Point.1* noalias nocapture sret(%class.Point.1) %agg.result, %class.TriaObjectAccessor.57* %this) #0 align 2 {
+define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(ptr noalias nocapture sret(%class.Point.1) %agg.result, ptr %this) #0 align 2 {
entry:
- %0 = load double, double* null, align 8
- %1 = load double, double* undef, align 8
- %call18 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
- %2 = load double, double* undef, align 8
- %call21 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
- %3 = load double, double* undef, align 8
- %call33 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 3)
- %4 = load double, double* null, align 8
- %5 = load double, double* undef, align 8
- %call45 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
- %6 = load double, double* undef, align 8
- %call48 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 0)
- %7 = load double, double* undef, align 8
- %call66 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
- %8 = load double, double* undef, align 8
+ %0 = load double, ptr null, align 8
+ %1 = load double, ptr undef, align 8
+ %call18 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 6)
+ %2 = load double, ptr undef, align 8
+ %call21 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 7)
+ %3 = load double, ptr undef, align 8
+ %call33 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 3)
+ %4 = load double, ptr null, align 8
+ %5 = load double, ptr undef, align 8
+ %call45 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 7)
+ %6 = load double, ptr undef, align 8
+ %call48 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 0)
+ %7 = load double, ptr undef, align 8
+ %call66 = tail call dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr %this, i32 zeroext 6)
+ %8 = load double, ptr undef, align 8
%mul334 = fmul double undef, 2.000000e+00
%mul579 = fmul double %2, %5
%mul597 = fmul double undef, %mul579
%add8901 = fadd double %mul8900, %add8893
%mul9767 = fmul double 0.000000e+00, %add8901
%mul9768 = fmul double %mul9767, 0x3FC5555555555555
- store double %mul4917, double* undef, align 8
- store double %mul9768, double* undef, align 8
+ store double %mul4917, ptr undef, align 8
+ store double %mul9768, ptr undef, align 8
ret void
}
; Declaration of TriaObjectAccessor<3,3>::vertex(unsigned): returns a
; dereferenceable(24) pointer (a Point<3> is 3 x double = 24 bytes, per the
; %class.Point.1 definition above).  Migration replaces both the return
; type %class.Point.1* and the `this` parameter type with opaque ptr; the
; dereferenceable attribute and zeroext on the index are preserved.
-declare dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57*, i32 zeroext) #0
+declare dereferenceable(24) ptr @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(ptr, i32 zeroext) #0
%struct.anon = type { i32 }
%struct.anon.0 = type { i32 }
-@b = common global %struct.anon* null, align 4
-@a = common global %struct.anon.0* null, align 4
+@b = common global ptr null, align 4
+@a = common global ptr null, align 4
; Function Attrs: nounwind readonly uwtable
define i32 @fn1() #0 {
entry:
- %0 = load %struct.anon*, %struct.anon** @b, align 4
- %1 = ptrtoint %struct.anon* %0 to i32
- %cmp = icmp sgt %struct.anon* %0, null
- %2 = load %struct.anon.0*, %struct.anon.0** @a, align 4
+ %0 = load ptr, ptr @b, align 4
+ %1 = ptrtoint ptr %0 to i32
+ %cmp = icmp sgt ptr %0, null
+ %2 = load ptr, ptr @a, align 4
br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
for.bodythread-pre-split: ; preds = %entry
- %aclass = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 0, i32 0
- %.pr = load i32, i32* %aclass, align 4
+ %.pr = load i32, ptr %2, align 4
br label %for.body
for.body: ; preds = %for.bodythread-pre-split, %for.body
while.body: ; preds = %while.body.lr.ph, %while.cond
%j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
- %aclass_index = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 %j.110, i32 0
- %3 = load i32, i32* %aclass_index, align 4
- %aclass5 = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 %3, i32 0
- %4 = load i32, i32* %aclass5, align 4
+ %aclass_index = getelementptr inbounds %struct.anon, ptr %0, i32 %j.110, i32 0
+ %3 = load i32, ptr %aclass_index, align 4
+ %aclass5 = getelementptr inbounds %struct.anon.0, ptr %2, i32 %3, i32 0
+ %4 = load i32, ptr %aclass5, align 4
%tobool = icmp eq i32 %4, 0
%inc7 = add nsw i32 %j.110, 1
br i1 %tobool, label %while.cond, label %if.then6
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -filetype=obj -o - | llvm-readobj --sections - | FileCheck %s
-define void @test(i32* %a) {
+define void @test(ptr %a) {
entry:
- %a.addr = alloca i32*, align 8
- store i32* %a, i32** %a.addr, align 8
- %0 = load i32*, i32** %a.addr, align 8
- %incdec.ptr = getelementptr inbounds i32, i32* %0, i32 1
- store i32* %incdec.ptr, i32** %a.addr, align 8
- %1 = load i32, i32* %0, align 4
+ %a.addr = alloca ptr, align 8
+ store ptr %a, ptr %a.addr, align 8
+ %0 = load ptr, ptr %a.addr, align 8
+ %incdec.ptr = getelementptr inbounds i32, ptr %0, i32 1
+ store ptr %incdec.ptr, ptr %a.addr, align 8
+ %1 = load i32, ptr %0, align 4
switch i32 %1, label %sw.epilog [
i32 17, label %sw.bb
i32 13, label %sw.bb1
]
sw.bb: ; preds = %entry
- %2 = load i32*, i32** %a.addr, align 8
- store i32 2, i32* %2, align 4
+ %2 = load ptr, ptr %a.addr, align 8
+ store i32 2, ptr %2, align 4
br label %sw.epilog
sw.bb1: ; preds = %entry
- %3 = load i32*, i32** %a.addr, align 8
- store i32 3, i32* %3, align 4
+ %3 = load ptr, ptr %a.addr, align 8
+ store i32 3, ptr %3, align 4
br label %sw.epilog
sw.bb2: ; preds = %entry
- %4 = load i32*, i32** %a.addr, align 8
- store i32 5, i32* %4, align 4
+ %4 = load ptr, ptr %a.addr, align 8
+ store i32 5, ptr %4, align 4
br label %sw.epilog
sw.bb3: ; preds = %entry
- %5 = load i32*, i32** %a.addr, align 8
- store i32 7, i32* %5, align 4
+ %5 = load ptr, ptr %a.addr, align 8
+ store i32 7, ptr %5, align 4
br label %sw.epilog
sw.bb4: ; preds = %entry
- %6 = load i32*, i32** %a.addr, align 8
- store i32 11, i32* %6, align 4
+ %6 = load ptr, ptr %a.addr, align 8
+ store i32 11, ptr %6, align 4
br label %sw.epilog
sw.bb5: ; preds = %entry
- %7 = load i32*, i32** %a.addr, align 8
- store i32 13, i32* %7, align 4
+ %7 = load ptr, ptr %a.addr, align 8
+ store i32 13, ptr %7, align 4
br label %sw.epilog
sw.bb6: ; preds = %entry
- %8 = load i32*, i32** %a.addr, align 8
- store i32 17, i32* %8, align 4
+ %8 = load ptr, ptr %a.addr, align 8
+ store i32 17, ptr %8, align 4
br label %sw.epilog
sw.epilog: ; preds = %entry, %sw.bb6, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
; Test case adapted from PR24216.
; PR24216-derived test: loads one <16 x i8> vector from %in, broadcasts
; bytes 2..5 across all four 4-byte lanes via shufflevector (mask repeats
; <2,3,4,5> four times), and stores the result to %out.  Only the pointer
; parameter types and the load/store pointer operands change (<16 x i8>* ->
; ptr); the shuffle mask and 16-byte alignments are untouched.
-define void @foo(<16 x i8>* nocapture readonly %in, <16 x i8>* nocapture %out) {
+define void @foo(ptr nocapture readonly %in, ptr nocapture %out) {
entry:
- %0 = load <16 x i8>, <16 x i8>* %in, align 16
+ %0 = load <16 x i8>, ptr %in, align 16
%1 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5, i32 2, i32 3, i32 4, i32 5>
- store <16 x i8> %1, <16 x i8>* %out, align 16
+ store <16 x i8> %1, ptr %out, align 16
ret void
}
br label %php_intpow10.exit, !dbg !41
if.end.i: ; preds = %if.then
- %0 = load double, double* undef, align 8, !dbg !42, !tbaa !43
+ %0 = load double, ptr undef, align 8, !dbg !42, !tbaa !43
br label %php_intpow10.exit, !dbg !47
php_intpow10.exit: ; preds = %if.end.i, %if.then.i
.lr.ph.split.split: ; preds = %.lr.ph.split.split, %.lr.ph.split
%1 = phi i32 [ %2, %.lr.ph.split.split ], [ undef, %.lr.ph.split ]
- %2 = and i32 %1, and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq ([1 x i32]* bitcast (i32* @c to [1 x i32]*), [1 x i32]* @b), i1 true, i1 false) to i32))
+ %2 = and i32 %1, and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 and (i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32)), i32 zext (i1 select (i1 icmp eq (ptr @c, ptr @b), i1 true, i1 false) to i32))
%3 = icmp slt i32 undef, 4
br i1 %3, label %.lr.ph.split.split, label %._crit_edge
unreachable
L.LB38_2452:
- %0 = load float, float* bitcast (i8* getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, %struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625* @.BSS38, i64 0, i32 0, i64 16) to float*), align 16
+ %0 = load float, ptr getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, ptr @.BSS38, i64 0, i32 0, i64 16), align 16
%1 = fpext float %0 to double
%2 = insertelement <2 x double> undef, double %1, i32 1
- store <2 x double> %2, <2 x double>* bitcast (i8* getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, %struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626* @_main1_2_, i64 0, i32 0, i64 32) to <2 x double>*), align 16
+ store <2 x double> %2, ptr getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, ptr @_main1_2_, i64 0, i32 0, i64 32), align 16
unreachable
}
unreachable
L.LB38_2452:
- %0 = load float, float* bitcast (i8* getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, %struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625* @.BSS38, i64 0, i32 0, i64 16) to float*), align 16
+ %0 = load float, ptr getelementptr inbounds (%struct.BSS38.51.4488.9911.14348.16813.20264.24701.28152.31603.35054.39491.44914.45407.46393.46886.47872.49351.49844.50830.51323.52309.53295.53788.54281.55267.55760.59211.61625, ptr @.BSS38, i64 0, i32 0, i64 16), align 16
%1 = fpext float %0 to double
%2 = insertelement <2 x double> undef, double %1, i32 1
- store <2 x double> %2, <2 x double>* bitcast (i8* getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, %struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626* @_main1_2_, i64 0, i32 0, i64 32) to <2 x double>*), align 16
+ store <2 x double> %2, ptr getelementptr inbounds (%struct_main1_2_.491.4928.10351.14788.17253.20704.25141.28592.32043.35494.39931.45354.45847.46833.47326.48312.49791.50284.51270.51763.52749.53735.54228.54721.55707.56200.59651.61626, ptr @_main1_2_, i64 0, i32 0, i64 32), align 16
unreachable
}
; RUN: llc -verify-machineinstrs -compile-twice -filetype obj \
; RUN: -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 < %s
@foo = common global i32 0, align 4
; Returns the address of the global @foo.  The - side needed an explicit
; i32* -> i8* bitcast constant expression; with opaque pointers all pointer
; casts vanish, so the + side returns @foo directly and the function's
; return type becomes ptr.
-define i8* @blah() #0 {
- ret i8* bitcast (i32* @foo to i8*)
+define ptr @blah() #0 {
+ ret ptr @foo
}
%struct.anon.1 = type { i32 }
@i = common global i32 0, align 4
-@b = common global i32* null, align 8
+@b = common global ptr null, align 8
@c = common global i32 0, align 4
@a = common global i32 0, align 4
@h = common global i32 0, align 4
@e = common global i32 0, align 4
; Function Attrs: norecurse nounwind
-define signext i32 @fn1(i32* nocapture %p1, i32 signext %p2, i32* nocapture %p3) {
+define signext i32 @fn1(ptr nocapture %p1, i32 signext %p2, ptr nocapture %p3) {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cond = icmp eq i32 %0, 8
br i1 %cond, label %if.end16, label %while.cond.preheader
while.cond.preheader: ; preds = %entry
- %1 = load i32*, i32** @b, align 8, !tbaa !5
- %2 = load i32, i32* %1, align 4, !tbaa !1
+ %1 = load ptr, ptr @b, align 8, !tbaa !5
+ %2 = load i32, ptr %1, align 4, !tbaa !1
%tobool18 = icmp eq i32 %2, 0
br i1 %tobool18, label %while.end, label %while.body.lr.ph
while.body.lr.ph: ; preds = %while.cond.preheader
- %.pre = load i32, i32* @c, align 4, !tbaa !1
+ %.pre = load i32, ptr @c, align 4, !tbaa !1
br label %while.body
while.body: ; preds = %while.body.backedge, %while.body.lr.ph
br label %while.body
sw.bb1: ; preds = %while.body, %while.body, %while.body
- store i32 2, i32* @a, align 4, !tbaa !1
+ store i32 2, ptr @a, align 4, !tbaa !1
br label %while.cond.backedge
while.cond.backedge: ; preds = %while.body, %sw.bb1
- store i32 4, i32* @a, align 4, !tbaa !1
- %.pre19 = load i32, i32* %1, align 4, !tbaa !1
+ store i32 4, ptr @a, align 4, !tbaa !1
+ %.pre19 = load i32, ptr %1, align 4, !tbaa !1
%tobool = icmp eq i32 %.pre19, 0
br i1 %tobool, label %while.end.loopexit, label %while.body.backedge
br label %while.end
while.end: ; preds = %while.end.loopexit, %while.cond.preheader
- %3 = load i32, i32* @h, align 4, !tbaa !1
+ %3 = load i32, ptr @h, align 4, !tbaa !1
%mul = mul nsw i32 %0, %3
- %4 = load i32, i32* @g, align 4, !tbaa !1
+ %4 = load i32, ptr @g, align 4, !tbaa !1
%mul4 = mul nsw i32 %mul, %4
- store i32 %mul4, i32* @j, align 4, !tbaa !1
- %5 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @f, i64 0, i32 0, i32 0), align 4, !tbaa !7
+ store i32 %mul4, ptr @j, align 4, !tbaa !1
+ %5 = load i32, ptr @f, align 4, !tbaa !7
%tobool5 = icmp eq i32 %5, 0
br i1 %tobool5, label %if.end, label %if.then
if.then: ; preds = %while.end
%div = sdiv i32 %5, %mul
- store i32 %div, i32* @g, align 4, !tbaa !1
+ store i32 %div, ptr @g, align 4, !tbaa !1
br label %if.end
if.end: ; preds = %while.end, %if.then
%6 = phi i32 [ %4, %while.end ], [ %div, %if.then ]
- %7 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @f, i64 0, i32 1, i32 0), align 4, !tbaa !10
+ %7 = load i32, ptr getelementptr inbounds (%struct.anon, ptr @f, i64 0, i32 1, i32 0), align 4, !tbaa !10
%tobool7 = icmp ne i32 %7, 0
%tobool8 = icmp ne i32 %mul4, 0
%or.cond = and i1 %tobool7, %tobool8
br i1 %or.cond17, label %if.then11, label %if.end13
if.then11: ; preds = %if.end
- store i32 %3, i32* @d, align 4, !tbaa !1
- %8 = load i32, i32* @e, align 4, !tbaa !1
- store i32 %8, i32* %p3, align 4, !tbaa !1
- %.pre20 = load i32, i32* @g, align 4, !tbaa !1
+ store i32 %3, ptr @d, align 4, !tbaa !1
+ %8 = load i32, ptr @e, align 4, !tbaa !1
+ store i32 %8, ptr %p3, align 4, !tbaa !1
+ %.pre20 = load i32, ptr @g, align 4, !tbaa !1
br label %if.end13
if.end13: ; preds = %if.then11, %if.end
br i1 %tobool14, label %if.end16, label %if.then15
if.then15: ; preds = %if.end13
- store i32 %p2, i32* %p1, align 4, !tbaa !1
+ store i32 %p2, ptr %p1, align 4, !tbaa !1
br label %if.end16
if.end16: ; preds = %entry, %if.end13, %if.then15
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-linux-gnu -mcpu=pwr8 -mattr=+vsx < %s | FileCheck %s
-define <4 x float> @bar(float* %p, float* %q) {
+define <4 x float> @bar(ptr %p, ptr %q) {
; CHECK-LABEL: bar:
; CHECK: # %bb.0:
; CHECK-NEXT: li 5, 16
; CHECK-NEXT: lxvw4x 35, 0, 3
; CHECK-NEXT: vperm 2, 2, 5, 3
; CHECK-NEXT: blr
- %1 = bitcast float* %p to <12 x float>*
- %2 = bitcast float* %q to <12 x float>*
- %3 = load <12 x float>, <12 x float>* %1, align 16
- %4 = load <12 x float>, <12 x float>* %2, align 16
- %5 = fsub <12 x float> %4, %3
- %6 = shufflevector <12 x float> %5, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
- ret <4 x float> %6
+ %1 = load <12 x float>, ptr %p, align 16
+ %2 = load <12 x float>, ptr %q, align 16
+ %3 = fsub <12 x float> %2, %1
+ %4 = shufflevector <12 x float> %3, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+ ret <4 x float> %4
}
; RUN: llc -verify-machineinstrs -mcpu=ppc64le -mtriple=powerpc64le-unknown-linux-gnu < %s
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
; Function Attrs: nounwind
define internal fastcc void @foo() unnamed_addr #1 align 2 {
entry:
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 null, i64 16, i1 false)
- %0 = load <2 x i64>, <2 x i64>* null, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 8 undef, ptr align 8 null, i64 16, i1 false)
+ %0 = load <2 x i64>, ptr null, align 8
%1 = extractelement <2 x i64> %0, i32 1
%.fca.1.insert159.i = insertvalue [2 x i64] undef, i64 %1, 1
tail call fastcc void @bar([2 x i64] undef, [2 x i64] %.fca.1.insert159.i) #2
%StructA = type { double, double, double, double, double, double, double, double }
-define void @Test(%StructA* %tmp) unnamed_addr #0 align 2 {
+define void @Test(ptr %tmp) unnamed_addr #0 align 2 {
; CHECK-LABEL: Test:
; CHECK: lxvd2x
; CHECK-NEXT: xxswapd
; CHECK: xxswapd [[OUTPUT:[0-9]+]]
; CHECK-NEXT: stxvd2x [[OUTPUT]]
bb:
- %tmp2 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 0
- %tmp4 = bitcast %StructA* %tmp to <2 x double>*
- %tmp5 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 2
- %tmp9 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 4
- %tmp11 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 5
- %tmp13 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 6
- %tmp15 = getelementptr inbounds %StructA, %StructA* %tmp, i64 0, i32 7
- %tmp18 = load double, double* %tmp2, align 16
- %tmp19 = load double, double* %tmp11, align 8
- %tmp20 = load double, double* %tmp9, align 16
+ %tmp5 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 2
+ %tmp9 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 4
+ %tmp11 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 5
+ %tmp13 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 6
+ %tmp15 = getelementptr inbounds %StructA, ptr %tmp, i64 0, i32 7
+ %tmp18 = load double, ptr %tmp, align 16
+ %tmp19 = load double, ptr %tmp11, align 8
+ %tmp20 = load double, ptr %tmp9, align 16
%tmp21 = fsub double 1.210000e+04, %tmp20
%tmp22 = fmul double %tmp18, %tmp21
%tmp23 = fadd double %tmp20, %tmp22
- %tmp24 = load double, double* %tmp13, align 16
+ %tmp24 = load double, ptr %tmp13, align 16
%tmp25 = fsub double 1.000000e+02, %tmp24
%tmp26 = fmul double %tmp18, %tmp25
%tmp27 = fadd double %tmp24, %tmp26
- %tmp28 = load double, double* %tmp15, align 8
+ %tmp28 = load double, ptr %tmp15, align 8
%tmp29 = insertelement <2 x double> undef, double %tmp19, i32 0
%tmp30 = insertelement <2 x double> %tmp29, double %tmp28, i32 1
%tmp31 = fsub <2 x double> <double 1.100000e+04, double 1.100000e+02>, %tmp30
%tmp33 = insertelement <2 x double> %tmp32, double %tmp18, i32 1
%tmp34 = fmul <2 x double> %tmp33, %tmp31
%tmp35 = fadd <2 x double> %tmp30, %tmp34
- %tmp36 = bitcast double* %tmp5 to <2 x double>*
- %tmp37 = load <2 x double>, <2 x double>* %tmp36, align 16
+ %tmp37 = load <2 x double>, ptr %tmp5, align 16
%tmp38 = fsub <2 x double> <double 1.000000e+00, double 1.000000e+04>, %tmp37
%tmp39 = fmul <2 x double> %tmp33, %tmp38
%tmp40 = fadd <2 x double> %tmp37, %tmp39
%tmp48 = fsub double 1.440000e+04, %tmp23
%tmp49 = fmul double %tmp18, %tmp48
%tmp50 = fadd double %tmp23, %tmp49
- store double %tmp50, double* %tmp9, align 16
+ store double %tmp50, ptr %tmp9, align 16
%tmp51 = fsub double 1.000000e+02, %tmp27
%tmp52 = fmul double %tmp18, %tmp51
%tmp53 = fadd double %tmp27, %tmp52
- store double %tmp53, double* %tmp13, align 16
+ store double %tmp53, ptr %tmp13, align 16
%tmp54 = extractelement <2 x double> %tmp46, i32 1
- store double %tmp54, double* %tmp15, align 8
- %tmp55 = bitcast double* %tmp5 to <2 x double>*
- store <2 x double> %tmp43, <2 x double>* %tmp55, align 16
+ store double %tmp54, ptr %tmp15, align 8
+ store <2 x double> %tmp43, ptr %tmp5, align 16
ret void
}
@g = common global double 0.000000e+00, align 8
define double @testitd() {
- %g = load double, double* @g, align 8
+ %g = load double, ptr @g, align 8
ret double %g
}
define i8 @atomic_min_i8() {
top:
%0 = alloca i8, align 2
- %1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
- store i8 -1, i8* %0, align 2
- %2 = atomicrmw min i8* %0, i8 0 acq_rel
- %3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
- ret i8 %3
+ call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+ store i8 -1, ptr %0, align 2
+ %1 = atomicrmw min ptr %0, i8 0 acq_rel
+ %2 = load atomic i8, ptr %0 acquire, align 8
+ call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+ ret i8 %2
; CHECK-LABEL: atomic_min_i8
; CHECK: lbarx [[DST:[0-9]+]],
; CHECK-NEXT: extsb [[EXT:[0-9]+]], [[DST]]
define i16 @atomic_min_i16() {
top:
%0 = alloca i16, align 2
- %1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
- store i16 -1, i16* %0, align 2
- %2 = atomicrmw min i16* %0, i16 0 acq_rel
- %3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
- ret i16 %3
+ call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+ store i16 -1, ptr %0, align 2
+ %1 = atomicrmw min ptr %0, i16 0 acq_rel
+ %2 = load atomic i16, ptr %0 acquire, align 8
+ call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+ ret i16 %2
; CHECK-LABEL: atomic_min_i16
; CHECK: lharx [[DST:[0-9]+]],
; CHECK-NEXT: extsh [[EXT:[0-9]+]], [[DST]]
define i8 @atomic_max_i8() {
top:
%0 = alloca i8, align 2
- %1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
- store i8 -1, i8* %0, align 2
- %2 = atomicrmw max i8* %0, i8 0 acq_rel
- %3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
- ret i8 %3
+ call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+ store i8 -1, ptr %0, align 2
+ %1 = atomicrmw max ptr %0, i8 0 acq_rel
+ %2 = load atomic i8, ptr %0 acquire, align 8
+ call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+ ret i8 %2
; CHECK-LABEL: atomic_max_i8
; CHECK: lbarx [[DST:[0-9]+]],
; CHECK-NEXT: extsb [[EXT:[0-9]+]], [[DST]]
define i16 @atomic_max_i16() {
top:
%0 = alloca i16, align 2
- %1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
- store i16 -1, i16* %0, align 2
- %2 = atomicrmw max i16* %0, i16 0 acq_rel
- %3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
- ret i16 %3
+ call void @llvm.lifetime.start.p0(i64 2, ptr %0)
+ store i16 -1, ptr %0, align 2
+ %1 = atomicrmw max ptr %0, i16 0 acq_rel
+ %2 = load atomic i16, ptr %0 acquire, align 8
+ call void @llvm.lifetime.end.p0(i64 2, ptr %0)
+ ret i16 %2
; CHECK-LABEL: atomic_max_i16
; CHECK: lharx [[DST:[0-9]+]],
; CHECK-NEXT: extsh [[EXT:[0-9]+]], [[DST]]
; CHECK-NEXT: bgt 0
}
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
; CHECK-NOT: xxspltw
define void @Test() {
bb4:
- %tmp = load <4 x i8>, <4 x i8>* undef
+ %tmp = load <4 x i8>, ptr undef
%tmp8 = bitcast <4 x i8> %tmp to float
%tmp18 = fmul float %tmp8, undef
%tmp19 = fsub float 0.000000e+00, %tmp18
- store float %tmp19, float* undef
+ store float %tmp19, ptr undef
%tmp22 = shufflevector <4 x i8> %tmp, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
%tmp23 = bitcast <16 x i8> %tmp22 to <4 x float>
%tmp25 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> %tmp23, <4 x float> undef)
%tmp26 = fsub <4 x float> zeroinitializer, %tmp25
%tmp27 = bitcast <4 x float> %tmp26 to <4 x i32>
- tail call void @llvm.ppc.altivec.stvx(<4 x i32> %tmp27, i8* undef)
+ tail call void @llvm.ppc.altivec.stvx(<4 x i32> %tmp27, ptr undef)
ret void
}
-declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*)
+declare void @llvm.ppc.altivec.stvx(<4 x i32>, ptr)
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
%class.FullMatrix = type { i8 }
-%class.Vector = type { float* }
+%class.Vector = type { ptr }
$test = comdat any
-define weak_odr void @test(%class.FullMatrix* %this, %class.Vector* dereferenceable(8) %p1, %class.Vector* dereferenceable(8), i1 zeroext) {
+define weak_odr void @test(ptr %this, ptr dereferenceable(8) %p1, ptr dereferenceable(8), i1 zeroext) {
entry:
- %call = tail call signext i32 @fn1(%class.FullMatrix* %this)
+ %call = tail call signext i32 @fn1(ptr %this)
%cmp10 = icmp sgt i32 %call, 0
br i1 %cmp10, label %for.body.lr.ph, label %for.cond.cleanup
for.body.lr.ph: ; preds = %entry
- %val.i = getelementptr inbounds %class.Vector, %class.Vector* %p1, i64 0, i32 0
- %2 = load float*, float** %val.i, align 8
+ %2 = load ptr, ptr %p1, align 8
%wide.trip.count = zext i32 %call to i64
%min.iters.check = icmp ult i32 %call, 4
br i1 %min.iters.check, label %for.body.preheader, label %min.iters.checked
vector.body: ; preds = %vector.body.preheader, %vector.body
%index = phi i64 [ %index.next, %vector.body ], [ 0, %vector.body.preheader ]
- %4 = getelementptr inbounds float, float* %2, i64 %index
- %5 = bitcast float* %4 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %5, align 4
- %6 = fpext <4 x float> %wide.load to <4 x ppc_fp128>
- %7 = fadd <4 x ppc_fp128> %6, %6
- %8 = fptrunc <4 x ppc_fp128> %7 to <4 x float>
- %9 = bitcast float* %4 to <4 x float>*
- store <4 x float> %8, <4 x float>* %9, align 4
+ %4 = getelementptr inbounds float, ptr %2, i64 %index
+ %wide.load = load <4 x float>, ptr %4, align 4
+ %5 = fpext <4 x float> %wide.load to <4 x ppc_fp128>
+ %6 = fadd <4 x ppc_fp128> %5, %5
+ %7 = fptrunc <4 x ppc_fp128> %6 to <4 x float>
+ store <4 x float> %7, ptr %4, align 4
%index.next = add i64 %index, 4
- %10 = icmp eq i64 %index.next, %n.vec
- br i1 %10, label %middle.block, label %vector.body
+ %8 = icmp eq i64 %index.next, %n.vec
+ br i1 %8, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%cmp.n = icmp eq i32 %3, 0
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
- %arrayidx.i = getelementptr inbounds float, float* %2, i64 %indvars.iv
- %11 = load float, float* %arrayidx.i, align 4
- %conv = fpext float %11 to ppc_fp128
+ %arrayidx.i = getelementptr inbounds float, ptr %2, i64 %indvars.iv
+ %9 = load float, ptr %arrayidx.i, align 4
+ %conv = fpext float %9 to ppc_fp128
%add = fadd ppc_fp128 %conv, %conv
%conv4 = fptrunc ppc_fp128 %add to float
- store float %conv4, float* %arrayidx.i, align 4
+ store float %conv4, ptr %arrayidx.i, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
; CHECK: lxvd2x
}
-declare signext i32 @fn1(%class.FullMatrix*) local_unnamed_addr #1
+declare signext i32 @fn1(ptr) local_unnamed_addr #1
declare void @bar(double)
-define void @foo1(i8* %p) {
+define void @foo1(ptr %p) {
entry:
- %0 = load i8, i8* %p, align 1
+ %0 = load i8, ptr %p, align 1
%conv = uitofp i8 %0 to double
call void @bar(double %conv)
ret void
; CHECK: mtfprwz
}
-define void @foo2(i16* %p) {
+define void @foo2(ptr %p) {
entry:
- %0 = load i16, i16* %p, align 2
+ %0 = load i16, ptr %p, align 2
%conv = uitofp i16 %0 to double
call void @bar(double %conv)
ret void
; RUN: llc -O2 < %s | FileCheck %s
target triple = "powerpc64le-linux-gnu"
-define void @foo(i32 %v, i16* %p) {
+define void @foo(i32 %v, ptr %p) {
%1 = and i32 %v, -65536
%2 = tail call i32 @llvm.bswap.i32(i32 %1)
%conv = trunc i32 %2 to i16
- store i16 %conv, i16* %p
+ store i16 %conv, ptr %p
ret void
; CHECK: srwi
; CHECK-BE-NEXT: stwbrx 3, 0, 4
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* @ai, align 4
+ %0 = load i32, ptr @ai, align 4
%conv.i = sext i32 %0 to i64
%or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
%conv = trunc i64 %or26.i to i32
- store i32 %conv, i32* @bi, align 4
+ store i32 %conv, ptr @bi, align 4
ret void
}
; CHECK-BE-NEXT: sthbrx 3, 0, 4
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* @as, align 2
+ %0 = load i16, ptr @as, align 2
%conv.i = sext i16 %0 to i32
%or26.i = tail call i32 @llvm.bswap.i32(i32 %conv.i)
%conv = trunc i32 %or26.i to i16
- store i16 %conv, i16* @bs, align 2
+ store i16 %conv, ptr @bs, align 2
ret void
}
; CHECK-BE-NEXT: sthbrx 3, 0, 4
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* @as, align 2
+ %0 = load i16, ptr @as, align 2
%conv.i = sext i16 %0 to i64
%or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
%conv = trunc i64 %or26.i to i16
- store i16 %conv, i16* @bs, align 2
+ store i16 %conv, ptr @bs, align 2
ret void
}
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
L.entry:
- tail call void @testFunc(i64* bitcast (i8* getelementptr inbounds (%struct.STATICS1, %struct.STATICS1* @.STATICS1, i64 0, i32 0, i64 124) to i64*), i64* bitcast (i32* @.C302_MAIN_ to i64*))
+ tail call void @testFunc(ptr getelementptr inbounds (%struct.STATICS1, ptr @.STATICS1, i64 0, i32 0, i64 124), ptr @.C302_MAIN_)
ret void
}
; Function Attrs: noinline norecurse nounwind readonly
-define signext i32 @ifunc_(i64* nocapture readonly %i) {
+define signext i32 @ifunc_(ptr nocapture readonly %i) {
; CHECK-LABEL: ifunc_:
; CHECK: # %bb.0: # %L.entry
; CHECK-NEXT: lwa 3, 0(3)
; CHECK-NEXT: blr
L.entry:
- %0 = bitcast i64* %i to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %0 = load i32, ptr %i, align 4
+ ret i32 %0
}
; Function Attrs: noinline norecurse nounwind
-define void @testFunc(i64* nocapture %r, i64* nocapture readonly %k) {
+define void @testFunc(ptr nocapture %r, ptr nocapture readonly %k) {
; CHECK-LABEL: testFunc:
; CHECK: # %bb.0: # %L.entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
L.entry:
- %0 = bitcast i64* %k to i32*
- %1 = load i32, i32* %0, align 4
- switch i32 %1, label %L.LB3_307 [
+ %0 = load i32, ptr %k, align 4
+ switch i32 %0, label %L.LB3_307 [
i32 1, label %L.LB3_307.sink.split
i32 3, label %L.LB3_307.sink.split
i32 4, label %L.LB3_321.split
L.LB3_307.sink.split: ; preds = %L.LB3_321.split, %L.entry, %L.entry, %L.entry
%.sink = phi i32 [ 5, %L.LB3_321.split ], [ -3, %L.entry ], [ -3, %L.entry ], [ -3, %L.entry ]
- %2 = bitcast i64* %r to i32*
- store i32 %.sink, i32* %2, align 4
+ store i32 %.sink, ptr %r, align 4
br label %L.LB3_307
L.LB3_307: ; preds = %L.LB3_307.sink.split, %L.entry
; RUN: llc -O2 < %s | FileCheck %s
target triple = "powerpc64le-linux-gnu"
-define void @test(i8* %p, i64 %data) {
+define void @test(ptr %p, i64 %data) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: rotldi 5, 4, 16
; CHECK-NEXT: blr
entry:
%0 = tail call i64 @llvm.bswap.i64(i64 %data)
- %ptr = bitcast i8* %p to i48*
%val = trunc i64 %0 to i48
- store i48 %val, i48* %ptr, align 1
+ store i48 %val, ptr %p, align 1
ret void
}
br label %fe_cmovznz.exit.i534.i.15
fe_cmovznz.exit.i534.i.15: ; preds = %fe_cmovznz.exit.i534.i.15, %entry
- %0 = load i64, i64* undef, align 8
- %1 = load i64, i64* undef, align 8
+ %0 = load i64, ptr undef, align 8
+ %1 = load i64, ptr undef, align 8
%conv.i69.i.i = zext i64 %0 to i128
%sub.i72.i.i = sub nsw i128 0, %conv.i69.i.i
%conv.i63.i.i = zext i64 %1 to i128
%conv1.i58.i.i = and i128 %sub.i65.lobit.i.i, 18446744073709551615
%add3.i59.i.i = add nuw nsw i128 %conv1.i58.i.i, 0
%conv4.i60.i.i = trunc i128 %add3.i59.i.i to i64
- store i64 %conv4.i60.i.i, i64* undef, align 16
+ store i64 %conv4.i60.i.i, ptr undef, align 16
br label %fe_cmovznz.exit.i534.i.15
}
@glob = local_unnamed_addr global <4 x float> zeroinitializer, align 4
; Function Attrs: norecurse nounwind
-define void @test(float %a, <4 x float>* nocapture readonly %b) {
+define void @test(float %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test
; CHECK: xscvdpspn [[REG:[0-9]+]], 1
; CHECK: xxspltw {{[0-9]+}}, [[REG]], 0
entry:
%splat.splatinsert = insertelement <4 x float> undef, float %a, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
- %0 = load <4 x float>, <4 x float>* %b, align 4
+ %0 = load <4 x float>, ptr %b, align 4
%mul = fmul <4 x float> %splat.splat, %0
- store <4 x float> %mul, <4 x float>* @glob, align 4
+ store <4 x float> %mul, ptr @glob, align 4
ret void
}
br label %forcond
forcond: ; preds = %bounds.ok, %0
- %1 = load i64, i64* %pos
- %.len1 = load i64, i64* undef
+ %1 = load i64, ptr %pos
+ %.len1 = load i64, ptr undef
%bounds.cmp = icmp ult i64 %1, %.len1
br i1 %bounds.cmp, label %bounds.ok, label %bounds.fail
bounds.ok: ; preds = %forcond
- %2 = load float, float* undef
+ %2 = load float, ptr undef
%3 = frem float 0.000000e+00, %2
- store float %3, float* undef
- %4 = load i64, i64* %pos
+ store float %3, ptr undef
+ %4 = load i64, ptr %pos
%5 = add i64 %4, 1
- store i64 %5, i64* %pos
+ store i64 %5, ptr %pos
br label %forcond
bounds.fail: ; preds = %forcond
; CHECK-NEXT: stxv vs0, 0(r3)
; CHECK-NEXT: blr
entry:
- %.size = load i32, i32* undef
+ %.size = load i32, ptr undef
%0 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %.size, i32 7)
%1 = extractvalue { i32, i1 } %0, 0
%2 = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %1, i32 0)
%4 = select i1 false, i32 0, i32 %3
%5 = xor i1 false, true
%6 = sext i1 %5 to i32
- %7 = load <4 x i16>, <4 x i16>* undef, align 2
+ %7 = load <4 x i16>, ptr undef, align 2
%8 = extractelement <4 x i16> %7, i32 0
%9 = sext i16 %8 to i32
%10 = insertelement <4 x i32> undef, i32 %9, i32 0
%25 = bitcast <4 x i32> %24 to <4 x float>
%26 = shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%27 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %x, <4 x float> %x, <4 x float> %26)
- store <4 x float> %27, <4 x float>* undef
+ store <4 x float> %27, ptr undef
ret void
}
; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECKBE
; RUN: llc < %s -mtriple=powerpc64-ibm-aix-xcoff -verify-machineinstrs | FileCheck %s --check-prefix=CHECKBE
-define void @pr39478(i64* %p64, i32* %p32) {
+define void @pr39478(ptr %p64, ptr %p32) {
; CHECKLE-LABEL: pr39478:
; CHECKLE: # %bb.0: # %entry
; CHECKLE-NEXT: lbz 3, 4(3)
; CHECKBE-NEXT: stb 3, 3(4)
; CHECKBE-NEXT: blr
entry:
- %tmp32 = load i64, i64* %p64, align 8
- %tmp33 = load i32, i32* %p32, align 4
+ %tmp32 = load i64, ptr %p64, align 8
+ %tmp33 = load i32, ptr %p32, align 4
%tmp34 = and i32 %tmp33, -256
%tmp35 = lshr i64 %tmp32, 32
%tmp36 = shl nuw nsw i64 %tmp35, 24
%tmp37 = trunc i64 %tmp36 to i32
%tmp38 = call i32 @llvm.bswap.i32(i32 %tmp37)
%tmp39 = or i32 %tmp38, %tmp34
- store i32 %tmp39, i32* %p32, align 4
+ store i32 %tmp39, ptr %p32, align 4
ret void
}
; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu < %s \
; RUN: -verify-machineinstrs | FileCheck %s
-@b = dso_local local_unnamed_addr global i64* null, align 8
+@b = dso_local local_unnamed_addr global ptr null, align 8
@a = dso_local local_unnamed_addr global i8 0, align 1
define void @testADDEPromoteResult() {
entry:
- %0 = load i64*, i64** @b, align 8
- %1 = load i64, i64* %0, align 8
- %cmp = icmp ne i64* %0, null
+ %0 = load ptr, ptr @b, align 8
+ %1 = load i64, ptr %0, align 8
+ %cmp = icmp ne ptr %0, null
%conv1 = zext i1 %cmp to i64
%add = add nsw i64 %1, %conv1
%2 = trunc i64 %add to i8
%conv2 = and i8 %2, 5
- store i8 %conv2, i8* @a, align 1
+ store i8 %conv2, ptr @a, align 1
ret void
; CHECK-LABEL: @testADDEPromoteResult
define i32 @a() {
entry:
- %call = tail call i32 bitcast (i32 (...)* @d to i32 ()*)()
- %0 = load i32, i32* @a.b, align 4
+ %call = tail call i32 @d()
+ %0 = load i32, ptr @a.b, align 4
%conv = zext i32 %0 to i64
%add = add nuw nsw i64 %conv, 6
%and = and i64 %add, 8589934575
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %call3 = tail call i32 bitcast (i32 (...)* @e to i32 ()*)()
+ %call3 = tail call i32 @e()
br label %if.end
if.end: ; preds = %if.then, %entry
- store i32 %call, i32* @a.b, align 4
+ store i32 %call, ptr @a.b, align 4
ret i32 undef
}
; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | \
; RUN: FileCheck %s
-%0 = type { [0 x i64], %1, [0 x i64], { i64, i8* }, [0 x i64] }
-%1 = type { [0 x i64], %2, [0 x i64], i64*, [0 x i64] }
+%0 = type { [0 x i64], %1, [0 x i64], { i64, ptr }, [0 x i64] }
+%1 = type { [0 x i64], %2, [0 x i64], ptr, [0 x i64] }
%2 = type { [0 x i64], %3, [0 x i64], %4, [0 x i8], i8, [7 x i8] }
-%3 = type { [0 x i64], { i64*, i64* }, [0 x i64], i64*, [0 x i8], i8, [7 x i8] }
-%4 = type { [0 x i64], { i64*, i64* }, [0 x i64], %5, [0 x i64] }
-%5 = type { [0 x i64], { i64*, i64* }, [0 x i64], i64*, [0 x i64] }
+%3 = type { [0 x i64], { ptr, ptr }, [0 x i64], ptr, [0 x i8], i8, [7 x i8] }
+%4 = type { [0 x i64], { ptr, ptr }, [0 x i64], %5, [0 x i64] }
+%5 = type { [0 x i64], { ptr, ptr }, [0 x i64], ptr, [0 x i64] }
%6 = type { [0 x i64], i64, [2 x i64] }
-%7 = type { [0 x i64], { i64*, i64* }, [0 x i64], %8, [0 x i64] }
-%8 = type { [0 x i64], %9*, [0 x i32], { i32, i32 }, [0 x i8], i8, [7 x i8] }
+%7 = type { [0 x i64], { ptr, ptr }, [0 x i64], %8, [0 x i64] }
+%8 = type { [0 x i64], ptr, [0 x i32], { i32, i32 }, [0 x i8], i8, [7 x i8] }
%9 = type { [0 x i64], i64, [0 x i64], [0 x %10], [0 x i8], %11 }
%10 = type { [0 x i8], i8, [31 x i8] }
%11 = type {}
%12 = type { [0 x i64], %13, [0 x i32], i32, [0 x i32], i32, [0 x i32] }
%13 = type { [0 x i8], i8, [23 x i8] }
%14 = type { [0 x i64], i64, [0 x i64], %15, [0 x i32], i32, [0 x i8], i8, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [0 x i8], { i8, i8 }, [7 x i8] }
-%15 = type { [0 x i64], { i64*, i64 }, [0 x i64], i64, [0 x i64] }
+%15 = type { [0 x i64], { ptr, i64 }, [0 x i64], i64, [0 x i64] }
%16 = type { [0 x i64], %17, [0 x i64], %18, [0 x i64], %19, [0 x i64], i64, [0 x i8], { i8, i8 }, [6 x i8] }
%17 = type { [0 x i32], i32, [27 x i32] }
%18 = type { [0 x i64], i64, [6 x i64] }
%19 = type { [0 x i8], i8, [103 x i8] }
-%20 = type { [0 x i64], { i64*, i64* }*, [0 x i64], %7**, [0 x i64], i64**, [0 x i64] }
-%21 = type { [0 x i64], i64, [0 x i64], void (i32, %21*)*, [0 x i64], [2 x i64], [0 x i64] }
+%20 = type { [0 x i64], ptr, [0 x i64], ptr, [0 x i64], ptr, [0 x i64] }
+%21 = type { [0 x i64], i64, [0 x i64], ptr, [0 x i64], [2 x i64], [0 x i64] }
%22 = type { [0 x i8] }
-@var = external dso_local unnamed_addr constant <{ i8*, [8 x i8], i8*, [16 x i8] }>, align 8
+@var = external dso_local unnamed_addr constant <{ ptr, [8 x i8], ptr, [16 x i8] }>, align 8
-declare dso_local fastcc { i64*, i8* } @test2(%0**) unnamed_addr
+declare dso_local fastcc { ptr, ptr } @test2(ptr) unnamed_addr
-define void @test(%6* %arg, %7* %arg1, %12* %arg2) unnamed_addr personality i32 (i32, i32, i64, %21*, %22*)* @personality {
+define void @test(ptr %arg, ptr %arg1, ptr %arg2) unnamed_addr personality ptr @personality {
; CHECK-LABEL: test:
; CHECK: .cfi_personality 148, DW.ref.personality
; CHECK-NEXT: .cfi_lsda 20, .Lexception0
br label %bb12
bb9: ; preds = %bb3
- %tmp = call i8 @test5(%14* noalias nonnull readonly align 8 dereferenceable(64) undef), !range !0
+ %tmp = call i8 @test5(ptr noalias nonnull readonly align 8 dereferenceable(64) undef), !range !0
%tmp10 = zext i8 %tmp to i24
%tmp11 = shl nuw nsw i24 %tmp10, 8
br label %bb12
bb12: ; preds = %bb9, %bb8, %bb7, %bb6, %bb5, %bb3
%tmp13 = phi i24 [ 1024, %bb8 ], [ 768, %bb7 ], [ 512, %bb6 ], [ 256, %bb5 ], [ %tmp11, %bb9 ], [ 0, %bb3 ]
- %tmp14 = call fastcc align 8 dereferenceable(288) %16* @test3(%20* noalias nonnull readonly align 8 dereferenceable(24) undef, i24 %tmp13)
+ %tmp14 = call fastcc align 8 dereferenceable(288) ptr @test3(ptr noalias nonnull readonly align 8 dereferenceable(24) undef, i24 %tmp13)
br label %bb22
bb15: ; No predecessors!
- %tmp16 = invoke fastcc { i64*, i8* } @test2(%0** nonnull align 8 dereferenceable(8) undef)
+ %tmp16 = invoke fastcc { ptr, ptr } @test2(ptr nonnull align 8 dereferenceable(8) undef)
to label %bb17 unwind label %bb18
bb17: ; preds = %bb15
- invoke void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }* noalias readonly align 8 dereferenceable(40) bitcast (<{ i8*, [8 x i8], i8*, [16 x i8] }>* @var to { [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*))
+ invoke void @test4(ptr noalias readonly align 8 dereferenceable(40) @var)
to label %bb23 unwind label %bb25
bb18: ; preds = %bb15
- %tmp19 = landingpad { i8*, i32 }
+ %tmp19 = landingpad { ptr, i32 }
cleanup
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
bb20: ; No predecessors!
- invoke void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }* noalias readonly align 8 dereferenceable(40) bitcast (<{ i8*, [8 x i8], i8*, [16 x i8] }>* @var to { [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*))
+ invoke void @test4(ptr noalias readonly align 8 dereferenceable(40) @var)
to label %bb24 unwind label %bb25
bb21: ; preds = %bb
unreachable
bb25: ; preds = %bb20, %bb17
- %tmp26 = landingpad { i8*, i32 }
+ %tmp26 = landingpad { ptr, i32 }
cleanup
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
}
-declare dso_local fastcc %16* @test3(%20*, i24) unnamed_addr
+declare dso_local fastcc ptr @test3(ptr, i24) unnamed_addr
-declare i32 @personality(i32, i32, i64, %21*, %22*) unnamed_addr
+declare i32 @personality(i32, i32, i64, ptr, ptr) unnamed_addr
-declare void @test4({ [0 x i64], { [0 x i8]*, i64 }, [0 x i64], { [0 x i8]*, i64 }, [0 x i32], i32, [0 x i32], i32, [0 x i32] }*) unnamed_addr
+declare void @test4(ptr) unnamed_addr
-declare i8 @test5(%14*) unnamed_addr
+declare i8 @test5(ptr) unnamed_addr
!0 = !{i8 0, i8 5}
; REQUIRES: asserts
define protected swiftcc void @"$s22LanguageServerProtocol13HoverResponseV8contents5rangeAcA13MarkupContentV_SnyAA8PositionVGSgtcfC"() {
- %1 = load <2 x i64>, <2 x i64>* undef, align 16
- %2 = load i1, i1* undef, align 8
+ %1 = load <2 x i64>, ptr undef, align 16
+ %2 = load i1, ptr undef, align 8
%3 = insertelement <2 x i1> undef, i1 %2, i32 0
%4 = shufflevector <2 x i1> %3, <2 x i1> undef, <2 x i32> zeroinitializer
%5 = select <2 x i1> %4, <2 x i64> zeroinitializer, <2 x i64> %1
- store <2 x i64> %5, <2 x i64>* undef, align 8
+ store <2 x i64> %5, ptr undef, align 8
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
-define void @f(i8*, i8*, i64*) {
+define void @f(ptr, ptr, ptr) {
; Check we don't assert and this is not a Hardware Loop
; CHECK-LABEL: f:
; CHECK: # %bb.0:
; CHECK-NEXT: std 3, 8(5)
; CHECK-NEXT: blr
- %4 = icmp eq i8* %0, %1
+ %4 = icmp eq ptr %0, %1
br i1 %4, label %9, label %5
5: ; preds = %3
- %6 = getelementptr inbounds i64, i64* %2, i64 1
- %7 = load i64, i64* %6, align 8
+ %6 = getelementptr inbounds i64, ptr %2, i64 1
+ %7 = load i64, ptr %6, align 8
br label %10
8: ; preds = %10
- store i64 %14, i64* %6, align 8
+ store i64 %14, ptr %6, align 8
br label %9
9: ; preds = %8, %3
10: ; preds = %5, %10
%11 = phi i64 [ %7, %5 ], [ %14, %10 ]
%12 = phi i32 [ 0, %5 ], [ %15, %10 ]
- %13 = phi i8* [ %0, %5 ], [ %16, %10 ]
+ %13 = phi ptr [ %0, %5 ], [ %16, %10 ]
%14 = shl nsw i64 %11, 4
%15 = add nuw nsw i32 %12, 1
- %16 = getelementptr inbounds i8, i8* %13, i64 1
+ %16 = getelementptr inbounds i8, ptr %13, i64 1
%17 = icmp ugt i32 %12, 14
- %18 = icmp eq i8* %16, %1
+ %18 = icmp eq ptr %16, %1
%19 = or i1 %18, %17
br i1 %19, label %8, label %10
}
bb5: ; preds = %bb5, %bb4
%tmp6 = phi i64 [ %tmp12, %bb5 ], [ 0, %bb4 ]
- %tmp7 = getelementptr inbounds float, float* null, i64 %tmp6
- %tmp8 = load float, float* %tmp7, align 4
+ %tmp7 = getelementptr inbounds float, ptr null, i64 %tmp6
+ %tmp8 = load float, ptr %tmp7, align 4
%tmp9 = fpext float %tmp8 to double
%tmp10 = tail call i64 @llvm.lrint.i64.f64(double %tmp9) #2
%tmp11 = trunc i64 %tmp10 to i8
- store i8 %tmp11, i8* undef, align 1
+ store i8 %tmp11, ptr undef, align 1
%tmp12 = add nuw i64 %tmp6, 1
%tmp13 = icmp eq i64 %tmp12, %tmp
br i1 %tmp13, label %bb15, label %bb5
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* @a, align 8
+ %0 = load double, ptr @a, align 8
%conv = fptoui double %0 to i64
%conv1 = sitofp i64 %conv to double
%mul = fmul double %conv1, 1.000000e+06
%struct.l.0.3.6.9 = type { i8 }
%struct.a.1.4.7.10 = type { [27 x i8], [0 x i32], [4 x i8] }
-define void @_ZN1m1nEv(%struct.m.2.5.8.11* %this) local_unnamed_addr nounwind align 2 {
+define void @_ZN1m1nEv(ptr %this) local_unnamed_addr nounwind align 2 {
; CHECK-LABEL: _ZN1m1nEv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %bc = getelementptr inbounds %struct.m.2.5.8.11, %struct.m.2.5.8.11* %this, i64 0, i32 2
- %0 = bitcast %struct.a.1.4.7.10* %bc to i216*
- %bf.load = load i216, i216* %0, align 8
+ %bc = getelementptr inbounds %struct.m.2.5.8.11, ptr %this, i64 0, i32 2
+ %bf.load = load i216, ptr %bc, align 8
%bf.lshr = lshr i216 %bf.load, 4
%shl.i23 = shl i216 %bf.lshr, 31
%shl.i = trunc i216 %shl.i23 to i32
- %arrayidx = getelementptr inbounds %struct.m.2.5.8.11, %struct.m.2.5.8.11* %this, i64 0, i32 2, i32 1, i64 0
- %1 = load i32, i32* %arrayidx, align 4
- %and.i = and i32 %1, 1
+ %arrayidx = getelementptr inbounds %struct.m.2.5.8.11, ptr %this, i64 0, i32 2, i32 1, i64 0
+ %0 = load i32, ptr %arrayidx, align 4
+ %and.i = and i32 %0, 1
%or.i = or i32 %and.i, %shl.i
- tail call void @_ZN1llsE1d(%struct.l.0.3.6.9* undef, i32 %or.i) #1
- %bf.load10 = load i216, i216* %0, align 8
+ tail call void @_ZN1llsE1d(ptr undef, i32 %or.i) #1
+ %bf.load10 = load i216, ptr %bc, align 8
%bf.lshr11 = lshr i216 %bf.load10, 4
%shl.i1524 = shl i216 %bf.lshr11, 31
%shl.i15 = trunc i216 %shl.i1524 to i32
- tail call void @_ZN1llsE1d(%struct.l.0.3.6.9* undef, i32 %shl.i15) #1
+ tail call void @_ZN1llsE1d(ptr undef, i32 %shl.i15) #1
ret void
}
-declare void @_ZN1llsE1d(%struct.l.0.3.6.9*, i32) local_unnamed_addr #0
+declare void @_ZN1llsE1d(ptr, i32) local_unnamed_addr #0
@d = local_unnamed_addr global %struct.anon zeroinitializer, align 8
; Function Attrs: norecurse nounwind readonly
-define i64 @e(i8* nocapture readonly %f) local_unnamed_addr #0 {
+define i64 @e(ptr nocapture readonly %f) local_unnamed_addr #0 {
; CHECK-LABEL: e:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* %f, align 1
+ %0 = load i8, ptr %f, align 1
%conv = zext i8 %0 to i64
%shl = shl nuw i64 %conv, 56
- %arrayidx1 = getelementptr inbounds i8, i8* %f, i64 1
- %1 = load i8, i8* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %f, i64 1
+ %1 = load i8, ptr %arrayidx1, align 1
%conv2 = zext i8 %1 to i64
%shl3 = shl nuw nsw i64 %conv2, 48
%or = or i64 %shl3, %shl
- %arrayidx4 = getelementptr inbounds i8, i8* %f, i64 2
- %2 = load i8, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %f, i64 2
+ %2 = load i8, ptr %arrayidx4, align 1
%conv5 = zext i8 %2 to i64
%shl6 = shl nuw nsw i64 %conv5, 40
%or7 = or i64 %or, %shl6
- %arrayidx8 = getelementptr inbounds i8, i8* %f, i64 3
- %3 = load i8, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %f, i64 3
+ %3 = load i8, ptr %arrayidx8, align 1
%conv9 = zext i8 %3 to i64
%shl10 = shl nuw nsw i64 %conv9, 32
%or11 = or i64 %or7, %shl10
- %arrayidx12 = getelementptr inbounds i8, i8* %f, i64 4
- %4 = load i8, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %f, i64 4
+ %4 = load i8, ptr %arrayidx12, align 1
%conv13 = zext i8 %4 to i64
%shl14 = shl nuw nsw i64 %conv13, 24
%or15 = or i64 %or11, %shl14
- %arrayidx16 = getelementptr inbounds i8, i8* %f, i64 5
- %5 = load i8, i8* %arrayidx16, align 1
+ %arrayidx16 = getelementptr inbounds i8, ptr %f, i64 5
+ %5 = load i8, ptr %arrayidx16, align 1
%conv17 = zext i8 %5 to i64
%shl18 = shl nuw nsw i64 %conv17, 16
%or20 = or i64 %or15, %shl18
- %arrayidx21 = getelementptr inbounds i8, i8* %f, i64 6
- %6 = load i8, i8* %arrayidx21, align 1
+ %arrayidx21 = getelementptr inbounds i8, ptr %f, i64 6
+ %6 = load i8, ptr %arrayidx21, align 1
%conv22 = zext i8 %6 to i64
%shl23 = shl nuw nsw i64 %conv22, 8
%or25 = or i64 %or20, %shl23
- %arrayidx26 = getelementptr inbounds i8, i8* %f, i64 7
- %7 = load i8, i8* %arrayidx26, align 1
+ %arrayidx26 = getelementptr inbounds i8, ptr %f, i64 7
+ %7 = load i8, ptr %arrayidx26, align 1
%conv27 = zext i8 %7 to i64
%or28 = or i64 %or25, %conv27
ret i64 %or28
; CHECK-NEXT: stxvd2x vs0, 0, r4
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* getelementptr inbounds (i8, i8* bitcast (void ()* @g to i8*), i64 8), align 1
+ %0 = load i8, ptr getelementptr inbounds (i8, ptr @g, i64 8), align 1
%conv.i = zext i8 %0 to i64
%shl.i = shl nuw i64 %conv.i, 56
- %1 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 9), align 1
+ %1 = load i8, ptr getelementptr (i8, ptr @g, i64 9), align 1
%conv2.i = zext i8 %1 to i64
%shl3.i = shl nuw nsw i64 %conv2.i, 48
%or.i = or i64 %shl3.i, %shl.i
- %2 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 10), align 1
+ %2 = load i8, ptr getelementptr (i8, ptr @g, i64 10), align 1
%conv5.i = zext i8 %2 to i64
%shl6.i = shl nuw nsw i64 %conv5.i, 40
%or7.i = or i64 %or.i, %shl6.i
- %3 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 11), align 1
+ %3 = load i8, ptr getelementptr (i8, ptr @g, i64 11), align 1
%conv9.i = zext i8 %3 to i64
%shl10.i = shl nuw nsw i64 %conv9.i, 32
%or11.i = or i64 %or7.i, %shl10.i
- %4 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 12), align 1
+ %4 = load i8, ptr getelementptr (i8, ptr @g, i64 12), align 1
%conv13.i = zext i8 %4 to i64
%shl14.i = shl nuw nsw i64 %conv13.i, 24
%or15.i = or i64 %or11.i, %shl14.i
- %5 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 13), align 1
+ %5 = load i8, ptr getelementptr (i8, ptr @g, i64 13), align 1
%conv17.i = zext i8 %5 to i64
%shl18.i = shl nuw nsw i64 %conv17.i, 16
%or20.i = or i64 %or15.i, %shl18.i
- %6 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 14), align 1
+ %6 = load i8, ptr getelementptr (i8, ptr @g, i64 14), align 1
%conv22.i = zext i8 %6 to i64
%shl23.i = shl nuw nsw i64 %conv22.i, 8
%or25.i = or i64 %or20.i, %shl23.i
- %7 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 15), align 1
+ %7 = load i8, ptr getelementptr (i8, ptr @g, i64 15), align 1
%conv27.i = zext i8 %7 to i64
%or28.i = or i64 %or25.i, %conv27.i
- store i64 %or28.i, i64* getelementptr inbounds (%struct.anon, %struct.anon* @d, i64 0, i32 1), align 8
- %8 = load i8, i8* bitcast (void ()* @g to i8*), align 1
+ store i64 %or28.i, ptr getelementptr inbounds (%struct.anon, ptr @d, i64 0, i32 1), align 8
+ %8 = load i8, ptr @g, align 1
%conv.i2 = zext i8 %8 to i64
%shl.i3 = shl nuw i64 %conv.i2, 56
- %9 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 1), align 1
+ %9 = load i8, ptr getelementptr (i8, ptr @g, i64 1), align 1
%conv2.i4 = zext i8 %9 to i64
%shl3.i5 = shl nuw nsw i64 %conv2.i4, 48
%or.i6 = or i64 %shl3.i5, %shl.i3
- %10 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 2), align 1
+ %10 = load i8, ptr getelementptr (i8, ptr @g, i64 2), align 1
%conv5.i7 = zext i8 %10 to i64
%shl6.i8 = shl nuw nsw i64 %conv5.i7, 40
%or7.i9 = or i64 %or.i6, %shl6.i8
- %11 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 3), align 1
+ %11 = load i8, ptr getelementptr (i8, ptr @g, i64 3), align 1
%conv9.i10 = zext i8 %11 to i64
%shl10.i11 = shl nuw nsw i64 %conv9.i10, 32
%or11.i12 = or i64 %or7.i9, %shl10.i11
- %12 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 4), align 1
+ %12 = load i8, ptr getelementptr (i8, ptr @g, i64 4), align 1
%conv13.i13 = zext i8 %12 to i64
%shl14.i14 = shl nuw nsw i64 %conv13.i13, 24
%or15.i15 = or i64 %or11.i12, %shl14.i14
- %13 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 5), align 1
+ %13 = load i8, ptr getelementptr (i8, ptr @g, i64 5), align 1
%conv17.i16 = zext i8 %13 to i64
%shl18.i17 = shl nuw nsw i64 %conv17.i16, 16
%or20.i18 = or i64 %or15.i15, %shl18.i17
- %14 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 6), align 1
+ %14 = load i8, ptr getelementptr (i8, ptr @g, i64 6), align 1
%conv22.i19 = zext i8 %14 to i64
%shl23.i20 = shl nuw nsw i64 %conv22.i19, 8
%or25.i21 = or i64 %or20.i18, %shl23.i20
- %15 = load i8, i8* getelementptr (i8, i8* bitcast (void ()* @g to i8*), i64 7), align 1
+ %15 = load i8, ptr getelementptr (i8, ptr @g, i64 7), align 1
%conv27.i22 = zext i8 %15 to i64
%or28.i23 = or i64 %or25.i21, %conv27.i22
- store i64 %or28.i23, i64* getelementptr inbounds (%struct.anon, %struct.anon* @d, i64 0, i32 0), align 8
+ store i64 %or28.i23, ptr @d, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%1 = fptosi float %0 to i32
- store i32 %1, i32* @Global, align 4
+ store i32 %1, ptr @Global, align 4
ret void
}
; RUN: -ppc-asm-full-reg-names < %s | FileCheck %s
%struct.e.0.1.2.3.12.29 = type { [10 x i32] }
-define dso_local void @g(%struct.e.0.1.2.3.12.29* %agg.result) local_unnamed_addr #0 {
+define dso_local void @g(ptr %agg.result) local_unnamed_addr #0 {
; CHECK-LABEL: g:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
entry:
- %call = tail call signext i32 bitcast (i32 (...)* @i to i32 ()*)()
+ %call = tail call signext i32 @i()
%conv = sext i32 %call to i64
- %0 = inttoptr i64 %conv to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(40) %0, i8* nonnull align 4 dereferenceable(40) bitcast (void (%struct.e.0.1.2.3.12.29*)* @g to i8*), i64 40, i1 false)
- %1 = inttoptr i64 %conv to i32*
- %2 = load i32, i32* %1, align 4
+ %0 = inttoptr i64 %conv to ptr
+ tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 dereferenceable(40) %0, ptr nonnull align 4 dereferenceable(40) @g, i64 40, i1 false)
+ %1 = inttoptr i64 %conv to ptr
+ %2 = load i32, ptr %1, align 4
%rev.i = tail call i32 @llvm.bswap.i32(i32 %2)
- store i32 %rev.i, i32* %1, align 4
- %incdec.ptr.i.4 = getelementptr inbounds i32, i32* %1, i64 5
- %3 = load i32, i32* %incdec.ptr.i.4, align 4
+ store i32 %rev.i, ptr %1, align 4
+ %incdec.ptr.i.4 = getelementptr inbounds i32, ptr %1, i64 5
+ %3 = load i32, ptr %incdec.ptr.i.4, align 4
%rev.i.5 = tail call i32 @llvm.bswap.i32(i32 %3)
- store i32 %rev.i.5, i32* %incdec.ptr.i.4, align 4
+ store i32 %rev.i.5, ptr %incdec.ptr.i.4, align 4
ret void
}
declare i32 @i(...) local_unnamed_addr
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare i32 @llvm.bswap.i32(i32)
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB0_2: # %bb5
bb:
- %i = load i32, i32* @g, align 4
+ %i = load i32, ptr @g, align 4
%i1 = sext i32 %i to i64
- %i2 = getelementptr inbounds [0 x %1], [0 x %1]* bitcast (double* getelementptr inbounds (%0, %0* @f, i64 1, i32 0) to [0 x %1]*), i64 0, i64 %i1, i32 0
- %i3 = load i32, i32* %i2, align 4
+ %i2 = getelementptr inbounds [0 x %1], ptr getelementptr inbounds (%0, ptr @f, i64 1, i32 0), i64 0, i64 %i1, i32 0
+ %i3 = load i32, ptr %i2, align 4
%i4 = icmp eq i32 %i3, 0
br i1 %i4, label %bb6, label %bb5
; CHECK-NEXT: # %bb.9: # %L917
; CHECK-NEXT: .LBB0_10: # %L994
top:
- %0 = load i64, i64* undef, align 8
+ %0 = load i64, ptr undef, align 8
%1 = icmp ne i64 %0, 0
%2 = sext i64 %0 to i128
switch i64 %0, label %pass195 [
entry:
%0 = zext i32 %vla_size to i64
%vla = alloca i8, i64 %0, align 2048
- %1 = load volatile i8, i8* %vla, align 2048
+ %1 = load volatile i8, ptr %vla, align 2048
ret void
}
br label %end
end:
- %a = phi i1 [ icmp ugt (i64 0, i64 ptrtoint (i64* @bar to i64)), %true ],
+ %a = phi i1 [ icmp ugt (i64 0, i64 ptrtoint (ptr @bar to i64)), %true ],
[ icmp ugt (i64 0, i64 2), %false ]
ret i1 %a
}
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-grtev4-linux-gnu"
-define void @foo(i64* %p1, i64 %v1, i8 %v2, i64 %v3) {
+define void @foo(ptr %p1, i64 %v1, i8 %v2, i64 %v3) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: mr 7, 5
; CHECK-NEXT: # %bb.3: # %bb3
; CHECK-NEXT: std 6, 0(3)
; CHECK-NEXT: blr
- store i64 0, i64* %p1, align 8
+ store i64 0, ptr %p1, align 8
%ext = zext i8 %v2 to i64
%shift = shl nuw i64 %v1, 8
%merge = or i64 %shift, %ext
br i1 %cond1, label %bb2, label %bb1 ; be used by this conditional branch
bb1:
- store i64 %v1, i64* %p1, align 8
+ store i64 %v1, ptr %p1, align 8
br label %bb2
bb2:
br i1 %not0, label %exit, label %bb3
bb3:
- store i64 %v3, i64* %p1, align 8
+ store i64 %v3, ptr %p1, align 8
br label %exit
exit:
%struct.poly2 = type { [11 x i64] }
; Function Attrs: nofree norecurse nounwind
-define dso_local void @poly2_lshift1(%struct.poly2* nocapture %p) local_unnamed_addr #0 {
+define dso_local void @poly2_lshift1(ptr nocapture %p) local_unnamed_addr #0 {
; CHECK-LABEL: poly2_lshift1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r4, 72
; CHECK-NEXT: stxvd2x vs0, r3, r4
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 0
- %0 = load i64, i64* %arrayidx, align 8
+ %0 = load i64, ptr %p, align 8
%shl = shl i64 %0, 1
- store i64 %shl, i64* %arrayidx, align 8
- %arrayidx.1 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 1
- %1 = load i64, i64* %arrayidx.1, align 8
+ store i64 %shl, ptr %p, align 8
+ %arrayidx.1 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 1
+ %1 = load i64, ptr %arrayidx.1, align 8
%or.1 = call i64 @llvm.fshl.i64(i64 %1, i64 %0, i64 1)
- store i64 %or.1, i64* %arrayidx.1, align 8
- %arrayidx.2 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 2
- %2 = load i64, i64* %arrayidx.2, align 8
+ store i64 %or.1, ptr %arrayidx.1, align 8
+ %arrayidx.2 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 2
+ %2 = load i64, ptr %arrayidx.2, align 8
%or.2 = call i64 @llvm.fshl.i64(i64 %2, i64 %1, i64 1)
- store i64 %or.2, i64* %arrayidx.2, align 8
- %arrayidx.3 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 3
- %3 = load i64, i64* %arrayidx.3, align 8
+ store i64 %or.2, ptr %arrayidx.2, align 8
+ %arrayidx.3 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 3
+ %3 = load i64, ptr %arrayidx.3, align 8
%or.3 = call i64 @llvm.fshl.i64(i64 %3, i64 %2, i64 1)
- store i64 %or.3, i64* %arrayidx.3, align 8
- %arrayidx.4 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 4
- %4 = load i64, i64* %arrayidx.4, align 8
+ store i64 %or.3, ptr %arrayidx.3, align 8
+ %arrayidx.4 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 4
+ %4 = load i64, ptr %arrayidx.4, align 8
%or.4 = call i64 @llvm.fshl.i64(i64 %4, i64 %3, i64 1)
- store i64 %or.4, i64* %arrayidx.4, align 8
- %arrayidx.5 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 5
- %5 = load i64, i64* %arrayidx.5, align 8
+ store i64 %or.4, ptr %arrayidx.4, align 8
+ %arrayidx.5 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 5
+ %5 = load i64, ptr %arrayidx.5, align 8
%or.5 = call i64 @llvm.fshl.i64(i64 %5, i64 %4, i64 1)
- store i64 %or.5, i64* %arrayidx.5, align 8
- %arrayidx.6 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 6
- %6 = load i64, i64* %arrayidx.6, align 8
+ store i64 %or.5, ptr %arrayidx.5, align 8
+ %arrayidx.6 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 6
+ %6 = load i64, ptr %arrayidx.6, align 8
%or.6 = call i64 @llvm.fshl.i64(i64 %6, i64 %5, i64 1)
- store i64 %or.6, i64* %arrayidx.6, align 8
- %arrayidx.7 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 7
- %7 = load i64, i64* %arrayidx.7, align 8
+ store i64 %or.6, ptr %arrayidx.6, align 8
+ %arrayidx.7 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 7
+ %7 = load i64, ptr %arrayidx.7, align 8
%or.7 = call i64 @llvm.fshl.i64(i64 %7, i64 %6, i64 1)
- store i64 %or.7, i64* %arrayidx.7, align 8
- %arrayidx.8 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 8
- %8 = load i64, i64* %arrayidx.8, align 8
+ store i64 %or.7, ptr %arrayidx.7, align 8
+ %arrayidx.8 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 8
+ %8 = load i64, ptr %arrayidx.8, align 8
%or.8 = call i64 @llvm.fshl.i64(i64 %8, i64 %7, i64 1)
- store i64 %or.8, i64* %arrayidx.8, align 8
- %arrayidx.9 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 9
- %9 = bitcast i64* %arrayidx.9 to <2 x i64>*
- %10 = load <2 x i64>, <2 x i64>* %9, align 8
- %11 = insertelement <2 x i64> undef, i64 %8, i32 0
- %12 = shufflevector <2 x i64> %11, <2 x i64> %10, <2 x i32> <i32 0, i32 2>
- %13 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %10, <2 x i64> %12, <2 x i64> <i64 1, i64 1>)
- %14 = bitcast i64* %arrayidx.9 to <2 x i64>*
- store <2 x i64> %13, <2 x i64>* %14, align 8
+ store i64 %or.8, ptr %arrayidx.8, align 8
+ %arrayidx.9 = getelementptr inbounds %struct.poly2, ptr %p, i64 0, i32 0, i64 9
+ %9 = load <2 x i64>, ptr %arrayidx.9, align 8
+ %10 = insertelement <2 x i64> undef, i64 %8, i32 0
+ %11 = shufflevector <2 x i64> %10, <2 x i64> %9, <2 x i32> <i32 0, i32 2>
+ %12 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %9, <2 x i64> %11, <2 x i64> <i64 1, i64 1>)
+ store <2 x i64> %12, ptr %arrayidx.9, align 8
ret void
}
; CHECK-NEXT: stxvd2x vs0, 0, r3
; CHECK-NEXT: blr
entry:
- %wide.load42 = load <2 x i32>, <2 x i32>* undef, align 4
+ %wide.load42 = load <2 x i32>, ptr undef, align 4
%interleaved.vec49 = shufflevector <2 x i32> %wide.load42, <2 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- store <4 x i32> %interleaved.vec49, <4 x i32>* undef, align 4
+ store <4 x i32> %interleaved.vec49, ptr undef, align 4
ret void
}
; CHECK-P9-NEXT: xscvdphp f0, f0
; CHECK-P9-NEXT: stxsihx f0, 0, r3
bb:
- %i = load i64, i64 addrspace(11)* null, align 8
+ %i = load i64, ptr addrspace(11) null, align 8
%i1 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %i, i64 0)
%i2 = extractvalue { i64, i1 } %i1, 0
br label %bb3
%i6 = add nsw i64 %i5, -1
%i7 = add i64 %i6, 0
%i8 = sitofp i64 %i7 to half
- store half %i8, half addrspace(13)* undef, align 2
+ store half %i8, ptr addrspace(13) undef, align 2
%i9 = icmp eq i64 %i4, 0
%i10 = add i64 %i4, 1
br i1 %i9, label %bb11, label %bb3
bb3: ; preds = %bb1
%i4 = add nuw nsw i64 %i, 1
- %i5 = load half, half* null, align 2
+ %i5 = load half, ptr null, align 2
%i6 = fpext half %i5 to float
%i7 = fcmp uno float %i6, 0.000000e+00
%i8 = or i1 %i7, false
; CHECK-P9-NEXT: b .LBB2_1
; CHECK-P9-NEXT: .LBB2_5: # %bb15
bb:
- %i = load i64, i64 addrspace(11)* undef, align 8
- %i1 = load i64, i64 addrspace(11)* undef, align 8
+ %i = load i64, ptr addrspace(11) undef, align 8
+ %i1 = load i64, ptr addrspace(11) undef, align 8
br label %bb2
bb2: ; preds = %bb12, %bb
]
bb5: ; preds = %bb4, %bb4
- %i6 = load half, half addrspace(13)* undef, align 2
+ %i6 = load half, ptr addrspace(13) undef, align 2
%i7 = icmp ult i64 0, %i1
br i1 %i7, label %bb8, label %bb15
bb8: ; preds = %bb5
- store half %i6, half addrspace(13)* null, align 2
+ store half %i6, ptr addrspace(13) null, align 2
br label %bb10
bb9: ; preds = %bb4
bb1: ; preds = %bb1, %bb
%i = phi i64 [ 0, %bb ], [ %i3, %bb1 ]
- %i2 = getelementptr inbounds half, half addrspace(13)* null, i64 %i
- store half %arg, half addrspace(13)* %i2, align 2
+ %i2 = getelementptr inbounds half, ptr addrspace(13) null, i64 %i
+ store half %arg, ptr addrspace(13) %i2, align 2
%i3 = add i64 %i, 12
%i4 = icmp eq i64 %i3, 0
br i1 %i4, label %bb5, label %bb1
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* undef, align 4
+ %0 = load i32, ptr undef, align 4
br label %monotonic.i
for.cond.i: ; preds = %monotonic.i
monotonic.i: ; preds = %for.cond.i, %entry
%i.018.i = phi i32 [ %inc.i, %for.cond.i ], [ 0, %entry ]
- %1 = load atomic i32, i32* getelementptr inbounds (%struct.e.0.12.28.44.104.108.112.188, %struct.e.0.12.28.44.104.108.112.188* @g, i64 0, i32 0) monotonic, align 4
+ %1 = load atomic i32, ptr @g monotonic, align 4
%conv.i = trunc i32 %1 to i8
%tobool.not.i = icmp eq i8 %conv.i, 0
%inc.i = add nuw nsw i32 %i.018.i, 1
br i1 %tobool.not.i, label %for.cond.i, label %if.end
if.end: ; preds = %monotonic.i, %for.cond.i
- %.sink = phi i64* [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, %struct.t.1.13.29.45.105.109.113.189* @aj, i64 0, i32 1), %monotonic.i ], [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, %struct.t.1.13.29.45.105.109.113.189* @aj, i64 0, i32 0), %for.cond.i ]
- store i64 1, i64* %.sink, align 8
+ %.sink = phi ptr [ getelementptr inbounds (%struct.t.1.13.29.45.105.109.113.189, ptr @aj, i64 0, i32 1), %monotonic.i ], [ @aj, %for.cond.i ]
+ store i64 1, ptr %.sink, align 8
ret void
}
bb2: ; preds = %bb
%i = select i1 undef, i64 0, i64 72057594037927936
- store i64 %i, i64* undef, align 8
+ store i64 %i, ptr undef, align 8
ret void
bb1: ; preds = %bb
- %i50 = load i8, i8* undef, align 8
- %i52 = load i128, i128* null, align 8
+ %i50 = load i8, ptr undef, align 8
+ %i52 = load i128, ptr null, align 8
%i62 = icmp eq i8 %i50, 0
br i1 undef, label %bb66, label %bb64
%i71 = icmp eq i128 %i70, 0
%i74 = select i1 %i62, i64 0, i64 72057594037927936
%i75 = select i1 %i71, i64 144115188075855872, i64 %i74
- store i64 %i75, i64* undef, align 8
+ store i64 %i75, ptr undef, align 8
ret void
}
%conv1 = fpext float %j to double
%conv2 = fpext float %k to double
%conv3 = fpext float %l to double
- %call = tail call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i64 0, i64 0), double %conv, double %conv1, double %conv2, double %conv3)
+ %call = tail call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, double %conv, double %conv1, double %conv2, double %conv3)
ret void
}
-declare signext i32 @printf(i8* nocapture readonly, ...)
+declare signext i32 @printf(ptr nocapture readonly, ...)
; RUN: -mtriple=powerpc-ibm-aix-xcoff -vec-extabi \
; RUN: < %s | FileCheck %s --check-prefixes=P9BE-AIX32
-define void @test64(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
+define void @test64(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9LE-LABEL: test64:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: add 5, 3, 4
; P9BE-AIX32-NEXT: blr
entry:
%idx.ext63 = sext i32 %i_pix2 to i64
- %add.ptr64 = getelementptr inbounds i8, i8* %pix2, i64 %idx.ext63
- %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
- %0 = bitcast i8* %add.ptr64 to <4 x i16>*
- %1 = load <4 x i16>, <4 x i16>* %0, align 1
- %reorder_shuffle117 = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %2 = zext <4 x i16> %reorder_shuffle117 to <4 x i32>
- %3 = sub nsw <4 x i32> zeroinitializer, %2
- %4 = bitcast i8* %arrayidx5.1 to <4 x i16>*
- %5 = load <4 x i16>, <4 x i16>* %4, align 1
- %reorder_shuffle115 = shufflevector <4 x i16> %5, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %6 = zext <4 x i16> %reorder_shuffle115 to <4 x i32>
- %7 = sub nsw <4 x i32> zeroinitializer, %6
- %8 = shl nsw <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
- %9 = add nsw <4 x i32> %8, %3
- %10 = sub nsw <4 x i32> %9, zeroinitializer
- %11 = shufflevector <4 x i32> undef, <4 x i32> %10, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
- %12 = add nsw <4 x i32> zeroinitializer, %11
- %13 = shufflevector <4 x i32> %12, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
- store <4 x i32> %13, <4 x i32>* undef, align 16
+ %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63
+ %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+ %0 = load <4 x i16>, ptr %add.ptr64, align 1
+ %reorder_shuffle117 = shufflevector <4 x i16> %0, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %1 = zext <4 x i16> %reorder_shuffle117 to <4 x i32>
+ %2 = sub nsw <4 x i32> zeroinitializer, %1
+ %3 = load <4 x i16>, ptr %arrayidx5.1, align 1
+ %reorder_shuffle115 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %4 = zext <4 x i16> %reorder_shuffle115 to <4 x i32>
+ %5 = sub nsw <4 x i32> zeroinitializer, %4
+ %6 = shl nsw <4 x i32> %5, <i32 16, i32 16, i32 16, i32 16>
+ %7 = add nsw <4 x i32> %6, %2
+ %8 = sub nsw <4 x i32> %7, zeroinitializer
+ %9 = shufflevector <4 x i32> undef, <4 x i32> %8, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
+ %10 = add nsw <4 x i32> zeroinitializer, %9
+ %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ store <4 x i32> %11, ptr undef, align 16
ret void
}
-define void @test32(i8* nocapture readonly %pix2, i32 signext %i_pix2) {
+define void @test32(ptr nocapture readonly %pix2, i32 signext %i_pix2) {
; P9LE-LABEL: test32:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: add 5, 3, 4
; P9BE-AIX32-NEXT: blr
entry:
%idx.ext63 = sext i32 %i_pix2 to i64
- %add.ptr64 = getelementptr inbounds i8, i8* %pix2, i64 %idx.ext63
- %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
- %0 = bitcast i8* %add.ptr64 to <4 x i8>*
- %1 = load <4 x i8>, <4 x i8>* %0, align 1
- %reorder_shuffle117 = shufflevector <4 x i8> %1, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %2 = zext <4 x i8> %reorder_shuffle117 to <4 x i32>
- %3 = sub nsw <4 x i32> zeroinitializer, %2
- %4 = bitcast i8* %arrayidx5.1 to <4 x i8>*
- %5 = load <4 x i8>, <4 x i8>* %4, align 1
- %reorder_shuffle115 = shufflevector <4 x i8> %5, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %6 = zext <4 x i8> %reorder_shuffle115 to <4 x i32>
- %7 = sub nsw <4 x i32> zeroinitializer, %6
- %8 = shl nsw <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
- %9 = add nsw <4 x i32> %8, %3
- %10 = sub nsw <4 x i32> %9, zeroinitializer
- %11 = shufflevector <4 x i32> undef, <4 x i32> %10, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
- %12 = add nsw <4 x i32> zeroinitializer, %11
- %13 = shufflevector <4 x i32> %12, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
- store <4 x i32> %13, <4 x i32>* undef, align 16
+ %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63
+ %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+ %0 = load <4 x i8>, ptr %add.ptr64, align 1
+ %reorder_shuffle117 = shufflevector <4 x i8> %0, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %1 = zext <4 x i8> %reorder_shuffle117 to <4 x i32>
+ %2 = sub nsw <4 x i32> zeroinitializer, %1
+ %3 = load <4 x i8>, ptr %arrayidx5.1, align 1
+ %reorder_shuffle115 = shufflevector <4 x i8> %3, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %4 = zext <4 x i8> %reorder_shuffle115 to <4 x i32>
+ %5 = sub nsw <4 x i32> zeroinitializer, %4
+ %6 = shl nsw <4 x i32> %5, <i32 16, i32 16, i32 16, i32 16>
+ %7 = add nsw <4 x i32> %6, %2
+ %8 = sub nsw <4 x i32> %7, zeroinitializer
+ %9 = shufflevector <4 x i32> undef, <4 x i32> %8, <4 x i32> <i32 2, i32 7, i32 0, i32 5>
+ %10 = add nsw <4 x i32> zeroinitializer, %9
+ %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ store <4 x i32> %11, ptr undef, align 16
ret void
}
-define void @test16(i16* nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
+define void @test16(ptr nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
; P9LE-LABEL: test16:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi 4, 4, 1
br label %for.body
for.body: ; preds = %entry
- %arrayidx8 = getelementptr inbounds i16, i16* %sums, i64 %idxprom
- %0 = load i16, i16* %arrayidx8, align 2
- %arrayidx16 = getelementptr inbounds i16, i16* %sums, i64 %idxprom15
- %1 = load i16, i16* %arrayidx16, align 2
+ %arrayidx8 = getelementptr inbounds i16, ptr %sums, i64 %idxprom
+ %0 = load i16, ptr %arrayidx8, align 2
+ %arrayidx16 = getelementptr inbounds i16, ptr %sums, i64 %idxprom15
+ %1 = load i16, ptr %arrayidx16, align 2
%2 = insertelement <4 x i16> undef, i16 %0, i32 2
%3 = insertelement <4 x i16> %2, i16 %1, i32 3
%4 = zext <4 x i16> %3 to <4 x i32>
ret void
}
-define void @test8(i8* nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
+define void @test8(ptr nocapture readonly %sums, i32 signext %delta, i32 signext %thresh) {
; P9LE-LABEL: test8:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: add 6, 3, 4
br label %for.body
for.body: ; preds = %entry
- %arrayidx8 = getelementptr inbounds i8, i8* %sums, i64 %idxprom
- %0 = load i8, i8* %arrayidx8, align 2
- %arrayidx16 = getelementptr inbounds i8, i8* %sums, i64 %idxprom15
- %1 = load i8, i8* %arrayidx16, align 2
+ %arrayidx8 = getelementptr inbounds i8, ptr %sums, i64 %idxprom
+ %0 = load i8, ptr %arrayidx8, align 2
+ %arrayidx16 = getelementptr inbounds i8, ptr %sums, i64 %idxprom15
+ %1 = load i8, ptr %arrayidx16, align 2
%2 = insertelement <4 x i8> undef, i8 %0, i32 2
%3 = insertelement <4 x i8> %2, i8 %1, i32 3
%4 = zext <4 x i8> %3 to <4 x i32>
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux"
-%t1 = type { %t2*, %t3* }
-%t2 = type <{ %t3*, i32, [4 x i8] }>
-%t3 = type { %t3* }
+%t1 = type { ptr, ptr }
+%t2 = type <{ ptr, i32, [4 x i8] }>
+%t3 = type { ptr }
@_ZN4Foam10SLListBase13endConstIter_E = external global %t1
if.then17.i: ; preds = %if.end75
%tobool.i.i.i = icmp eq i32 undef, 0
- %0 = load i64*, i64** undef, align 8
- %agg.tmp.sroa.3.0.copyload33.in.i = select i1 %tobool.i.i.i, i64* bitcast (%t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1) to i64*), i64* %0
- %agg.tmp.sroa.3.0.copyload33.i = load i64, i64* %agg.tmp.sroa.3.0.copyload33.in.i, align 8
- %1 = inttoptr i64 %agg.tmp.sroa.3.0.copyload33.i to %t3*
- %2 = load %t3*, %t3** getelementptr inbounds (%t1, %t1* @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), align 8
- %cmp.i37.i = icmp eq %t3* %1, %2
+ %0 = load ptr, ptr undef, align 8
+ %agg.tmp.sroa.3.0.copyload33.in.i = select i1 %tobool.i.i.i, ptr getelementptr inbounds (%t1, ptr @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), ptr %0
+ %agg.tmp.sroa.3.0.copyload33.i = load i64, ptr %agg.tmp.sroa.3.0.copyload33.in.i, align 8
+ %1 = inttoptr i64 %agg.tmp.sroa.3.0.copyload33.i to ptr
+ %2 = load ptr, ptr getelementptr inbounds (%t1, ptr @_ZN4Foam10SLListBase13endConstIter_E, i64 0, i32 1), align 8
+ %cmp.i37.i = icmp eq ptr %1, %2
br i1 %cmp.i37.i, label %invoke.cont79, label %for.body.lr.ph.i
; CHECK-LABEL: @_ZN4FoamrsIbEERNS_7IstreamES2_RNS_4ListIT_EE
declare i32 @__gxx_personality_v0(...)
-define void @_Z11GetPasswordP13CStdOutStreamb(i1 %cond, i8 %arg1, i8* %arg2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z11GetPasswordP13CStdOutStreamb(i1 %cond, i8 %arg1, ptr %arg2) personality ptr @__gxx_personality_v0 {
entry:
br label %for.cond.i.i
to label %invoke.cont4 unwind label %lpad
invoke.cont4: ; preds = %invoke.cont
- %call7 = invoke i8* @getpass()
+ %call7 = invoke ptr @getpass()
to label %for.cond.i.i30 unwind label %lpad
; CHECK-LABEL: @_Z11GetPasswordP13CStdOutStreamb
for.cond.i.i30: ; preds = %for.cond.i.i30, %invoke.cont4
%indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
- %arrayidx.i.i27 = getelementptr inbounds i8, i8* %call7, i64 %indvars.iv.i.i26
- %0 = load i8, i8* %arrayidx.i.i27, align 1
+ %arrayidx.i.i27 = getelementptr inbounds i8, ptr %call7, i64 %indvars.iv.i.i26
+ %0 = load i8, ptr %arrayidx.i.i27, align 1
%1 = add i8 %0, %arg1
- store i8 %1, i8* %arg2, align 1
+ store i8 %1, ptr %arg2, align 1
%indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
br label %for.cond.i.i30
lpad: ; preds = %invoke.cont4, %invoke.cont, %_ZN11CStringBaseIcEC2EPKc.exit.critedge
- %2 = landingpad { i8*, i32 }
+ %2 = landingpad { ptr, i32 }
cleanup
- resume { i8*, i32 } undef
+ resume { ptr, i32 } undef
}
-declare i8* @getpass()
+declare ptr @getpass()
call void @foo()
; CHECK: lis{{.*}}.Lbaz
- %1 = load i32, i32* @baz, align 4
+ %1 = load i32, ptr @baz, align 4
ret i32 %1
}
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c, float* nocapture %d) #0 {
+define void @foo(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c, ptr nocapture %d) #0 {
; CHECK-LABEL: @foo
entry:
- %0 = load float, float* %b, align 4
- store float %0, float* %a, align 4
- %1 = load float, float* %c, align 4
- store float %1, float* %b, align 4
- %2 = load float, float* %a, align 4
- store float %2, float* %d, align 4
+ %0 = load float, ptr %b, align 4
+ store float %0, ptr %a, align 4
+ %1 = load float, ptr %c, align 4
+ store float %1, ptr %b, align 4
+ %2 = load float, ptr %a, align 4
+ store float %2, ptr %d, align 4
ret void
; CHECK: lwz [[REG1:[0-9]+]], 0(4)
define i128 @foo() nounwind {
entry:
%x = alloca i128, align 16
- store i128 27, i128* %x, align 16
- %0 = load i128, i128* %x, align 16
+ store i128 27, ptr %x, align 16
+ %0 = load i128, ptr %x, align 16
ret i128 %0
}
ret double %7
}
-define void @cse_nomerge(double* %f1, double* %f2, double %f3) #0 {
+define void @cse_nomerge(ptr %f1, ptr %f2, double %f3) #0 {
; CHECK-LABEL: cse_nomerge:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: blr
entry:
%0 = call double @llvm.ppc.readflm()
- store double %0, double* %f1, align 8
+ store double %0, ptr %f1, align 8
call void @effect_func()
%1 = call double @llvm.ppc.readflm()
- store double %1, double* %f2, align 8
+ store double %1, ptr %f2, align 8
%2 = call contract double @llvm.ppc.setflm(double %f3)
ret void
}
-define void @cse_nomerge_readonly(double* %f1, double* %f2, double %f3) #0 {
+define void @cse_nomerge_readonly(ptr %f1, ptr %f2, double %f3) #0 {
; CHECK-LABEL: cse_nomerge_readonly:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: blr
entry:
%0 = call double @llvm.ppc.readflm()
- store double %0, double* %f1, align 8
+ store double %0, ptr %f1, align 8
call void @readonly_func()
%1 = call double @llvm.ppc.readflm()
- store double %1, double* %f2, align 8
+ store double %1, ptr %f2, align 8
%2 = call contract double @llvm.ppc.setflm(double %f3)
ret void
}
;CHECK-NEXT: - BB4[optional1]: float = 0.625, int = 8
-define void @loop_test(i32* %tags, i32 %count) {
+define void @loop_test(ptr %tags, i32 %count) {
entry:
br label %for.check
for.check:
%count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
%done.count = icmp ugt i32 %count.loop, 0
- %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
- %tag = load i32, i32* %tag_ptr
+ %tag_ptr = getelementptr inbounds i32, ptr %tags, i32 %count
+ %tag = load i32, ptr %tag_ptr
%done.tag = icmp eq i32 %tag, 0
%done = and i1 %done.count, %done.tag
br i1 %done, label %test1, label %exit, !prof !1
; RUN: FileCheck %s --check-prefix=AIX-32
; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test1(<2 x float>* nocapture readonly %Ptr) {
+define dso_local <2 x double> @test1(ptr nocapture readonly %Ptr) {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; AIX-32-NEXT: xxmrghd v2, vs1, vs0
; AIX-32-NEXT: blr
entry:
- %0 = load <2 x float>, <2 x float>* %Ptr, align 8
+ %0 = load <2 x float>, ptr %Ptr, align 8
%1 = fpext <2 x float> %0 to <2 x double>
ret <2 x double> %1
}
; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test2(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test2(ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r4)
; AIX-32-NEXT: xxmrghd v2, vs0, vs1
; AIX-32-NEXT: blr
entry:
- %0 = load <2 x float>, <2 x float>* %a, align 8
- %1 = load <2 x float>, <2 x float>* %b, align 8
+ %0 = load <2 x float>, ptr %a, align 8
+ %1 = load <2 x float>, ptr %b, align 8
%sub = fsub <2 x float> %0, %1
%2 = fpext <2 x float> %sub to <2 x double>
ret <2 x double> %2
; Function Attrs: norecurse nounwind readonly
; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test3(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test3(ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r4)
; AIX-32-NEXT: xxmrghd v2, vs0, vs1
; AIX-32-NEXT: blr
entry:
- %0 = load <2 x float>, <2 x float>* %a, align 8
- %1 = load <2 x float>, <2 x float>* %b, align 8
+ %0 = load <2 x float>, ptr %a, align 8
+ %1 = load <2 x float>, ptr %b, align 8
%sub = fadd <2 x float> %0, %1
%2 = fpext <2 x float> %sub to <2 x double>
ret <2 x double> %2
; Function Attrs: norecurse nounwind readonly
; Function Attrs: norecurse nounwind readonly
-define dso_local <2 x double> @test4(<2 x float>* nocapture readonly %a, <2 x float>* nocapture readonly %b) {
+define dso_local <2 x double> @test4(ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r4)
; AIX-32-NEXT: xxmrghd v2, vs0, vs1
; AIX-32-NEXT: blr
entry:
- %0 = load <2 x float>, <2 x float>* %a, align 8
- %1 = load <2 x float>, <2 x float>* %b, align 8
+ %0 = load <2 x float>, ptr %a, align 8
+ %1 = load <2 x float>, ptr %b, align 8
%sub = fmul <2 x float> %0, %1
%2 = fpext <2 x float> %sub to <2 x double>
ret <2 x double> %2
; AIX-32-NEXT: xvadddp v2, vs0, v2
; AIX-32-NEXT: blr
entry:
- %0 = load <2 x float>, <2 x float>* @G, align 8
+ %0 = load <2 x float>, ptr @G, align 8
%1 = fpext <2 x float> %0 to <2 x double>
%add = fadd <2 x double> %1, %a
ret <2 x double> %add
br label %bb1
bb1: ; preds = %bb
- %i = load <2 x float>, <2 x float>* bitcast (i8* getelementptr inbounds ([25 x %0], [25 x %0]* @Glob1, i64 0, i64 6, i32 20, i64 22392) to <2 x float>*), align 8
+ %i = load <2 x float>, ptr getelementptr inbounds ([25 x %0], ptr @Glob1, i64 0, i64 6, i32 20, i64 22392), align 8
%i2 = fpext <2 x float> %i to <2 x double>
%i3 = fcmp contract oeq <2 x double> zeroinitializer, %i2
%i4 = shufflevector <2 x i1> %i3, <2 x i1> poison, <2 x i32> <i32 1, i32 undef>
; Test reduce scalarization in fpext v2f32 to v2f64 from the extract_subvector v4f32 node.
-define dso_local void @test(<4 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+define dso_local void @test(ptr nocapture readonly %a, ptr nocapture %b, ptr nocapture %c) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs0, 0(r3)
; CHECK-BE-NEXT: stxv vs0, 0(r5)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %a, align 16
%shuffle = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
%shuffle1 = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 2, i32 3>
%vecinit4 = fpext <2 x float> %shuffle to <2 x double>
%vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
- store <2 x double> %vecinit4, <2 x double>* %b, align 16
- store <2 x double> %vecinit11, <2 x double>* %c, align 16
+ store <2 x double> %vecinit4, ptr %b, align 16
+ store <2 x double> %vecinit11, ptr %c, align 16
ret void
}
; Ensure we don't crash for wider types
-define dso_local void @test2(<16 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+define dso_local void @test2(ptr nocapture readonly %a, ptr nocapture %b, ptr nocapture %c) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv vs0, 0(r3)
; CHECK-BE-NEXT: stxv vs0, 0(r5)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <16 x float>, <16 x float>* %a, align 16
+ %0 = load <16 x float>, ptr %a, align 16
%shuffle = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 0, i32 1>
%shuffle1 = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 2, i32 3>
%vecinit4 = fpext <2 x float> %shuffle to <2 x double>
%vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
- store <2 x double> %vecinit4, <2 x double>* %b, align 16
- store <2 x double> %vecinit11, <2 x double>* %c, align 16
+ store <2 x double> %vecinit4, ptr %b, align 16
+ store <2 x double> %vecinit11, ptr %c, align 16
ret void
}
%"class.G" = type { [2 x i32] }
%"class.H" = type { %"struct.A" }
%"struct.A" = type { %"struct.B" }
-%"struct.B" = type { i32*, i32*, i32* }
+%"struct.B" = type { ptr, ptr, ptr }
-define dso_local i1 @t(%class.A* %this, i32 %color, i32 %vertex) local_unnamed_addr {
+define dso_local i1 @t(ptr %this, i32 %color, i32 %vertex) local_unnamed_addr {
; CHECK-P9-LABEL: t:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: li r5, 1
br i1 %or.cond, label %cleanup16, label %for.inc
for.inc: ; preds = %lor.lhs.false, %land.lhs.true
- %arrayidx.i31.1 = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 8, i32 0, i64 undef
- %0 = load i16, i16* %arrayidx.i31.1, align 2
+ %arrayidx.i31.1 = getelementptr inbounds %class.A, ptr %this, i64 0, i32 8, i32 0, i64 undef
+ %0 = load i16, ptr %arrayidx.i31.1, align 2
%cmp8.1 = icmp ult i16 %0, 2
br i1 %cmp8.1, label %land.lhs.true.1, label %lor.lhs.false.1
%struct.foo = type { i32, i32, [0 x i8] }
-define i32 @test(%struct.foo* %X) nounwind {
- %tmp1 = getelementptr %struct.foo, %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
- %tmp = load i8, i8* %tmp1 ; <i8> [#uses=1]
+define i32 @test(ptr %X) nounwind {
+ %tmp1 = getelementptr %struct.foo, ptr %X, i32 0, i32 2, i32 100 ; <ptr> [#uses=1]
+ %tmp = load i8, ptr %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp2
}
%7 = fmul contract reassoc nsz float %6, 0x3DB2533FE0000000
%8 = fadd contract reassoc nsz float %7, %5
%9 = fmul contract reassoc nsz float %1, 0xBDB2533FE0000000
- store float %9, float* @global_val, align 4
+ store float %9, ptr @global_val, align 4
ret float %8
}
; Function Attrs: nounwind readonly
define signext i32 @main() #0 {
entry:
- %call = tail call fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*))
+ %call = tail call fastcc signext i32 @func_90(ptr byval(%struct.S1) @main.l_1554)
; CHECK-NOT: ld {{[0-9]+}}, main.l_1554@toc@l
ret i32 %call
}
; Function Attrs: nounwind readonly
-define internal fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) nocapture %p_91) #0 {
+define internal fastcc signext i32 @func_90(ptr byval(%struct.S1) nocapture %p_91) #0 {
entry:
- %0 = bitcast %struct.S1* %p_91 to i64*
- %bf.load = load i64, i64* %0, align 1
+ %bf.load = load i64, ptr %p_91, align 1
%bf.shl = shl i64 %bf.load, 26
%bf.ashr = ashr i64 %bf.shl, 54
%bf.cast = trunc i64 %bf.ashr to i32
entry:
; CHECK: li 4, 128
; CHECK-NOT: mr 4, {{.*}}
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i32 128, i32 128) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 128, i32 128) nounwind
ret i32 0
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-define zeroext i32 @test1(i64 %0, i64* %1) {
+define zeroext i32 @test1(i64 %0, ptr %1) {
; CHECK-LABEL: test1:
; CHECK: # %bb.0:
; CHECK-NEXT: stdu 1, -720(1)
; CHECK-NEXT: ld 14, 576(1) # 8-byte Folded Reload
; CHECK-NEXT: addi 1, 1, 720
; CHECK-NEXT: blr
- %3 = getelementptr inbounds i64, i64* %1, i64 144115188075855
- %4 = getelementptr i64, i64* %1, i64 144115586875855
- %5 = getelementptr i64, i64* %1, i64 144115587175855
- %6 = getelementptr i64, i64* %1, i64 144115587075855
- %7 = getelementptr i64, i64* %1, i64 144115586975855
- %8 = getelementptr i64, i64* %1, i64 144115587275855
- %9 = getelementptr i64, i64* %1, i64 144115587575855
- %10 = getelementptr i64, i64* %1, i64 144115587475855
- %11 = getelementptr i64, i64* %1, i64 144115587375855
- %12 = getelementptr i64, i64* %1, i64 144115587675855
- %13 = getelementptr i64, i64* %1, i64 144115587975855
- %14 = getelementptr i64, i64* %1, i64 144115587875855
- %15 = getelementptr i64, i64* %1, i64 144115587775855
- %16 = getelementptr i64, i64* %1, i64 144115588075855
- %17 = getelementptr i64, i64* %1, i64 144115588375855
- %18 = getelementptr i64, i64* %1, i64 144115588275855
- %19 = getelementptr i64, i64* %1, i64 144115588175855
+ %3 = getelementptr inbounds i64, ptr %1, i64 144115188075855
+ %4 = getelementptr i64, ptr %1, i64 144115586875855
+ %5 = getelementptr i64, ptr %1, i64 144115587175855
+ %6 = getelementptr i64, ptr %1, i64 144115587075855
+ %7 = getelementptr i64, ptr %1, i64 144115586975855
+ %8 = getelementptr i64, ptr %1, i64 144115587275855
+ %9 = getelementptr i64, ptr %1, i64 144115587575855
+ %10 = getelementptr i64, ptr %1, i64 144115587475855
+ %11 = getelementptr i64, ptr %1, i64 144115587375855
+ %12 = getelementptr i64, ptr %1, i64 144115587675855
+ %13 = getelementptr i64, ptr %1, i64 144115587975855
+ %14 = getelementptr i64, ptr %1, i64 144115587875855
+ %15 = getelementptr i64, ptr %1, i64 144115587775855
+ %16 = getelementptr i64, ptr %1, i64 144115588075855
+ %17 = getelementptr i64, ptr %1, i64 144115588375855
+ %18 = getelementptr i64, ptr %1, i64 144115588275855
+ %19 = getelementptr i64, ptr %1, i64 144115588175855
br label %20
20: ; preds = %2, %109
22: ; preds = %22, %20
%23 = phi i64 [ 0, %20 ], [ %107, %22 ]
%24 = mul i64 %23, 400000
- %25 = getelementptr i64, i64* %3, i64 %24
+ %25 = getelementptr i64, ptr %3, i64 %24
%26 = or i64 %23, 1
%27 = mul i64 %26, 400000
- %28 = getelementptr i64, i64* %3, i64 %27
+ %28 = getelementptr i64, ptr %3, i64 %27
%29 = or i64 %23, 2
%30 = mul i64 %29, 400000
- %31 = getelementptr i64, i64* %3, i64 %30
+ %31 = getelementptr i64, ptr %3, i64 %30
%32 = or i64 %23, 3
%33 = mul i64 %32, 400000
- %34 = getelementptr i64, i64* %3, i64 %33
+ %34 = getelementptr i64, ptr %3, i64 %33
%35 = mul i64 %23, 400000
%36 = add i64 %35, 1600000
- %37 = getelementptr i64, i64* %3, i64 %36
+ %37 = getelementptr i64, ptr %3, i64 %36
%38 = mul i64 %23, 400000
%39 = add i64 %38, 2000000
- %40 = getelementptr i64, i64* %3, i64 %39
+ %40 = getelementptr i64, ptr %3, i64 %39
%41 = mul i64 %23, 400000
%42 = add i64 %41, 2400000
- %43 = getelementptr i64, i64* %3, i64 %42
+ %43 = getelementptr i64, ptr %3, i64 %42
%44 = mul i64 %23, 400000
%45 = add i64 %44, 2800000
- %46 = getelementptr i64, i64* %3, i64 %45
+ %46 = getelementptr i64, ptr %3, i64 %45
%47 = mul i64 %23, 400000
%48 = add i64 %47, 3200000
- %49 = getelementptr i64, i64* %3, i64 %48
+ %49 = getelementptr i64, ptr %3, i64 %48
%50 = mul i64 %23, 400000
%51 = add i64 %50, 3600000
- %52 = getelementptr i64, i64* %3, i64 %51
+ %52 = getelementptr i64, ptr %3, i64 %51
%53 = mul i64 %23, 400000
%54 = add i64 %53, 4000000
- %55 = getelementptr i64, i64* %3, i64 %54
+ %55 = getelementptr i64, ptr %3, i64 %54
%56 = mul i64 %23, 400000
%57 = add i64 %56, 4400000
- %58 = getelementptr i64, i64* %3, i64 %57
- %59 = getelementptr inbounds i64, i64* %25, i64 400000
- %60 = getelementptr inbounds i64, i64* %28, i64 400000
- %61 = getelementptr inbounds i64, i64* %31, i64 400000
- %62 = getelementptr inbounds i64, i64* %34, i64 400000
- %63 = getelementptr inbounds i64, i64* %37, i64 400000
- %64 = getelementptr inbounds i64, i64* %40, i64 400000
- %65 = getelementptr inbounds i64, i64* %43, i64 400000
- %66 = getelementptr inbounds i64, i64* %46, i64 400000
- %67 = getelementptr inbounds i64, i64* %49, i64 400000
- %68 = getelementptr inbounds i64, i64* %52, i64 400000
- %69 = getelementptr inbounds i64, i64* %55, i64 400000
- %70 = getelementptr inbounds i64, i64* %58, i64 400000
- store i64 %0, i64* %59, align 8
- store i64 %0, i64* %60, align 8
- store i64 %0, i64* %61, align 8
- store i64 %0, i64* %62, align 8
- store i64 %0, i64* %63, align 8
- store i64 %0, i64* %64, align 8
- store i64 %0, i64* %65, align 8
- store i64 %0, i64* %66, align 8
- store i64 %0, i64* %67, align 8
- store i64 %0, i64* %68, align 8
- store i64 %0, i64* %69, align 8
- store i64 %0, i64* %70, align 8
- %71 = getelementptr inbounds i64, i64* %25, i64 700000
- %72 = getelementptr inbounds i64, i64* %28, i64 700000
- %73 = getelementptr inbounds i64, i64* %31, i64 700000
- %74 = getelementptr inbounds i64, i64* %34, i64 700000
- %75 = getelementptr inbounds i64, i64* %37, i64 700000
- %76 = getelementptr inbounds i64, i64* %40, i64 700000
- %77 = getelementptr inbounds i64, i64* %43, i64 700000
- %78 = getelementptr inbounds i64, i64* %46, i64 700000
- %79 = getelementptr inbounds i64, i64* %49, i64 700000
- %80 = getelementptr inbounds i64, i64* %52, i64 700000
- %81 = getelementptr inbounds i64, i64* %55, i64 700000
- %82 = getelementptr inbounds i64, i64* %58, i64 700000
- store i64 %0, i64* %71, align 8
- store i64 %0, i64* %72, align 8
- store i64 %0, i64* %73, align 8
- store i64 %0, i64* %74, align 8
- store i64 %0, i64* %75, align 8
- store i64 %0, i64* %76, align 8
- store i64 %0, i64* %77, align 8
- store i64 %0, i64* %78, align 8
- store i64 %0, i64* %79, align 8
- store i64 %0, i64* %80, align 8
- store i64 %0, i64* %81, align 8
- store i64 %0, i64* %82, align 8
- %83 = getelementptr inbounds i64, i64* %25, i64 600000
- %84 = getelementptr inbounds i64, i64* %28, i64 600000
- %85 = getelementptr inbounds i64, i64* %31, i64 600000
- %86 = getelementptr inbounds i64, i64* %34, i64 600000
- %87 = getelementptr inbounds i64, i64* %37, i64 600000
- %88 = getelementptr inbounds i64, i64* %40, i64 600000
- %89 = getelementptr inbounds i64, i64* %43, i64 600000
- %90 = getelementptr inbounds i64, i64* %46, i64 600000
- %91 = getelementptr inbounds i64, i64* %49, i64 600000
- %92 = getelementptr inbounds i64, i64* %52, i64 600000
- %93 = getelementptr inbounds i64, i64* %55, i64 600000
- %94 = getelementptr inbounds i64, i64* %58, i64 600000
- store i64 %0, i64* %83, align 8
- store i64 %0, i64* %84, align 8
- store i64 %0, i64* %85, align 8
- store i64 %0, i64* %86, align 8
- store i64 %0, i64* %87, align 8
- store i64 %0, i64* %88, align 8
- store i64 %0, i64* %89, align 8
- store i64 %0, i64* %90, align 8
- store i64 %0, i64* %91, align 8
- store i64 %0, i64* %92, align 8
- store i64 %0, i64* %93, align 8
- store i64 %0, i64* %94, align 8
- %95 = getelementptr inbounds i64, i64* %25, i64 500000
- %96 = getelementptr inbounds i64, i64* %28, i64 500000
- %97 = getelementptr inbounds i64, i64* %31, i64 500000
- %98 = getelementptr inbounds i64, i64* %34, i64 500000
- %99 = getelementptr inbounds i64, i64* %37, i64 500000
- %100 = getelementptr inbounds i64, i64* %40, i64 500000
- %101 = getelementptr inbounds i64, i64* %43, i64 500000
- %102 = getelementptr inbounds i64, i64* %46, i64 500000
- %103 = getelementptr inbounds i64, i64* %49, i64 500000
- %104 = getelementptr inbounds i64, i64* %52, i64 500000
- %105 = getelementptr inbounds i64, i64* %55, i64 500000
- %106 = getelementptr inbounds i64, i64* %58, i64 500000
- store i64 %0, i64* %95, align 8
- store i64 %0, i64* %96, align 8
- store i64 %0, i64* %97, align 8
- store i64 %0, i64* %98, align 8
- store i64 %0, i64* %99, align 8
- store i64 %0, i64* %100, align 8
- store i64 %0, i64* %101, align 8
- store i64 %0, i64* %102, align 8
- store i64 %0, i64* %103, align 8
- store i64 %0, i64* %104, align 8
- store i64 %0, i64* %105, align 8
- store i64 %0, i64* %106, align 8
+ %58 = getelementptr i64, ptr %3, i64 %57
+ %59 = getelementptr inbounds i64, ptr %25, i64 400000
+ %60 = getelementptr inbounds i64, ptr %28, i64 400000
+ %61 = getelementptr inbounds i64, ptr %31, i64 400000
+ %62 = getelementptr inbounds i64, ptr %34, i64 400000
+ %63 = getelementptr inbounds i64, ptr %37, i64 400000
+ %64 = getelementptr inbounds i64, ptr %40, i64 400000
+ %65 = getelementptr inbounds i64, ptr %43, i64 400000
+ %66 = getelementptr inbounds i64, ptr %46, i64 400000
+ %67 = getelementptr inbounds i64, ptr %49, i64 400000
+ %68 = getelementptr inbounds i64, ptr %52, i64 400000
+ %69 = getelementptr inbounds i64, ptr %55, i64 400000
+ %70 = getelementptr inbounds i64, ptr %58, i64 400000
+ store i64 %0, ptr %59, align 8
+ store i64 %0, ptr %60, align 8
+ store i64 %0, ptr %61, align 8
+ store i64 %0, ptr %62, align 8
+ store i64 %0, ptr %63, align 8
+ store i64 %0, ptr %64, align 8
+ store i64 %0, ptr %65, align 8
+ store i64 %0, ptr %66, align 8
+ store i64 %0, ptr %67, align 8
+ store i64 %0, ptr %68, align 8
+ store i64 %0, ptr %69, align 8
+ store i64 %0, ptr %70, align 8
+ %71 = getelementptr inbounds i64, ptr %25, i64 700000
+ %72 = getelementptr inbounds i64, ptr %28, i64 700000
+ %73 = getelementptr inbounds i64, ptr %31, i64 700000
+ %74 = getelementptr inbounds i64, ptr %34, i64 700000
+ %75 = getelementptr inbounds i64, ptr %37, i64 700000
+ %76 = getelementptr inbounds i64, ptr %40, i64 700000
+ %77 = getelementptr inbounds i64, ptr %43, i64 700000
+ %78 = getelementptr inbounds i64, ptr %46, i64 700000
+ %79 = getelementptr inbounds i64, ptr %49, i64 700000
+ %80 = getelementptr inbounds i64, ptr %52, i64 700000
+ %81 = getelementptr inbounds i64, ptr %55, i64 700000
+ %82 = getelementptr inbounds i64, ptr %58, i64 700000
+ store i64 %0, ptr %71, align 8
+ store i64 %0, ptr %72, align 8
+ store i64 %0, ptr %73, align 8
+ store i64 %0, ptr %74, align 8
+ store i64 %0, ptr %75, align 8
+ store i64 %0, ptr %76, align 8
+ store i64 %0, ptr %77, align 8
+ store i64 %0, ptr %78, align 8
+ store i64 %0, ptr %79, align 8
+ store i64 %0, ptr %80, align 8
+ store i64 %0, ptr %81, align 8
+ store i64 %0, ptr %82, align 8
+ %83 = getelementptr inbounds i64, ptr %25, i64 600000
+ %84 = getelementptr inbounds i64, ptr %28, i64 600000
+ %85 = getelementptr inbounds i64, ptr %31, i64 600000
+ %86 = getelementptr inbounds i64, ptr %34, i64 600000
+ %87 = getelementptr inbounds i64, ptr %37, i64 600000
+ %88 = getelementptr inbounds i64, ptr %40, i64 600000
+ %89 = getelementptr inbounds i64, ptr %43, i64 600000
+ %90 = getelementptr inbounds i64, ptr %46, i64 600000
+ %91 = getelementptr inbounds i64, ptr %49, i64 600000
+ %92 = getelementptr inbounds i64, ptr %52, i64 600000
+ %93 = getelementptr inbounds i64, ptr %55, i64 600000
+ %94 = getelementptr inbounds i64, ptr %58, i64 600000
+ store i64 %0, ptr %83, align 8
+ store i64 %0, ptr %84, align 8
+ store i64 %0, ptr %85, align 8
+ store i64 %0, ptr %86, align 8
+ store i64 %0, ptr %87, align 8
+ store i64 %0, ptr %88, align 8
+ store i64 %0, ptr %89, align 8
+ store i64 %0, ptr %90, align 8
+ store i64 %0, ptr %91, align 8
+ store i64 %0, ptr %92, align 8
+ store i64 %0, ptr %93, align 8
+ store i64 %0, ptr %94, align 8
+ %95 = getelementptr inbounds i64, ptr %25, i64 500000
+ %96 = getelementptr inbounds i64, ptr %28, i64 500000
+ %97 = getelementptr inbounds i64, ptr %31, i64 500000
+ %98 = getelementptr inbounds i64, ptr %34, i64 500000
+ %99 = getelementptr inbounds i64, ptr %37, i64 500000
+ %100 = getelementptr inbounds i64, ptr %40, i64 500000
+ %101 = getelementptr inbounds i64, ptr %43, i64 500000
+ %102 = getelementptr inbounds i64, ptr %46, i64 500000
+ %103 = getelementptr inbounds i64, ptr %49, i64 500000
+ %104 = getelementptr inbounds i64, ptr %52, i64 500000
+ %105 = getelementptr inbounds i64, ptr %55, i64 500000
+ %106 = getelementptr inbounds i64, ptr %58, i64 500000
+ store i64 %0, ptr %95, align 8
+ store i64 %0, ptr %96, align 8
+ store i64 %0, ptr %97, align 8
+ store i64 %0, ptr %98, align 8
+ store i64 %0, ptr %99, align 8
+ store i64 %0, ptr %100, align 8
+ store i64 %0, ptr %101, align 8
+ store i64 %0, ptr %102, align 8
+ store i64 %0, ptr %103, align 8
+ store i64 %0, ptr %104, align 8
+ store i64 %0, ptr %105, align 8
+ store i64 %0, ptr %106, align 8
%107 = add i64 %23, 12
%108 = icmp eq i64 %107, 996
br i1 %108, label %109, label %22
109: ; preds = %22
- store i64 %0, i64* %4, align 8
- store i64 %0, i64* %5, align 8
- store i64 %0, i64* %6, align 8
- store i64 %0, i64* %7, align 8
- store i64 %0, i64* %8, align 8
- store i64 %0, i64* %9, align 8
- store i64 %0, i64* %10, align 8
- store i64 %0, i64* %11, align 8
- store i64 %0, i64* %12, align 8
- store i64 %0, i64* %13, align 8
- store i64 %0, i64* %14, align 8
- store i64 %0, i64* %15, align 8
- store i64 %0, i64* %16, align 8
- store i64 %0, i64* %17, align 8
- store i64 %0, i64* %18, align 8
- store i64 %0, i64* %19, align 8
+ store i64 %0, ptr %4, align 8
+ store i64 %0, ptr %5, align 8
+ store i64 %0, ptr %6, align 8
+ store i64 %0, ptr %7, align 8
+ store i64 %0, ptr %8, align 8
+ store i64 %0, ptr %9, align 8
+ store i64 %0, ptr %10, align 8
+ store i64 %0, ptr %11, align 8
+ store i64 %0, ptr %12, align 8
+ store i64 %0, ptr %13, align 8
+ store i64 %0, ptr %14, align 8
+ store i64 %0, ptr %15, align 8
+ store i64 %0, ptr %16, align 8
+ store i64 %0, ptr %17, align 8
+ store i64 %0, ptr %18, align 8
+ store i64 %0, ptr %19, align 8
%110 = add nuw nsw i32 %21, 1
%111 = icmp eq i32 %110, 400000
br i1 %111, label %112, label %20
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-@global.6 = external global i32*
+@global.6 = external global ptr
-declare void @barney.88(i1, i32*)
-declare void @barney.94(i8*, i32)
+declare void @barney.88(i1, ptr)
+declare void @barney.94(ptr, i32)
define void @redundancy_on_ppc_only(i1 %arg7) nounwind {
; PPC64LE-LABEL: redundancy_on_ppc_only:
br label %bb10
bb10: ; preds = %bb
- call void @barney.88(i1 %arg7, i32* null)
+ call void @barney.88(i1 %arg7, ptr null)
ret void
}
; PPC64LE-NEXT: std 4, 0(3)
; PPC64LE-NEXT: bl barney.94
; PPC64LE-NEXT: nop
- store i32* null, i32** @global.6
- call void @barney.94(i8* undef, i32 0)
+ store ptr null, ptr @global.6
+ call void @barney.94(ptr undef, i32 0)
unreachable
}
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix=AIX64
; RUN: llc -verify-machineinstrs -mtriple=powerpc-ibm-aix-xcoff < %s | FileCheck %s --check-prefix=AIX32
-define signext i32 @test1(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test1(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
entry:
; CHECK-LABEL: test1:
; CHECK: std 2, 24(1)
ret i32 %add2
}
-define signext i32 @test2(i32 signext %i, i32 signext %j, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test2(i32 signext %i, i32 signext %j, ptr nocapture %Func, ptr nocapture %Func2) {
entry:
; CHECK-LABEL: test2:
; CHECK: std 2, 24(1)
}
; Check for multiple TOC saves with if then else where neither dominates the other.
-define signext i32 @test3(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test3(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
; CHECK-LABEL: test3:
; CHECK: std 2, 24(1)
; CHECK-NOT: std 2, 24(1)
ret i32 %add4
}
-define signext i32 @test4(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test4(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
; CHECK-LABEL: test4:
; CHECK: std 2, 24(1)
; CHECK-NOT: std 2, 24(1)
}
; Check for multiple TOC saves with if then where neither is redundant.
-define signext i32 @test5(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture readnone %Func2) {
+define signext i32 @test5(i32 signext %i, ptr nocapture %Func, ptr nocapture readnone %Func2) {
entry:
; CHECK-LABEL: test5:
; CHECK: std 2, 24(1)
}
; Check for multiple TOC saves if there are dynamic allocations on the stack.
-define signext i32 @test6(i32 signext %i, i32 (i32)* nocapture %Func, i32 (i32)* nocapture %Func2) {
+define signext i32 @test6(i32 signext %i, ptr nocapture %Func, ptr nocapture %Func2) {
entry:
; CHECK-LABEL: test6:
; CHECK: std 2, 24(1)
; AIX32: stw 2, 20(1)
%conv = sext i32 %i to i64
%0 = alloca i8, i64 %conv, align 16
- %1 = bitcast i8* %0 to i32*
%call = tail call signext i32 %Func(i32 signext %i)
- call void @useAlloca(i32* nonnull %1, i32 signext %call)
+ call void @useAlloca(ptr nonnull %0, i32 signext %call)
%call1 = call signext i32 %Func2(i32 signext %i)
%add2 = add nsw i32 %call1, %call
ret i32 %add2
}
-declare void @useAlloca(i32*, i32 signext)
+declare void @useAlloca(ptr, i32 signext)
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
-%struct.S1998 = type { [2 x i32*], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
+%struct.Info = type { i32, i32, ptr, ptr, ptr, [32 x ptr], i64, [32 x i64], i64, i64, i64, [32 x i64] }
+%struct.S1998 = type { [2 x ptr], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
%struct.anon = type { [16 x double], i32, i16, i32, [3 x i8], [6 x i8], [4 x i32], i8 }
@info = global %struct.Info zeroinitializer, align 8
%agg.tmp117 = alloca %struct.S1998, align 16
%agg.tmp118 = alloca %struct.S1998, align 16
%agg.tmp119 = alloca %struct.S1998, align 16
- call void @llvm.memset.p0i8.i64(i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i1 false)
- call void @llvm.memset.p0i8.i64(i8* align 16 bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i1 false)
- call void @llvm.memset.p0i8.i64(i8* align 8 bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i1 false)
- store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 2), align 8
- store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 3), align 8
- store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 4), align 8
- store i64 5168, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 6), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 9), align 8
- store i64 16, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 10), align 8
- %0 = load i64, i64* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 8), align 8
+ call void @llvm.memset.p0.i64(ptr align 16 @s1998, i8 0, i64 5168, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 16 @a1998, i8 0, i64 25840, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 8 @info, i8 0, i64 832, i1 false)
+ store ptr @s1998, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 2), align 8
+ store ptr @a1998, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 3), align 8
+ store ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 3), ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 4), align 8
+ store i64 5168, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 6), align 8
+ store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 8), align 8
+ store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 9), align 8
+ store i64 16, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 10), align 8
+ %0 = load i64, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 8), align 8
%sub = sub i64 %0, 1
- %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
+ %and = and i64 ptrtoint (ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 3) to i64), %sub
%tobool = icmp ne i64 %and, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %1 = load i32, i32* @fails, align 4
+ %1 = load i32, ptr @fails, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* @fails, align 4
+ store i32 %inc, ptr @fails, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
- store i32 0, i32* %i, align 4
- store i32 0, i32* %j, align 4
- %2 = load i32, i32* %i, align 4
+ store i32 0, ptr %i, align 4
+ store i32 0, ptr %j, align 4
+ %2 = load i32, ptr %i, align 4
%idxprom = sext i32 %2 to i64
- %arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
- store i8* bitcast (i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
- %3 = load i32, i32* %i, align 4
+ %arrayidx = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 0, i64 1), ptr %arrayidx, align 8
+ %3 = load i32, ptr %i, align 4
%idxprom1 = sext i32 %3 to i64
- %arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
- store i64 8, i64* %arrayidx2, align 8
- %4 = load i32, i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom1
+ store i64 8, ptr %arrayidx2, align 8
+ %4 = load i32, ptr %i, align 4
%idxprom3 = sext i32 %4 to i64
- %arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
- store i64 8, i64* %arrayidx4, align 8
- store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
- store i32* getelementptr inbounds ([256 x i32], [256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
- %5 = load i32, i32* %i, align 4
+ %arrayidx4 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom3
+ store i64 8, ptr %arrayidx4, align 8
+ store ptr getelementptr inbounds ([256 x i32], ptr @intarray, i32 0, i64 190), ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 0, i64 1), align 8
+ store ptr getelementptr inbounds ([256 x i32], ptr @intarray, i32 0, i64 241), ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 0, i64 1), align 8
+ %5 = load i32, ptr %i, align 4
%inc5 = add nsw i32 %5, 1
- store i32 %inc5, i32* %i, align 4
- %6 = load i32, i32* %i, align 4
+ store i32 %inc5, ptr %i, align 4
+ %6 = load i32, ptr %i, align 4
%idxprom6 = sext i32 %6 to i64
- %arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
- store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
- %7 = load i32, i32* %i, align 4
+ %arrayidx7 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom6
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 1), ptr %arrayidx7, align 8
+ %7 = load i32, ptr %i, align 4
%idxprom8 = sext i32 %7 to i64
- %arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
- store i64 8, i64* %arrayidx9, align 8
- %8 = load i32, i32* %i, align 4
+ %arrayidx9 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom8
+ store i64 8, ptr %arrayidx9, align 8
+ %8 = load i32, ptr %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
- store i64 8, i64* %arrayidx11, align 8
- store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 1), align 8
- store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
- %9 = load i32, i32* %i, align 4
+ %arrayidx11 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom10
+ store i64 8, ptr %arrayidx11, align 8
+ store i64 -3866974208859106459, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 1), align 8
+ store i64 -185376695371304091, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 1), align 8
+ %9 = load i32, ptr %i, align 4
%inc12 = add nsw i32 %9, 1
- store i32 %inc12, i32* %i, align 4
- %10 = load i32, i32* %i, align 4
+ store i32 %inc12, ptr %i, align 4
+ %10 = load i32, ptr %i, align 4
%idxprom13 = sext i32 %10 to i64
- %arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
- store i8* bitcast (i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
- %11 = load i32, i32* %i, align 4
+ %arrayidx14 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom13
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 2), ptr %arrayidx14, align 8
+ %11 = load i32, ptr %i, align 4
%idxprom15 = sext i32 %11 to i64
- %arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
- store i64 8, i64* %arrayidx16, align 8
- %12 = load i32, i32* %i, align 4
+ %arrayidx16 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom15
+ store i64 8, ptr %arrayidx16, align 8
+ %12 = load i32, ptr %i, align 4
%idxprom17 = sext i32 %12 to i64
- %arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
- store i64 8, i64* %arrayidx18, align 8
- store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 2), align 8
- store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
- %13 = load i32, i32* %i, align 4
+ %arrayidx18 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom17
+ store i64 8, ptr %arrayidx18, align 8
+ store i64 -963638028680427187, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 2), align 8
+ store i64 7510542175772455554, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 2), align 8
+ %13 = load i32, ptr %i, align 4
%inc19 = add nsw i32 %13, 1
- store i32 %inc19, i32* %i, align 4
- %14 = load i32, i32* %i, align 4
+ store i32 %inc19, ptr %i, align 4
+ %14 = load i32, ptr %i, align 4
%idxprom20 = sext i32 %14 to i64
- %arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
- store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
- %15 = load i32, i32* %i, align 4
+ %arrayidx21 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom20
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 3), ptr %arrayidx21, align 8
+ %15 = load i32, ptr %i, align 4
%idxprom22 = sext i32 %15 to i64
- %arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
- store i64 8, i64* %arrayidx23, align 8
- %16 = load i32, i32* %i, align 4
+ %arrayidx23 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom22
+ store i64 8, ptr %arrayidx23, align 8
+ %16 = load i32, ptr %i, align 4
%idxprom24 = sext i32 %16 to i64
- %arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
- store i64 16, i64* %arrayidx25, align 8
- store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 3), align 16
- store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
- %17 = load i32, i32* %i, align 4
+ %arrayidx25 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom24
+ store i64 16, ptr %arrayidx25, align 8
+ store double 0xC0F8783300000000, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 3), align 16
+ store double 0xC10DF3CCC0000000, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 3), align 16
+ %17 = load i32, ptr %i, align 4
%inc26 = add nsw i32 %17, 1
- store i32 %inc26, i32* %i, align 4
- %18 = load i32, i32* %i, align 4
+ store i32 %inc26, ptr %i, align 4
+ %18 = load i32, ptr %i, align 4
%idxprom27 = sext i32 %18 to i64
- %arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
- store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
- %19 = load i32, i32* %i, align 4
+ %arrayidx28 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom27
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 4), ptr %arrayidx28, align 8
+ %19 = load i32, ptr %i, align 4
%idxprom29 = sext i32 %19 to i64
- %arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
- store i64 2, i64* %arrayidx30, align 8
- %20 = load i32, i32* %i, align 4
+ %arrayidx30 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom29
+ store i64 2, ptr %arrayidx30, align 8
+ %20 = load i32, ptr %i, align 4
%idxprom31 = sext i32 %20 to i64
- %arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
- store i64 2, i64* %arrayidx32, align 8
- store i16 -15897, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 4), align 2
- store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
- %21 = load i32, i32* %i, align 4
+ %arrayidx32 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom31
+ store i64 2, ptr %arrayidx32, align 8
+ store i16 -15897, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 4), align 2
+ store i16 30935, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 4), align 2
+ %21 = load i32, ptr %i, align 4
%inc33 = add nsw i32 %21, 1
- store i32 %inc33, i32* %i, align 4
- store i32 -419541644, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 5), align 4
- store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
- %22 = load i32, i32* %j, align 4
+ store i32 %inc33, ptr %i, align 4
+ store i32 -419541644, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 5), align 4
+ store i32 2125926812, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 5), align 4
+ %22 = load i32, ptr %j, align 4
%inc34 = add nsw i32 %22, 1
- store i32 %inc34, i32* %j, align 4
- %23 = load i32, i32* %i, align 4
+ store i32 %inc34, ptr %j, align 4
+ %23 = load i32, ptr %i, align 4
%idxprom35 = sext i32 %23 to i64
- %arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
- store i8* bitcast (double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
- %24 = load i32, i32* %i, align 4
+ %arrayidx36 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom35
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), ptr %arrayidx36, align 8
+ %24 = load i32, ptr %i, align 4
%idxprom37 = sext i32 %24 to i64
- %arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
- store i64 8, i64* %arrayidx38, align 8
- %25 = load i32, i32* %i, align 4
+ %arrayidx38 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom37
+ store i64 8, ptr %arrayidx38, align 8
+ %25 = load i32, ptr %i, align 4
%idxprom39 = sext i32 %25 to i64
- %arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
- store i64 8, i64* %arrayidx40, align 8
- store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
- store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
- %26 = load i32, i32* %i, align 4
+ %arrayidx40 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom39
+ store i64 8, ptr %arrayidx40, align 8
+ store double 0xC0FC765780000000, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
+ store double 0xC1025CD7A0000000, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
+ %26 = load i32, ptr %i, align 4
%inc41 = add nsw i32 %26, 1
- store i32 %inc41, i32* %i, align 4
- %bf.load = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ store i32 %inc41, ptr %i, align 4
+ %bf.load = load i32, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 1), align 8
%bf.clear = and i32 %bf.load, 7
%bf.set = or i32 %bf.clear, 16
- store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
- %bf.load42 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ store i32 %bf.set, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.load42 = load i32, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
%bf.clear43 = and i32 %bf.load42, 7
%bf.set44 = or i32 %bf.clear43, 24
- store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
- %27 = load i32, i32* %j, align 4
+ store i32 %bf.set44, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %27 = load i32, ptr %j, align 4
%inc45 = add nsw i32 %27, 1
- store i32 %inc45, i32* %j, align 4
- %bf.load46 = load i16, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ store i32 %inc45, ptr %j, align 4
+ %bf.load46 = load i16, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 2), align 4
%bf.clear47 = and i16 %bf.load46, 127
- store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
- %bf.load48 = load i16, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ store i16 %bf.clear47, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.load48 = load i16, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
%bf.clear49 = and i16 %bf.load48, 127
- store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
- %28 = load i32, i32* %j, align 4
+ store i16 %bf.clear49, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %28 = load i32, ptr %j, align 4
%inc50 = add nsw i32 %28, 1
- store i32 %inc50, i32* %j, align 4
- %bf.load51 = load i32, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ store i32 %inc50, ptr %j, align 4
+ %bf.load51 = load i32, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 3), align 8
%bf.clear52 = and i32 %bf.load51, 63
- store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
- %bf.load53 = load i32, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ store i32 %bf.clear52, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.load53 = load i32, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
%bf.clear54 = and i32 %bf.load53, 63
%bf.set55 = or i32 %bf.clear54, 64
- store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
- %29 = load i32, i32* %j, align 4
+ store i32 %bf.set55, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %29 = load i32, ptr %j, align 4
%inc56 = add nsw i32 %29, 1
- store i32 %inc56, i32* %j, align 4
- %bf.load57 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ store i32 %inc56, ptr %j, align 4
+ %bf.load57 = load i24, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 4), align 4
%bf.clear58 = and i24 %bf.load57, 63
- store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
- %bf.load59 = load i24, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ store i24 %bf.clear58, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 4), align 4
+ %bf.load59 = load i24, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 4), align 4
%bf.clear60 = and i24 %bf.load59, 63
- store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
- %30 = load i32, i32* %j, align 4
+ store i24 %bf.clear60, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 4), align 4
+ %30 = load i32, ptr %j, align 4
%inc61 = add nsw i32 %30, 1
- store i32 %inc61, i32* %j, align 4
- %31 = load i32, i32* %i, align 4
+ store i32 %inc61, ptr %j, align 4
+ %31 = load i32, ptr %i, align 4
%idxprom62 = sext i32 %31 to i64
- %arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
- store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
- %32 = load i32, i32* %i, align 4
+ %arrayidx63 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom62
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), ptr %arrayidx63, align 8
+ %32 = load i32, ptr %i, align 4
%idxprom64 = sext i32 %32 to i64
- %arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
- store i64 1, i64* %arrayidx65, align 8
- %33 = load i32, i32* %i, align 4
+ %arrayidx65 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom64
+ store i64 1, ptr %arrayidx65, align 8
+ %33 = load i32, ptr %i, align 4
%idxprom66 = sext i32 %33 to i64
- %arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
- store i64 1, i64* %arrayidx67, align 8
- store i8 -83, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
- store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
- %34 = load i32, i32* %i, align 4
+ %arrayidx67 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom66
+ store i64 1, ptr %arrayidx67, align 8
+ store i8 -83, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
+ store i8 -67, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
+ %34 = load i32, ptr %i, align 4
%inc68 = add nsw i32 %34, 1
- store i32 %inc68, i32* %i, align 4
- %35 = load i32, i32* %i, align 4
+ store i32 %inc68, ptr %i, align 4
+ %35 = load i32, ptr %i, align 4
%idxprom69 = sext i32 %35 to i64
- %arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
- store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
- %36 = load i32, i32* %i, align 4
+ %arrayidx70 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom69
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), ptr %arrayidx70, align 8
+ %36 = load i32, ptr %i, align 4
%idxprom71 = sext i32 %36 to i64
- %arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
- store i64 1, i64* %arrayidx72, align 8
- %37 = load i32, i32* %i, align 4
+ %arrayidx72 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom71
+ store i64 1, ptr %arrayidx72, align 8
+ %37 = load i32, ptr %i, align 4
%idxprom73 = sext i32 %37 to i64
- %arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
- store i64 1, i64* %arrayidx74, align 8
- store i8 34, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
- store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
- %38 = load i32, i32* %i, align 4
+ %arrayidx74 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom73
+ store i64 1, ptr %arrayidx74, align 8
+ store i8 34, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
+ store i8 64, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
+ %38 = load i32, ptr %i, align 4
%inc75 = add nsw i32 %38, 1
- store i32 %inc75, i32* %i, align 4
- %39 = load i32, i32* %i, align 4
+ store i32 %inc75, ptr %i, align 4
+ %39 = load i32, ptr %i, align 4
%idxprom76 = sext i32 %39 to i64
- %arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
- store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
- %40 = load i32, i32* %i, align 4
+ %arrayidx77 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom76
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), ptr %arrayidx77, align 8
+ %40 = load i32, ptr %i, align 4
%idxprom78 = sext i32 %40 to i64
- %arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
- store i64 4, i64* %arrayidx79, align 8
- %41 = load i32, i32* %i, align 4
+ %arrayidx79 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom78
+ store i64 4, ptr %arrayidx79, align 8
+ %41 = load i32, ptr %i, align 4
%idxprom80 = sext i32 %41 to i64
- %arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
- store i64 4, i64* %arrayidx81, align 8
- store i32 -3, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
- store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
- %42 = load i32, i32* %i, align 4
+ %arrayidx81 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom80
+ store i64 4, ptr %arrayidx81, align 8
+ store i32 -3, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
+ store i32 -3, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
+ %42 = load i32, ptr %i, align 4
%inc82 = add nsw i32 %42, 1
- store i32 %inc82, i32* %i, align 4
- %43 = load i32, i32* %i, align 4
+ store i32 %inc82, ptr %i, align 4
+ %43 = load i32, ptr %i, align 4
%idxprom83 = sext i32 %43 to i64
- %arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
- store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
- %44 = load i32, i32* %i, align 4
+ %arrayidx84 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom83
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 7), ptr %arrayidx84, align 8
+ %44 = load i32, ptr %i, align 4
%idxprom85 = sext i32 %44 to i64
- %arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
- store i64 1, i64* %arrayidx86, align 8
- %45 = load i32, i32* %i, align 4
+ %arrayidx86 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom85
+ store i64 1, ptr %arrayidx86, align 8
+ %45 = load i32, ptr %i, align 4
%idxprom87 = sext i32 %45 to i64
- %arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
- store i64 1, i64* %arrayidx88, align 8
- store i8 106, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
- store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
- %46 = load i32, i32* %i, align 4
+ %arrayidx88 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom87
+ store i64 1, ptr %arrayidx88, align 8
+ store i8 106, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 6, i64 4, i32 7), align 1
+ store i8 -102, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
+ %46 = load i32, ptr %i, align 4
%inc89 = add nsw i32 %46, 1
- store i32 %inc89, i32* %i, align 4
- %47 = load i32, i32* %i, align 4
+ store i32 %inc89, ptr %i, align 4
+ %47 = load i32, ptr %i, align 4
%idxprom90 = sext i32 %47 to i64
- %arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
- store i8* bitcast (i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
- %48 = load i32, i32* %i, align 4
+ %arrayidx91 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom90
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 7), ptr %arrayidx91, align 8
+ %48 = load i32, ptr %i, align 4
%idxprom92 = sext i32 %48 to i64
- %arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
- store i64 2, i64* %arrayidx93, align 8
- %49 = load i32, i32* %i, align 4
+ %arrayidx93 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom92
+ store i64 2, ptr %arrayidx93, align 8
+ %49 = load i32, ptr %i, align 4
%idxprom94 = sext i32 %49 to i64
- %arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
- store i64 2, i64* %arrayidx95, align 8
- store i16 29665, i16* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 7), align 2
- store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
- %50 = load i32, i32* %i, align 4
+ %arrayidx95 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom94
+ store i64 2, ptr %arrayidx95, align 8
+ store i16 29665, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 7), align 2
+ store i16 7107, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 7), align 2
+ %50 = load i32, ptr %i, align 4
%inc96 = add nsw i32 %50, 1
- store i32 %inc96, i32* %i, align 4
- %51 = load i32, i32* %i, align 4
+ store i32 %inc96, ptr %i, align 4
+ %51 = load i32, ptr %i, align 4
%idxprom97 = sext i32 %51 to i64
- %arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
- store i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
- %52 = load i32, i32* %i, align 4
+ %arrayidx98 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom97
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 8), ptr %arrayidx98, align 8
+ %52 = load i32, ptr %i, align 4
%idxprom99 = sext i32 %52 to i64
- %arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
- store i64 1, i64* %arrayidx100, align 8
- %53 = load i32, i32* %i, align 4
+ %arrayidx100 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom99
+ store i64 1, ptr %arrayidx100, align 8
+ %53 = load i32, ptr %i, align 4
%idxprom101 = sext i32 %53 to i64
- %arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
- store i64 1, i64* %arrayidx102, align 8
- store i8 52, i8* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 8), align 1
- store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
- %54 = load i32, i32* %i, align 4
+ %arrayidx102 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom101
+ store i64 1, ptr %arrayidx102, align 8
+ store i8 52, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 8), align 1
+ store i8 -86, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 8), align 1
+ %54 = load i32, ptr %i, align 4
%inc103 = add nsw i32 %54, 1
- store i32 %inc103, i32* %i, align 4
- %55 = load i32, i32* %i, align 4
+ store i32 %inc103, ptr %i, align 4
+ %55 = load i32, ptr %i, align 4
%idxprom104 = sext i32 %55 to i64
- %arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
- store i8* bitcast (i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
- %56 = load i32, i32* %i, align 4
+ %arrayidx105 = getelementptr inbounds [32 x ptr], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 5), i32 0, i64 %idxprom104
+ store ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 9), ptr %arrayidx105, align 8
+ %56 = load i32, ptr %i, align 4
%idxprom106 = sext i32 %56 to i64
- %arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
- store i64 4, i64* %arrayidx107, align 8
- %57 = load i32, i32* %i, align 4
+ %arrayidx107 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 7), i32 0, i64 %idxprom106
+ store i64 4, ptr %arrayidx107, align 8
+ %57 = load i32, ptr %i, align 4
%idxprom108 = sext i32 %57 to i64
- %arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
- store i64 4, i64* %arrayidx109, align 8
- store i32 -54118453, i32* getelementptr inbounds (%struct.S1998, %struct.S1998* @s1998, i32 0, i32 9), align 4
- store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
- %58 = load i32, i32* %i, align 4
+ %arrayidx109 = getelementptr inbounds [32 x i64], ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 11), i32 0, i64 %idxprom108
+ store i64 4, ptr %arrayidx109, align 8
+ store i32 -54118453, ptr getelementptr inbounds (%struct.S1998, ptr @s1998, i32 0, i32 9), align 4
+ store i32 1668755823, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2, i32 9), align 4
+ %58 = load i32, ptr %i, align 4
%inc110 = add nsw i32 %58, 1
- store i32 %inc110, i32* %i, align 4
- store i32 %inc110, i32* %tmp
- %59 = load i32, i32* %tmp
- %60 = load i32, i32* %i, align 4
- store i32 %60, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 0), align 4
- %61 = load i32, i32* %j, align 4
- store i32 %61, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 1), align 4
- %62 = bitcast %struct.S1998* %agg.tmp111 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %62, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- %63 = bitcast %struct.S1998* %agg.tmp112 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %63, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- call void @check1998(%struct.S1998* sret(%struct.S1998) %agg.tmp, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp112)
- call void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %agg.tmp)
- %64 = bitcast %struct.S1998* %agg.tmp113 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %64, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- %65 = bitcast %struct.S1998* %agg.tmp114 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %65, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- %66 = bitcast %struct.S1998* %agg.tmp115 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %66, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp113, i64 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp114, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp115)
- %67 = bitcast %struct.S1998* %agg.tmp116 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %67, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- %68 = bitcast %struct.S1998* %agg.tmp117 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %68, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- %69 = bitcast %struct.S1998* %agg.tmp118 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %69, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false)
- %70 = bitcast %struct.S1998* %agg.tmp119 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %70, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false)
- call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp116, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp118, %struct.S1998* byval(%struct.S1998) align 16 %agg.tmp119)
+ store i32 %inc110, ptr %i, align 4
+ store i32 %inc110, ptr %tmp
+ %59 = load i32, ptr %tmp
+ %60 = load i32, ptr %i, align 4
+ store i32 %60, ptr @info, align 4
+ %61 = load i32, ptr %j, align 4
+ store i32 %61, ptr getelementptr inbounds (%struct.Info, ptr @info, i32 0, i32 1), align 4
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp111, ptr align 16 @s1998, i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp112, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+ call void @check1998(ptr sret(%struct.S1998) %agg.tmp, ptr byval(%struct.S1998) align 16 %agg.tmp111, ptr getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 1), ptr byval(%struct.S1998) align 16 %agg.tmp112)
+ call void @checkx1998(ptr byval(%struct.S1998) align 16 %agg.tmp)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp113, ptr align 16 @s1998, i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp114, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp115, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+ call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, ptr byval(%struct.S1998) align 16 %agg.tmp113, i64 2, ptr byval(%struct.S1998) align 16 %agg.tmp114, ptr byval(%struct.S1998) align 16 %agg.tmp115)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp116, ptr align 16 @s1998, i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp117, ptr align 16 @s1998, i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp118, ptr align 16 getelementptr inbounds ([5 x %struct.S1998], ptr @a1998, i32 0, i64 2), i64 5168, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %agg.tmp119, ptr align 16 @s1998, i64 5168, i1 false)
+ call void (i32, ...) @check1998va(i32 signext 2, ptr byval(%struct.S1998) align 16 %agg.tmp116, ptr byval(%struct.S1998) align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, ptr byval(%struct.S1998) align 16 %agg.tmp118, ptr byval(%struct.S1998) align 16 %agg.tmp119)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @check1998(%struct.S1998* sret(%struct.S1998), %struct.S1998* byval(%struct.S1998) align 16, %struct.S1998*, %struct.S1998* byval(%struct.S1998) align 16)
+declare void @check1998(ptr sret(%struct.S1998), ptr byval(%struct.S1998) align 16, ptr, ptr byval(%struct.S1998) align 16)
declare void @check1998va(i32 signext, ...)
-declare void @checkx1998(%struct.S1998* byval(%struct.S1998) align 16 %arg)
+declare void @checkx1998(ptr byval(%struct.S1998) align 16 %arg)
@s2760 = external global %struct.S2760
@fails = external global i32
-define void @check2760(%struct.S2760* noalias sret(%struct.S2760) %agg.result, %struct.S2760* byval(%struct.S2760) align 16, %struct.S2760* %arg1, %struct.S2760* byval(%struct.S2760) align 16) {
+define void @check2760(ptr noalias sret(%struct.S2760) %agg.result, ptr byval(%struct.S2760) align 16, ptr %arg1, ptr byval(%struct.S2760) align 16) {
entry:
%arg0 = alloca %struct.S2760, align 32
%arg2 = alloca %struct.S2760, align 32
- %arg1.addr = alloca %struct.S2760*, align 8
+ %arg1.addr = alloca ptr, align 8
%ret = alloca %struct.S2760, align 32
%b1 = alloca %struct.S2760, align 32
%b2 = alloca %struct.S2760, align 32
- %2 = bitcast %struct.S2760* %arg0 to i8*
- %3 = bitcast %struct.S2760* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %2, i8* align 16 %3, i64 11104, i1 false)
- %4 = bitcast %struct.S2760* %arg2 to i8*
- %5 = bitcast %struct.S2760* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %4, i8* align 16 %5, i64 11104, i1 false)
- store %struct.S2760* %arg1, %struct.S2760** %arg1.addr, align 8
- %6 = bitcast %struct.S2760* %ret to i8*
- call void @llvm.memset.p0i8.i64(i8* align 32 %6, i8 0, i64 11104, i1 false)
- %7 = bitcast %struct.S2760* %b1 to i8*
- call void @llvm.memset.p0i8.i64(i8* align 32 %7, i8 0, i64 11104, i1 false)
- %8 = bitcast %struct.S2760* %b2 to i8*
- call void @llvm.memset.p0i8.i64(i8* align 32 %8, i8 0, i64 11104, i1 false)
- %b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
- %g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
- %9 = load i64, i64* %g, align 8
- %10 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
- %cmp = icmp ne i64 %9, %10
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %arg0, ptr align 16 %0, i64 11104, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 16 %arg2, ptr align 16 %1, i64 11104, i1 false)
+ store ptr %arg1, ptr %arg1.addr, align 8
+ call void @llvm.memset.p0.i64(ptr align 32 %ret, i8 0, i64 11104, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 32 %b1, i8 0, i64 11104, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 32 %b2, i8 0, i64 11104, i1 false)
+ %b = getelementptr inbounds %struct.S2760, ptr %arg0, i32 0, i32 1
+ %g = getelementptr inbounds %struct.anon, ptr %b, i32 0, i32 1
+ %2 = load i64, ptr %g, align 8
+ %3 = load i64, ptr getelementptr inbounds (%struct.S2760, ptr @s2760, i32 0, i32 1, i32 1), align 8
+ %cmp = icmp ne i64 %2, %3
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %11 = load i32, i32* @fails, align 4
- %inc = add nsw i32 %11, 1
- store i32 %inc, i32* @fails, align 4
+ %4 = load i32, ptr @fails, align 4
+ %inc = add nsw i32 %4, 1
+ store i32 %inc, ptr @fails, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
- %12 = load i64, i64* getelementptr inbounds (%struct.S2760, %struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
- %b3 = getelementptr inbounds %struct.S2760, %struct.S2760* %ret, i32 0, i32 1
- %g4 = getelementptr inbounds %struct.anon, %struct.anon* %b3, i32 0, i32 1
- store i64 %12, i64* %g4, align 8
- %13 = bitcast %struct.S2760* %agg.result to i8*
- %14 = bitcast %struct.S2760* %ret to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %13, i8* align 32 %14, i64 11104, i1 false)
+ %5 = load i64, ptr getelementptr inbounds (%struct.S2760, ptr @s2760, i32 0, i32 1, i32 1), align 8
+ %b3 = getelementptr inbounds %struct.S2760, ptr %ret, i32 0, i32 1
+ %g4 = getelementptr inbounds %struct.anon, ptr %b3, i32 0, i32 1
+ store i64 %5, ptr %g4, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr align 32 %agg.result, ptr align 32 %ret, i64 11104, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
-@IndirectCallPtr = dso_local local_unnamed_addr global void (...)* null, align 8
+@IndirectCallPtr = dso_local local_unnamed_addr global ptr null, align 8
define dso_local signext i32 @func1() local_unnamed_addr #0 {
entry:
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%0 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %0, i32 0
%sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext3 = extractelement <2 x double> %1, i32 1
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext3, double 9.900000e+01, metadata !"une", metadata !"fpexcept.ignore") #0
define dso_local signext i32 @func2() local_unnamed_addr #0 {
entry:
- %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
- %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ %call = tail call <2 x double> @getvector1() #0
+ %call1 = tail call <2 x double> @getvector2() #0
+ tail call void @directCall() #0
%mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %mul, i32 0
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %vecext, double 4.000000e+00, metadata !"oeq", metadata !"fpexcept.ignore") #0
br i1 %cmp, label %cleanup, label %if.end
if.end: ; preds = %entry
- tail call void bitcast (void (...)* @directCall to void ()*)() #0
+ tail call void @directCall() #0
%mul10 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%0 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul10) #0
br label %cleanup
define dso_local signext i32 @func3() local_unnamed_addr #0 {
entry:
- %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %0 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %0() #0
%1 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %1, i32 0
%sub = tail call double @llvm.experimental.constrained.fsub.f64(double %vecext, double -9.900000e+01, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %sub, metadata !"fpexcept.ignore") #0
- %2 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %2 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %2() #0
%3 = tail call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> <double -9.990000e+01, double 9.990000e+01>, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext4 = extractelement <2 x double> %3, i32 1
define dso_local signext i32 @func4() local_unnamed_addr #0 {
entry:
- %call = tail call <2 x double> bitcast (<2 x double> (...)* @getvector1 to <2 x double> ()*)() #0
- %call1 = tail call <2 x double> bitcast (<2 x double> (...)* @getvector2 to <2 x double> ()*)() #0
- %0 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %call = tail call <2 x double> @getvector1() #0
+ %call1 = tail call <2 x double> @getvector2() #0
+ %0 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %0() #0
%mul = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%vecext = extractelement <2 x double> %mul, i32 0
br i1 %cmp, label %cleanup, label %if.end
if.end: ; preds = %entry
- %1 = load void ()*, void ()** bitcast (void (...)** @IndirectCallPtr to void ()**), align 8
+ %1 = load ptr, ptr @IndirectCallPtr, align 8
tail call void %1() #0
%mul11 = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %call, <2 x double> %call1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
%2 = tail call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 2, <2 x double> %mul, <2 x double> %mul11) #0
; PPC64: mtlr 0
; PPC64: blr
-define void @foo(i8** %X) nounwind {
+define void @foo(ptr %X) nounwind {
entry:
- %tmp = tail call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- store i8* %tmp, i8** %X, align 4
+ %tmp = tail call ptr @llvm.returnaddress( i32 0 ) ; <ptr> [#uses=1]
+ store ptr %tmp, ptr %X, align 4
ret void
}
-declare i8* @llvm.returnaddress(i32)
+declare ptr @llvm.returnaddress(i32)
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind readnone
-define i8* @test1() #0 {
+define ptr @test1() #0 {
entry:
- %0 = tail call i8* @llvm.returnaddress(i32 0)
- ret i8* %0
+ %0 = tail call ptr @llvm.returnaddress(i32 0)
+ ret ptr %0
}
; CHECK-LABEL: @test1
; CHECK: blr
; Function Attrs: nounwind readnone
-declare i8* @llvm.returnaddress(i32) #0
+declare ptr @llvm.returnaddress(i32) #0
attributes #0 = { nounwind readnone }
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-aix \
; RUN: -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-32B-BE
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
-define i8* @test0() nounwind readnone {
+define ptr @test0() nounwind readnone {
; CHECK-64B-LE-LABEL: test0:
; CHECK-64B-LE: # %bb.0: # %entry
; CHECK-64B-LE-NEXT: mflr 0
; CHECK-32B-BE-NEXT: mtlr 0
; CHECK-32B-BE-NEXT: blr
entry:
- %0 = tail call i8* @llvm.returnaddress(i32 0);
- ret i8* %0
+ %0 = tail call ptr @llvm.returnaddress(i32 0);
+ ret ptr %0
}
-define i8* @test1() nounwind readnone {
+define ptr @test1() nounwind readnone {
; CHECK-64B-LE-LABEL: test1:
; CHECK-64B-LE: # %bb.0: # %entry
; CHECK-64B-LE-NEXT: mflr 0
; CHECK-32B-BE-NEXT: mtlr 0
; CHECK-32B-BE-NEXT: blr
entry:
- %0 = tail call i8* @llvm.returnaddress(i32 1);
- ret i8* %0
+ %0 = tail call ptr @llvm.returnaddress(i32 1);
+ ret ptr %0
}
-define i8* @test2() nounwind readnone {
+define ptr @test2() nounwind readnone {
; CHECK-64B-LE-LABEL: test2:
; CHECK-64B-LE: # %bb.0: # %entry
; CHECK-64B-LE-NEXT: mflr 0
; CHECK-32B-BE-NEXT: mtlr 0
; CHECK-32B-BE-NEXT: blr
entry:
- %0 = tail call i8* @llvm.returnaddress(i32 2);
- ret i8* %0
+ %0 = tail call ptr @llvm.returnaddress(i32 2);
+ ret ptr %0
}
define i128 @__fixsfdi(float %a) {
entry:
- %a_addr = alloca float ; <float*> [#uses=4]
- %retval = alloca i128, align 16 ; <i128*> [#uses=2]
- %tmp = alloca i128, align 16 ; <i128*> [#uses=3]
+ %a_addr = alloca float ; <ptr> [#uses=4]
+ %retval = alloca i128, align 16 ; <ptr> [#uses=2]
+ %tmp = alloca i128, align 16 ; <ptr> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float %a, float* %a_addr
- %tmp1 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
+ store float %a, ptr %a_addr
+ %tmp1 = load float, ptr %a_addr, align 4 ; <float> [#uses=1]
%tmp2 = fcmp olt float %tmp1, 0.000000e+00 ; <i1> [#uses=1]
%tmp23 = zext i1 %tmp2 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp23, 0 ; <i1> [#uses=1]
br i1 %toBool, label %bb, label %bb8
bb: ; preds = %entry
- %tmp4 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
+ %tmp4 = load float, ptr %a_addr, align 4 ; <float> [#uses=1]
%tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
%tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
%tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]
- store i128 %tmp7, i128* %tmp, align 16
+ store i128 %tmp7, ptr %tmp, align 16
br label %bb11
bb8: ; preds = %entry
- %tmp9 = load float, float* %a_addr, align 4 ; <float> [#uses=1]
+ %tmp9 = load float, ptr %a_addr, align 4 ; <float> [#uses=1]
%tmp10 = call i128 @__fixunssfDI( float %tmp9 ) nounwind ; <i128> [#uses=1]
- store i128 %tmp10, i128* %tmp, align 16
+ store i128 %tmp10, ptr %tmp, align 16
br label %bb11
bb11: ; preds = %bb8, %bb
- %tmp12 = load i128, i128* %tmp, align 16 ; <i128> [#uses=1]
- store i128 %tmp12, i128* %retval, align 16
+ %tmp12 = load i128, ptr %tmp, align 16 ; <i128> [#uses=1]
+ store i128 %tmp12, ptr %retval, align 16
br label %return
return: ; preds = %bb11
- %retval13 = load i128, i128* %retval ; <i128> [#uses=1]
+ %retval13 = load i128, ptr %retval ; <i128> [#uses=1]
ret i128 %retval13
}
; CHECK-NOT: rlwimi
; CHECK: andi
- %0 = load i32, i32* @m, align 4
+ %0 = load i32, ptr @m, align 4
%or = or i32 %0, 250
- store i32 %or, i32* @m, align 4
+ store i32 %or, ptr @m, align 4
%and = and i32 %or, 249
%sub.i = sub i32 %and, 0
%sext = shl i32 %sub.i, 24
unreachable
codeRepl17: ; preds = %codeRepl4
- %0 = load i8, i8* undef, align 2
+ %0 = load i8, ptr undef, align 2
%1 = and i8 %0, 1
%not.tobool.i.i.i = icmp eq i8 %1, 0
%2 = select i1 %not.tobool.i.i.i, i16 0, i16 256
- %3 = load i8, i8* undef, align 1
+ %3 = load i8, ptr undef, align 1
%4 = and i8 %3, 1
%not.tobool.i.1.i.i = icmp eq i8 %4, 0
%rvml38.sroa.1.1.insert.ext = select i1 %not.tobool.i.1.i.i, i16 0, i16 1
%rvml38.sroa.0.0.insert.insert = or i16 %rvml38.sroa.1.1.insert.ext, %2
- store i16 %rvml38.sroa.0.0.insert.insert, i16* undef, align 2
+ store i16 %rvml38.sroa.0.0.insert.insert, ptr undef, align 2
unreachable
; CHECK: @test
; Make sure there is no register-register copies here.
-define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.1 = load i32, i32* %A ; <i32> [#uses=2]
- %B.upgrd.2 = load i32, i32* %B ; <i32> [#uses=1]
+define void @test1(ptr %A, ptr %B, ptr %D, ptr %E) {
+ %A.upgrd.1 = load i32, ptr %A ; <i32> [#uses=2]
+ %B.upgrd.2 = load i32, ptr %B ; <i32> [#uses=1]
%X = and i32 %A.upgrd.1, 15 ; <i32> [#uses=1]
%Y = and i32 %B.upgrd.2, -16 ; <i32> [#uses=1]
%Z = or i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* %D
- store i32 %A.upgrd.1, i32* %E
+ store i32 %Z, ptr %D
+ store i32 %A.upgrd.1, ptr %E
ret void
}
-define void @test2(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.3 = load i32, i32* %A ; <i32> [#uses=1]
- %B.upgrd.4 = load i32, i32* %B ; <i32> [#uses=2]
+define void @test2(ptr %A, ptr %B, ptr %D, ptr %E) {
+ %A.upgrd.3 = load i32, ptr %A ; <i32> [#uses=1]
+ %B.upgrd.4 = load i32, ptr %B ; <i32> [#uses=2]
%X = and i32 %A.upgrd.3, 15 ; <i32> [#uses=1]
%Y = and i32 %B.upgrd.4, -16 ; <i32> [#uses=1]
%Z = or i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* %D
- store i32 %B.upgrd.4, i32* %E
+ store i32 %Z, ptr %D
+ store i32 %B.upgrd.4, ptr %E
ret void
}
define i32 @test1() #0 {
entry:
- %conv67.reload = load i32, i32* undef
+ %conv67.reload = load i32, ptr undef
%const = bitcast i32 65535 to i32
br label %next
next:
%shl161 = shl nuw nsw i32 %conv67.reload, 15
- %0 = load i8, i8* undef, align 1
+ %0 = load i8, ptr undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%const_mat = add i32 %const, -32767
define i32 @test2() #0 {
entry:
- %conv67.reload = load i32, i32* undef
+ %conv67.reload = load i32, ptr undef
%const = bitcast i32 65535 to i32
br label %next
next:
%shl161 = shl nuw nsw i32 %conv67.reload, 15
- %0 = load i8, i8* undef, align 1
+ %0 = load i8, ptr undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32768
%tmp6 = and i32 %tmp2, %tmp5
%tmp7 = shl i32 %c, 8
%tmp8 = or i32 %tmp6, %tmp7
- store i32 %tmp8, i32* @foo, align 4
+ store i32 %tmp8, ptr @foo, align 4
br label %return
return:
declare i32 @llvm.bswap.i32(i32) #0
; Function Attrs: nounwind readonly
-define zeroext i32 @bs32(i32* nocapture readonly %x) #1 {
+define zeroext i32 @bs32(ptr nocapture readonly %x) #1 {
entry:
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%1 = tail call i32 @llvm.bswap.i32(i32 %0)
ret i32 %1
}
; Function Attrs: nounwind readonly
-define zeroext i16 @bs16(i16* nocapture readonly %x) #1 {
+define zeroext i16 @bs16(ptr nocapture readonly %x) #1 {
entry:
- %0 = load i16, i16* %x, align 2
+ %0 = load i16, ptr %x, align 2
%1 = tail call i16 @llvm.bswap.i16(i16 %0)
ret i16 %1
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s
target triple = "powerpc64-unknown-linux-gnu"
; autogen_SD156869: fuzzer-generated regression test (verify-machineinstrs crash test; values are
; arbitrary). Diff hunk: the bitcasts %PC, %PC52, %PC59 become redundant with opaque pointers and
; are deleted; their users now address %A3, %A2 and %1 directly.
; NOTE(review): this excerpt is internally elided — blocks %CF and %CF77 referenced by branches
; are not shown here.
-define void @autogen_SD156869(i8*, i64*) {
+define void @autogen_SD156869(ptr, ptr) {
BB:
%A3 = alloca <2 x i1>
%A2 = alloca <8 x i32>
br i1 undef, label %CF, label %CF82.critedge
CF82.critedge: ; preds = %CF
- store i8 -59, i8* %0
+ store i8 -59, ptr %0
br label %CF82
CF82: ; preds = %CF82, %CF82.critedge
- %L17 = load i8, i8* %0
+ %L17 = load i8, ptr %0
%E18 = extractelement <2 x i64> undef, i32 0
- %PC = bitcast <2 x i1>* %A3 to i64*
br i1 undef, label %CF82, label %CF84.critedge
CF84.critedge: ; preds = %CF82
- store i64 455385, i64* %PC
+ store i64 455385, ptr %A3
br label %CF84
CF84: ; preds = %CF84, %CF84.critedge
- %L40 = load i64, i64* %PC
- store i64 -1, i64* %PC
+ %L40 = load i64, ptr %A3
+ store i64 -1, ptr %A3
%Sl46 = select i1 undef, i1 undef, i1 false
br i1 %Sl46, label %CF84, label %CF85
CF85: ; preds = %CF84
- %L47 = load i64, i64* %PC
- store i64 %E18, i64* %PC
- %PC52 = bitcast <8 x i32>* %A2 to ppc_fp128*
- store ppc_fp128 0xM4D436562A0416DE00000000000000000, ppc_fp128* %PC52
- %PC59 = bitcast i64* %1 to i8*
+ %L47 = load i64, ptr %A3
+ store i64 %E18, ptr %A3
+ store ppc_fp128 0xM4D436562A0416DE00000000000000000, ptr %A2
%Cmp61 = icmp slt i64 %L47, %L40
br i1 %Cmp61, label %CF, label %CF77
br i1 undef, label %CF77, label %CF81
CF81: ; preds = %CF77
- store i8 %L17, i8* %PC59
+ store i8 %L17, ptr %1
ret void
}
declare void @check(i32 signext) nounwind
-declare signext i32 @printf(i8* nocapture, ...) nounwind
+declare signext i32 @printf(ptr nocapture, ...) nounwind
-declare signext i32 @init(i8*) nounwind
+declare signext i32 @init(ptr) nounwind
; s000: TSVC-style timed benchmark kernel — a 4x-shown (16x-strided) unrolled loop doing
; X[i] = Y[i] + 1.0 over 16000 doubles as <1 x double> ops, repeated 400000 times, then printing
; elapsed seconds. Diff hunks: constant GEPs collapse to bare globals and the double*-><1 x double>*
; bitcasts disappear under opaque pointers; %N temporaries renumber accordingly.
; NOTE(review): this excerpt is internally elided — %for.cond2.preheader, %for.end10 and the
; definitions of %nl.018 / %call11 are not shown here.
define signext i32 @s000() nounwind {
entry:
- %call = tail call signext i32 @init(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str1, i64 0, i64 0))
+ %call = tail call signext i32 @init(ptr @.str1)
%call1 = tail call i64 @clock() nounwind
br label %for.cond2.preheader
for.body4: ; preds = %for.body4, %for.cond2.preheader
%indvars.iv = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next.15, %for.body4 ]
- %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
- %0 = bitcast double* %arrayidx to <1 x double>*
- %1 = load <1 x double>, <1 x double>* %0, align 32
- %add = fadd <1 x double> %1, <double 1.000000e+00>
- %2 = bitcast double* %arrayidx6 to <1 x double>*
- store <1 x double> %add, <1 x double>* %2, align 32
+ %arrayidx = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv
+ %0 = load <1 x double>, ptr %arrayidx, align 32
+ %add = fadd <1 x double> %0, <double 1.000000e+00>
+ store <1 x double> %add, ptr %arrayidx6, align 32
%indvars.iv.next.322 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
- %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
- %3 = bitcast double* %arrayidx.4 to <1 x double>*
- %4 = load <1 x double>, <1 x double>* %3, align 32
- %add.4 = fadd <1 x double> %4, <double 1.000000e+00>
- %5 = bitcast double* %arrayidx6.4 to <1 x double>*
- store <1 x double> %add.4, <1 x double>* %5, align 32
+ %arrayidx.4 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.322
+ %arrayidx6.4 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.322
+ %1 = load <1 x double>, ptr %arrayidx.4, align 32
+ %add.4 = fadd <1 x double> %1, <double 1.000000e+00>
+ store <1 x double> %add.4, ptr %arrayidx6.4, align 32
%indvars.iv.next.726 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
- %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
- %6 = bitcast double* %arrayidx.8 to <1 x double>*
- %7 = load <1 x double>, <1 x double>* %6, align 32
- %add.8 = fadd <1 x double> %7, <double 1.000000e+00>
- %8 = bitcast double* %arrayidx6.8 to <1 x double>*
- store <1 x double> %add.8, <1 x double>* %8, align 32
+ %arrayidx.8 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.726
+ %arrayidx6.8 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.726
+ %2 = load <1 x double>, ptr %arrayidx.8, align 32
+ %add.8 = fadd <1 x double> %2, <double 1.000000e+00>
+ store <1 x double> %add.8, ptr %arrayidx6.8, align 32
%indvars.iv.next.1130 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
- %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
- %9 = bitcast double* %arrayidx.12 to <1 x double>*
- %10 = load <1 x double>, <1 x double>* %9, align 32
- %add.12 = fadd <1 x double> %10, <double 1.000000e+00>
- %11 = bitcast double* %arrayidx6.12 to <1 x double>*
- store <1 x double> %add.12, <1 x double>* %11, align 32
+ %arrayidx.12 = getelementptr inbounds [16000 x double], ptr @Y, i64 0, i64 %indvars.iv.next.1130
+ %arrayidx6.12 = getelementptr inbounds [16000 x double], ptr @X, i64 0, i64 %indvars.iv.next.1130
+ %3 = load <1 x double>, ptr %arrayidx.12, align 32
+ %add.12 = fadd <1 x double> %3, <double 1.000000e+00>
+ store <1 x double> %add.12, ptr %arrayidx6.12, align 32
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
%exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
; CHECK: bdnz
for.end: ; preds = %for.body4
- %call7 = tail call signext i32 @dummy(double* getelementptr inbounds ([16000 x double], [16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double], [16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]], [256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
+ %call7 = tail call signext i32 @dummy(ptr @X, ptr @Y, ptr @Z, ptr @U, ptr @V, ptr @aa, ptr @bb, ptr @cc, double 0.000000e+00) nounwind
%inc9 = add nsw i32 %nl.018, 1
%exitcond = icmp eq i32 %inc9, 400000
br i1 %exitcond, label %for.end10, label %for.cond2.preheader
%sub = sub nsw i64 %call11, %call1
%conv = sitofp i64 %sub to double
%div = fdiv double %conv, 1.000000e+06
- %call12 = tail call signext i32 (i8*, ...) @printf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str137, i64 0, i64 0), double %div) nounwind
+ %call12 = tail call signext i32 (ptr, ...) @printf(ptr @.str137, double %div) nounwind
tail call void @check(i32 signext 1)
ret i32 0
}
declare i64 @clock() nounwind
-declare signext i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
+declare signext i32 @dummy(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, double)
; RUN: llc -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: %s -o - -verify-machineinstrs -mcpu=pwr9 | FileCheck %s
-define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %aa, <8 x i16>* %FromVSCR) {
+define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %aa, ptr %FromVSCR) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsumsws v5, v2, v3
%1 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %a, <4 x i32> %b)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = tail call <8 x i16> @llvm.ppc.altivec.mfvscr()
- store <8 x i16> %3, <8 x i16>* %FromVSCR, align 16
+ store <8 x i16> %3, ptr %FromVSCR, align 16
%4 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %b, <4 x i32> %aa)
%5 = bitcast <8 x i16> %4 to <4 x i32>
%add1 = add <4 x i32> %add, %0
; fred: regression test mixing an overaligned i64 alloca, inline asm that clobbers cr2, and
; fptosi with saturation-style select (INT64_MIN when %a0 < -2^62-ish threshold). Diff hunks:
; i64* stores/loads to %v2 rewritten to opaque "ptr".
; NOTE(review): excerpt is internally elided — blocks %b8, %b10, %b12 feeding the %v16 phi are
; not shown here.
define i64 @fred(double %a0) local_unnamed_addr #0 {
b1:
%v2 = alloca i64, align 128
- store i64 0, i64* %v2
+ store i64 0, ptr %v2
%a1 = tail call double asm "fadd $0, $1, $2", "=f,f,f,~{cr2}"(double %a0, double %a0)
%v3 = fcmp olt double %a1, 0x43E0000000000000
br i1 %v3, label %b4, label %b8
b4: ; preds = %b1
%v5 = fcmp olt double %a0, 0xC3E0000000000000
%v6 = fptosi double %a0 to i64
- store i64 %v6, i64* %v2
+ store i64 %v6, ptr %v2
%v7 = select i1 %v5, i64 -9223372036854775808, i64 %v6
br label %b15
b15: ; preds = %b12, %b10, %b4
%v16 = phi i64 [ %v7, %b4 ], [ %v11, %b10 ], [ %v14, %b12 ]
- %v17 = load i64, i64* %v2
+ %v17 = load i64, ptr %v2
%v18 = add i64 %v17, %v16
ret i64 %v18
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to double
ret double %conv
}
; Loads u8 at ptr+8, converts unsigned to double (uitofp). Diff: i8* -> opaque ptr.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_double_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
; Loads u8 at ptr+99999000 (32-bit-range offset), converts unsigned to double.
; Diff: i8* -> opaque ptr. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
; Loads u8 at ptr+10^12 (64-bit-range offset), converts unsigned to double.
; Diff: i8* -> opaque ptr. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
; Loads u8 at ptr+%off (register offset -> lxsibzx), converts unsigned to double.
; Diff: i8* -> opaque ptr. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_double_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsibzx f0, r3, r4
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = uitofp i8 %1 to double
ret double %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = uitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = uitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = uitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to double
ret double %conv
}
; Loads i8 at ptr+8, converts signed to double (sitofp). Diff: i8* -> opaque ptr.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int8_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_double_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to double
ret double %conv
}
; Loads i8 at ptr+99999000, converts signed to double. Diff: i8* -> opaque ptr.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to double
ret double %conv
}
; Loads i8 at ptr+10^12, converts signed to double. Diff: i8* -> opaque ptr.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int8_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to double
ret double %conv
}
; Loads i8 at ptr+%off (register offset), converts signed to double. Diff: i8* -> opaque ptr.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_double_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsibzx v2, r3, r4
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sitofp i8 %1 to double
ret double %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sitofp i8 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i8 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to double
ret double %conv
}
; Loads u16 at ptr+8, converts unsigned to double. Diff: the i8*->i16* bitcast vanishes under
; opaque pointers, so %N temporaries renumber. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_double_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to double
ret double %conv
}
; Loads u16 at ptr+99999000, converts unsigned to double. Diff drops the i8*->i16* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to double
ret double %conv
}
; Loads u16 at ptr+10^12, converts unsigned to double. Diff drops the i8*->i16* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to double
ret double %conv
}
; Loads u16 at ptr+%off (register offset -> lxsihzx), converts unsigned to double.
; Diff drops the i8*->i16* bitcast. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_double_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsihzx f0, r3, r4
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = uitofp i16 %1 to double
ret double %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = uitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = uitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = uitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i16 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i16 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvuxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i16 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to double
ret double %conv
}
; Loads i16 at ptr+8, converts signed to double. Diff drops the i8*->i16* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int16_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_double_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to double
ret double %conv
}
; Loads i16 at ptr+99999000, converts signed to double. Diff drops the i8*->i16* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to double
ret double %conv
}
; Loads i16 at ptr+10^12, converts signed to double. Diff drops the i8*->i16* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int16_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to double
ret double %conv
}
; Loads i16 at ptr+%off (register offset), converts signed to double. Diff drops the
; i8*->i16* bitcast. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_double_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsihzx v2, r3, r4
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sitofp i16 %1 to double
ret double %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = sitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = sitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = sitofp i16 %1 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i16 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i16 %0 to double
ret double %conv
}
; CHECK-P8-NEXT: xscvsxddp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i16 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to double
ret double %conv
}
; Loads u32 at ptr+8, converts unsigned to double. Diff drops the i8*->i32* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 8
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to double
ret double %conv
}
; Loads u32 at ptr+99999000, converts unsigned to double. Diff drops the i8*->i32* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to double
ret double %conv
}
; Loads u32 at ptr+10^12, converts unsigned to double. Diff drops the i8*->i32* bitcast.
; CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to double
ret double %conv
}
; Loads u32 at ptr+%off (register offset -> lfiwzx), converts unsigned to double.
; Diff drops the i8*->i32* bitcast. CHECK lines partially elided in this excerpt.
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, r3, r4
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = uitofp i32 %1 to double
ret double %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = uitofp i32 %1 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = uitofp i32 %1 to double
ret double %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = uitofp i32 %1 to double
ret double %conv
}
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i32 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i32 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i32 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 8
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int32_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, r3, r4
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = sitofp i32 %1 to double
ret double %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = sitofp i32 %1 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = sitofp i32 %1 to double
ret double %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = sitofp i32 %1 to double
ret double %conv
}
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i32 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i32 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i32 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = uitofp i64 %1 to double
ret double %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = uitofp i64 %1 to double
ret double %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = uitofp i64 %1 to double
ret double %conv
}
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i64 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = uitofp i64 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvuxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i64 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_int64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_int64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_int64_t(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_int64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_int64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_int64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = sitofp i64 %1 to double
ret double %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = sitofp i64 %1 to double
ret double %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = sitofp i64 %1 to double
ret double %conv
}
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i64 %0 to double
ret double %conv
}
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = sitofp i64 %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: xscvsxddp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i64 %0 to double
ret double %conv
}
; CHECK-NEXT: lfs f1, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fpext float %1 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f1, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fpext float %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fpext float %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: lfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fpext float %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fpext float %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_float(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fpext float %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fpext float %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f1, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fpext float %1 to double
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fpext float %0 to double
ret double %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fpext float %1 to double
ret double %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fpext float %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fpext float %1 to double
ret double %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fpext float %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fpext float %1 to double
ret double %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fpext float %1 to double
ret double %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fpext float %1 to double
ret double %conv
}
; CHECK-NEXT: lfs f1, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fpext float %0 to double
ret double %conv
}
; CHECK-NEXT: lfs f1, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fpext float %0 to double
ret double %conv
}
; CHECK-PREP10-NEXT: lfs f1, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fpext float %0 to double
ret double %conv
}
; CHECK-NEXT: lfd f1, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align16_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align16_double_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_double_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f1, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- ret double %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ ret double %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align32_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align32_double_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_double_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: lfdx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- ret double %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ ret double %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_align64_double_double(i8* nocapture readonly %ptr) {
+define dso_local double @ld_align64_double_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_double_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lfdx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- ret double %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ ret double %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local double @ld_reg_double_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local double @ld_reg_double_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_double_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f1, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- ret double %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ ret double %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
ret double %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
ret double %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
ret double %1
}
; CHECK-NEXT: lfd f1, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
ret double %0
}
; CHECK-NEXT: lfd f1, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
ret double %0
}
; CHECK-PREP10-NEXT: lfd f1, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret double %0
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint8_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint8_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); stores (i8)fptoui(%str) at %ptr+1000000000000 (64-bit-range offset).
-define dso_local void @st_align64_double_uint8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint8_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); stores (i8)fptoui(%str) at %ptr+%off (register offset).
-define dso_local void @st_reg_double_uint8_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint8_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-POSTP8-LABEL: st_reg_double_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = fptoui double %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = fptoui double %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui double %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui double %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui double %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui double %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui double %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); signed variant — stores (i8)fptosi(%str) at %ptr+8.
-define dso_local void @st_align16_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int8_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); stores (i8)fptosi(%str) at %ptr+99999000.
-define dso_local void @st_align32_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int8_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); stores (i8)fptosi(%str) at %ptr+1000000000000.
-define dso_local void @st_align64_double_int8_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int8_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff (i8* -> ptr); stores (i8)fptosi(%str) at %ptr+%off.
-define dso_local void @st_reg_double_int8_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int8_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-POSTP8-LABEL: st_reg_double_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = fptosi double %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = fptosi double %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi double %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi double %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi double %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi double %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi double %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; opaque pointers make the i8*->i16* bitcast redundant, so the + side drops it and stores i16 through %add.ptr directly.
-define dso_local void @st_align16_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint16_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped under opaque pointers — stores (i16)fptoui(%str) at %ptr+99999000.
-define dso_local void @st_align32_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint16_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped under opaque pointers — stores (i16)fptoui(%str) at %ptr+1000000000000.
-define dso_local void @st_align64_double_uint16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint16_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped under opaque pointers — stores (i16)fptoui(%str) at %ptr+%off.
-define dso_local void @st_reg_double_uint16_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint16_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-POSTP8-LABEL: st_reg_double_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = fptoui double %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = fptoui double %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui double %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui double %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui double %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui double %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui double %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; signed variant — bitcast dropped, stores (i16)fptosi(%str) at %ptr+8.
-define dso_local void @st_align16_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int16_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i16)fptosi(%str) at %ptr+99999000.
-define dso_local void @st_align32_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int16_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i16)fptosi(%str) at %ptr+1000000000000.
-define dso_local void @st_align64_double_int16_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int16_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i16)fptosi(%str) at %ptr+%off.
-define dso_local void @st_reg_double_int16_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int16_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-POSTP8-LABEL: st_reg_double_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = fptosi double %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = fptosi double %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi double %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi double %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi double %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi double %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi double %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptoui double %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptoui(%str) at %ptr+8.
-define dso_local void @st_align16_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint32_t(ptr nocapture %ptr, double %str) {
; CHECK-LABEL: st_align16_double_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptoui(%str) at %ptr+99999000.
-define dso_local void @st_align32_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint32_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptoui(%str) at %ptr+1000000000000.
-define dso_local void @st_align64_double_uint32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint32_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptoui(%str) at %ptr+%off.
-define dso_local void @st_reg_double_uint32_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint32_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = fptoui double %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = fptoui double %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui double %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui double %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui double %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui double %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui double %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptoui double %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptosi double %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; signed variant — bitcast dropped, stores (i32)fptosi(%str) at %ptr+8.
-define dso_local void @st_align16_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int32_t(ptr nocapture %ptr, double %str) {
; CHECK-LABEL: st_align16_double_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptosi(%str) at %ptr+99999000.
-define dso_local void @st_align32_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int32_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptosi(%str) at %ptr+1000000000000.
-define dso_local void @st_align64_double_int32_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int32_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i32)fptosi(%str) at %ptr+%off.
-define dso_local void @st_reg_double_int32_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int32_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi double %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = fptosi double %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = fptosi double %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi double %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi double %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi double %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi double %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi double %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptosi double %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i64)fptoui(%str) at %ptr+8, align 8.
-define dso_local void @st_align16_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_uint64_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_uint64_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxds v2, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i64)fptoui(%str) at %ptr+99999000, align 8.
-define dso_local void @st_align32_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_uint64_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxds v2, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i64)fptoui(%str) at %ptr+1000000000000, align 8.
-define dso_local void @st_align64_double_uint64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_uint64_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxds f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
; NOTE(review): migration diff; bitcast dropped — stores (i64)fptoui(%str) at %ptr+%off, align 8.
-define dso_local void @st_reg_double_uint64_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_uint64_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = fptoui double %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui double %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui double %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui double %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui double %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui double %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui double %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui double %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_int64_t(ptr nocapture %ptr, double %str) {
; CHECK-POSTP8-LABEL: st_align16_double_int64_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxds v2, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_int64_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxds v2, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_int64_t(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_int64_t(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxds f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_int64_t(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_int64_t(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi double %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = fptosi double %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi double %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi double %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi double %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi double %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi double %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi double %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi double %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptrunc double %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_float(ptr nocapture %ptr, double %str) {
; CHECK-LABEL: st_align16_double_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrsp f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptrunc double %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_float(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xsrsp f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptrunc double %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_float(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_float(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xsrsp f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptrunc double %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_float(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_float(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrsp f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptrunc double %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = fptrunc double %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = fptrunc double %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = fptrunc double %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptrunc double %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptrunc double %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptrunc double %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptrunc double %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptrunc double %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptrunc double %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptrunc double %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: stfd f1, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %str, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align16_double_double(ptr nocapture %ptr, double %str) {
; CHECK-LABEL: st_align16_double_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfd f1, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %str, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align32_double_double(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align32_double_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstfd f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: stfdx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %str, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_double_double(i8* nocapture %ptr, double %str) {
+define dso_local void @st_align64_double_double(ptr nocapture %ptr, double %str) {
; CHECK-P10-LABEL: st_align64_double_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: stfdx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %str, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_double_double(i8* nocapture %ptr, i64 %off, double %str) {
+define dso_local void @st_reg_double_double(ptr nocapture %ptr, i64 %off, double %str) {
; CHECK-LABEL: st_reg_double_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfdx f1, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %str, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %str, ptr %add.ptr, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %str, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: stfd f1, 4080(0)
; CHECK-NEXT: blr
entry:
- store double %str, double* inttoptr (i64 4080 to double*), align 16
+ store double %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: stfd f1, -27108(r3)
; CHECK-NEXT: blr
entry:
- store double %str, double* inttoptr (i64 9999900 to double*), align 8
+ store double %str, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: stfd f1, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store double %str, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_float_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_float_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsibzx f0, r3, r4
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = uitofp i8 %1 to float
ret float %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = uitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = uitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = uitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = uitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int8_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_float_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int8_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_float_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsibzx v2, r3, r4
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sitofp i8 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sitofp i8 %1 to float
ret float %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sitofp i8 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sitofp i8 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i8 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_float_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_float_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsihzx f0, r3, r4
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = uitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = uitofp i16 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = uitofp i16 %1 to float
ret float %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = uitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = uitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = uitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = uitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i16 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i16 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvuxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i16 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int16_t(ptr nocapture readonly %ptr) {
; CHECK-POSTP8-LABEL: ld_align16_float_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: addi r3, r3, 8
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int16_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-POSTP8-LABEL: ld_reg_float_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: lxsihzx v2, r3, r4
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sitofp i16 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sitofp i16 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sitofp i16 %1 to float
ret float %conv1
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = sitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = sitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sitofp i16 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = sitofp i16 %1 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i16 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i16 %0 to float
ret float %conv
}
; CHECK-P8-NEXT: xscvsxdsp f1, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i16 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 8
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, r3, r4
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = uitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = uitofp i32 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = uitofp i32 %1 to float
ret float %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = uitofp i32 %1 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = uitofp i32 %1 to float
ret float %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = uitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = uitofp i32 %1 to float
ret float %conv
}
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i32 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = uitofp i32 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i32 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r3, r3, 8
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 99999000
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int32_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, r3, r4
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sitofp i32 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sitofp i32 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = sitofp i32 %1 to float
ret float %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = sitofp i32 %1 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = sitofp i32 %1 to float
ret float %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sitofp i32 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = sitofp i32 %1 to float
ret float %conv
}
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i32 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sitofp i32 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i32 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = uitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = uitofp i64 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = uitofp i64 %1 to float
ret float %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = uitofp i64 %1 to float
ret float %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = uitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = uitofp i64 %1 to float
ret float %conv
}
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = uitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = uitofp i64 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvuxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = uitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_int64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_int64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_int64_t(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_int64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_int64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_int64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = sitofp i64 %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = sitofp i64 %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = sitofp i64 %1 to float
ret float %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = sitofp i64 %1 to float
ret float %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = sitofp i64 %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = sitofp i64 %1 to float
ret float %conv
}
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = sitofp i64 %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xscvsxdsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sitofp i64 %0 to float
ret float %conv
}
; CHECK-NEXT: lfs f1, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
ret float %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f1, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- ret float %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ ret float %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: lfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- ret float %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ ret float %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_float(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- ret float %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ ret float %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f1, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- ret float %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ ret float %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
ret float %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
ret float %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
ret float %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
ret float %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
ret float %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
ret float %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
ret float %1
}
; CHECK-NEXT: lfs f1, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
ret float %0
}
; CHECK-NEXT: lfs f1, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
ret float %0
}
; CHECK-PREP10-NEXT: lfs f1, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret float %0
}
; CHECK-NEXT: xsrsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptrunc double %1 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align16_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align16_float_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_float_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: xsrsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptrunc double %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptrunc double %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align32_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align32_float_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_float_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: xsrsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptrunc double %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptrunc double %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_align64_float_double(i8* nocapture readonly %ptr) {
+define dso_local float @ld_align64_float_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_float_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: xsrsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptrunc double %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptrunc double %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local float @ld_reg_float_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local float @ld_reg_float_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_float_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: xsrsp f1, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptrunc double %1 to float
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptrunc double %0 to float
ret float %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptrunc double %1 to float
ret float %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptrunc double %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptrunc double %1 to float
ret float %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptrunc double %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptrunc double %1 to float
ret float %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptrunc double %1 to float
ret float %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptrunc double %1 to float
ret float %conv
}
; CHECK-NEXT: xsrsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptrunc double %0 to float
ret float %conv
}
; CHECK-NEXT: xsrsp f1, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptrunc double %0 to float
ret float %conv
}
; CHECK-PREP10-NEXT: xsrsp f1, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptrunc double %0 to float
ret float %conv
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint8_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint8_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint8_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint8_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint8_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-POSTP8-LABEL: st_reg_float_uint8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = fptoui float %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = fptoui float %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui float %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui float %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui float %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui float %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui float %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int8_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int8_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int8_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int8_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int8_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int8_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-POSTP8-LABEL: st_reg_float_int8_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = fptosi float %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = fptosi float %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi float %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi float %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi float %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi float %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi float %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint16_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint16_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint16_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint16_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint16_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-POSTP8-LABEL: st_reg_float_uint16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = fptoui float %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = fptoui float %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui float %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui float %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui float %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui float %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui float %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int16_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int16_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int16_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int16_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int16_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int16_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-POSTP8-LABEL: st_reg_float_int16_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxws f0, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = fptosi float %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = fptosi float %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi float %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi float %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi float %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi float %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi float %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptoui float %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint32_t(ptr nocapture %ptr, float %str) {
; CHECK-LABEL: st_align16_float_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint32_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint32_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint32_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint32_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = fptoui float %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = fptoui float %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui float %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui float %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui float %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui float %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui float %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptoui float %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptosi float %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int32_t(ptr nocapture %ptr, float %str) {
; CHECK-LABEL: st_align16_float_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int32_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int32_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int32_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxws f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int32_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int32_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi float %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = fptosi float %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = fptosi float %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi float %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi float %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi float %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi float %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi float %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fptosi float %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_uint64_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_uint64_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpuxds v2, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_uint64_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxds v2, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_uint64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_uint64_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpuxds f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptoui float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_uint64_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_uint64_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptoui float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = fptoui float %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui float %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = fptoui float %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptoui float %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptoui float %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptoui float %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptoui float %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptoui float %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_int64_t(ptr nocapture %ptr, float %str) {
; CHECK-POSTP8-LABEL: st_align16_float_int64_t:
; CHECK-POSTP8: # %bb.0: # %entry
; CHECK-POSTP8-NEXT: xscvdpsxds v2, f1
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_int64_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxds v2, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_int64_t(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_int64_t(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_int64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: xscvdpsxds f0, f1
; CHECK-PREP10-NEXT: blr
entry:
%conv = fptosi float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_int64_t(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_int64_t(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_int64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: blr
entry:
%conv = fptosi float %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = fptosi float %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi float %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = fptosi float %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = fptosi float %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fptosi float %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = fptosi float %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fptosi float %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = fptosi float %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: stfs f1, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- store float %str, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %str, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_float(ptr nocapture %ptr, float %str) {
; CHECK-LABEL: st_align16_float_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfs f1, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %str, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_float(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstfs f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: stfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %str, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_float(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_float(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: stfsx f1, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %str, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_float(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_float(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfsx f1, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %str, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %str, ptr %add.ptr, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %str, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: stfs f1, 4080(0)
; CHECK-NEXT: blr
entry:
- store float %str, float* inttoptr (i64 4080 to float*), align 16
+ store float %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: stfs f1, -27108(r3)
; CHECK-NEXT: blr
entry:
- store float %str, float* inttoptr (i64 9999900 to float*), align 4
+ store float %str, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: stfs f1, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- store float %str, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fpext float %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align16_float_double(ptr nocapture %ptr, float %str) {
; CHECK-LABEL: st_align16_float_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfd f1, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = fpext float %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align32_float_double(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align32_float_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstfd f1, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = fpext float %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_float_double(i8* nocapture %ptr, float %str) {
+define dso_local void @st_align64_float_double(ptr nocapture %ptr, float %str) {
; CHECK-P10-LABEL: st_align64_float_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = fpext float %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_float_double(i8* nocapture %ptr, i64 %off, float %str) {
+define dso_local void @st_reg_float_double(ptr nocapture %ptr, i64 %off, float %str) {
; CHECK-LABEL: st_reg_float_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stfdx f1, r3, r4
; CHECK-NEXT: blr
entry:
%conv = fpext float %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = fpext float %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = fpext float %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = fpext float %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = fpext float %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = fpext float %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = fpext float %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = fpext float %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fpext float %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = fpext float %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = fpext float %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align16_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align32_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_align64_int16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i16 @ld_reg_int16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lha r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): diff hunks from the typed-pointer -> opaque-pointer (`ptr`) test
; migration: each GEP+bitcast+load chain collapses to a direct load through `ptr`.
; CHECK lines are auto-generated; only the diff's context window is visible here,
; so some check prefixes appear truncated -- confirm against the full test file.
; i16 load at ptr+8, returned sign-extended (lha with a 16-bit displacement).
-define dso_local signext i16 @ld_align16_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i16 load at ptr+99999000: P10 folds it into a prefixed plha; pre-P10 CPUs
; materialize the offset and use indexed lhax (CHECK-PREP10 tail visible).
-define dso_local signext i16 @ld_align32_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plha r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i16 load at ptr+1000000000000: offset exceeds 34 bits, so even P10 builds it
; with pli before the indexed load.
-define dso_local signext i16 @ld_align64_int16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i16 load at ptr+off (variable register offset) -> indexed-form lhax.
-define dso_local signext i16 @ld_reg_int16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
ret i16 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
ret i16 %1
}
; CHECK-NEXT: lha r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
ret i16 %0
}
; CHECK-NEXT: lha r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
ret i16 %0
}
; CHECK-PREP10-NEXT: lha r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i16 %0
}
; CHECK-BE-NEXT: lha r3, 2(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks (bitcast+load -> direct ptr load).
; These load i32 then trunc to i16; LE/BE check prefixes differ because the
; truncating load folds to a narrow load at an endian-dependent byte offset
; (BE reads the low halfword at base+2). Auto-generated CHECK blocks are
; partially trimmed by diff context -- confirm against the full test file.
-define dso_local signext i16 @ld_align16_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int16_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lha r3, 8(r3)
; CHECK-BE-NEXT: lha r3, 10(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i32 load at ptr+99999000, truncated to i16; P10-LE uses prefixed plha.
-define dso_local signext i16 @ld_align32_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int16_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plha r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lhax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i32 load at ptr+1000000000000 (offset materialized via pli), trunc to i16.
-define dso_local signext i16 @ld_align64_int16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int16_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lhax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i32 load at ptr+off, trunc to i16; LE folds to indexed lhax.
-define dso_local signext i16 @ld_reg_int16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int16_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhax r3, r3, r4
; CHECK-BE-NEXT: lha r3, 2(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = trunc i32 %1 to i16
ret i16 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lha r3, 4082(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lha r3, -27106(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: lha r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lha r3, 6(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks. These load i64 then trunc to
; i16; BE narrows to the halfword at base+6 (low 16 bits of a big-endian i64).
; CHECK blocks are auto-generated and partially trimmed by diff context.
-define dso_local signext i16 @ld_align16_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int16_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lha r3, 8(r3)
; CHECK-BE-NEXT: lha r3, 14(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i64 load at ptr+99999000, trunc to i16; P10-LE uses prefixed plha.
-define dso_local signext i16 @ld_align32_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int16_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plha r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lhax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i64 load at ptr+1000000000000 (offset via pli), trunc to i16.
-define dso_local signext i16 @ld_align64_int16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int16_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lhax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i64 load at ptr+off, trunc to i16; LE folds to indexed lhax, BE to lha at +6.
-define dso_local signext i16 @ld_reg_int16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int16_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhax r3, r3, r4
; CHECK-BE-NEXT: lha r3, 6(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i16
ret i16 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lha r3, 4086(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lha r3, -27102(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: lha r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks. These load a float and convert
; with fptosi to i16 (checks show lfs/lfsx plus an extsw in the conversion
; tail; the fctiwz/store/reload sequence is trimmed by diff context -- confirm
; against the full generated check block).
-define dso_local signext i16 @ld_align16_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; float load at ptr+99999000 (P10: prefixed plfs), fptosi to i16.
-define dso_local signext i16 @ld_align32_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; float load at ptr+1000000000000 (offset via pli), fptosi to i16.
-define dso_local signext i16 @ld_align64_int16_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; float load at ptr+off (indexed lfsx), fptosi to i16.
-define dso_local signext i16 @ld_reg_int16_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptosi float %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptosi float %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptosi float %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptosi float %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptosi float %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks. These load a double and convert
; with fptosi to i16 (checks show lfd/lfdx plus an extsw in the conversion
; tail; the rest of the generated sequence is trimmed by diff context).
-define dso_local signext i16 @ld_align16_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align16_int16_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; double load at ptr+99999000 (P10: prefixed plfd), fptosi to i16.
-define dso_local signext i16 @ld_align32_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align32_int16_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; double load at ptr+1000000000000 (offset via pli), fptosi to i16.
-define dso_local signext i16 @ld_align64_int16_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i16 @ld_align64_int16_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; double load at ptr+off (indexed lfdx), fptosi to i16.
-define dso_local signext i16 @ld_reg_int16_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i16 @ld_reg_int16_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptosi double %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptosi double %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptosi double %1 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptosi double %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks. Source type is already i8, so
; only the pointer spellings change (i8* -> ptr); no bitcast is removed.
; Each loads an i8 and zero-extends to the i16 return (lbz already zero-fills).
-define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+99999000 (P10: prefixed plbz), zext to i16.
-define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+1000000000000 (offset via pli), zext to i16.
-define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+off (indexed lbzx), zext to i16.
-define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = zext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = zext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; NOTE(review): opaque-pointer migration hunks (i8* -> ptr only; no bitcasts).
; Each loads an i8, sign-extends it to i16, and the zeroext return then masks
; to 16 bits -- hence the lbz ... / extsb-style sequence ending in
; `clrldi r3, r3, 48` (only part of it visible in this diff context).
-define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+99999000 (P10: prefixed plbz), sext to i16, zero-masked return.
-define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+1000000000000 (offset via pli), sext to i16, masked return.
-define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; i8 load at ptr+off (indexed lbzx), sext to i16, masked return.
-define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sext i8 %1 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: clrldi r3, r3, 48
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 48
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i8 %0 to i16
ret i16 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- ret i16 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ ret i16 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
ret i16 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
ret i16 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
ret i16 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
ret i16 %1
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
ret i16 %0
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
ret i16 %0
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i16 %0
}
; CHECK-BE-NEXT: lhz r3, 2(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint16_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhz r3, 8(r3)
; CHECK-BE-NEXT: lhz r3, 10(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint16_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plhz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lhzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint16_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lhzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint16_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhzx r3, r3, r4
; CHECK-BE-NEXT: lhz r3, 2(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = trunc i32 %1 to i16
ret i16 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = trunc i32 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lhz r3, 4082(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lhz r3, -27106(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: lhz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i32 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lhz r3, 6(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint16_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhz r3, 8(r3)
; CHECK-BE-NEXT: lhz r3, 14(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint16_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plhz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lhzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint16_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lhzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint16_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lhzx r3, r3, r4
; CHECK-BE-NEXT: lhz r3, 6(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i16
ret i16 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lhz r3, 4086(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-BE-NEXT: lhz r3, -27102(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; CHECK-P8-BE-NEXT: lhz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptoui float %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptoui float %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptoui float %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptoui float %1 to i16
ret i16 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptoui float %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui float %0 to i16
ret i16 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align16_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align16_uint16_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align32_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align32_uint16_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_align64_uint16_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i16 @ld_align64_uint16_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i16 @ld_reg_uint16_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i16 @ld_reg_uint16_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i16
ret i16 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptoui double %1 to i16
ret i16 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptoui double %1 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i16
ret i16 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptoui double %1 to i16
ret i16 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptoui double %0 to i16
ret i16 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui double %0 to i16
ret i16 %conv
}
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint8_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint8_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint8_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint8_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = trunc i16 %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = trunc i16 %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i16 %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i16 %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i16 %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i16 %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i16 %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i16 %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: sth r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- store i16 %str, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %str, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %str, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %str, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %str, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %str, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint16_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint16_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: sthx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %str, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %str, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint16_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint16_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %str, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %str, ptr %add.ptr, align 2
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 2
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 2
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 2
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 2
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %str, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: sth r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store i16 %str, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: sth r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store i16 %str, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %str, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: sth r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store i16 %str, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint32_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint32_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = zext i16 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = zext i16 %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i16 %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i16 %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i16 %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i16 %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i16 %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_uint64_t(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_uint64_t(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = zext i16 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i16 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i16 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i16 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i16 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i16 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i16 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_float(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_float(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_float(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_float(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = uitofp i16 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = uitofp i16 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i16 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i16 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i16 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i16 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i16 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align16_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-LABEL: st_align16_uint16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align32_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint16_t_double(i8* nocapture %ptr, i16 zeroext %str) {
+define dso_local void @st_align64_uint16_t_double(ptr nocapture %ptr, i16 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint16_t_double(i8* nocapture %ptr, i64 %off, i16 zeroext %str) {
+define dso_local void @st_reg_uint16_t_double(ptr nocapture %ptr, i64 %off, i16 zeroext %str) {
; CHECK-LABEL: st_reg_uint16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = uitofp i16 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i16 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i16 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i16 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i16 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i16 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i16 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_uint32_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint32_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_uint32_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint32_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = sext i16 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = sext i16 %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i16 %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i16 %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i16 %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i16 %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i16 %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_uint64_t(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_uint64_t(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_uint64_t(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_uint64_t(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = sext i16 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i16 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i16 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i16 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i16 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i16 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i16 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i16 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_float(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_float(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_float(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_float(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = sitofp i16 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = sitofp i16 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i16 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i16 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i16 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i16 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i16 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align16_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
; CHECK-LABEL: st_align16_int16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align32_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align32_int16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int16_t_double(i8* nocapture %ptr, i16 signext %str) {
+define dso_local void @st_align64_int16_t_double(ptr nocapture %ptr, i16 signext %str) {
; CHECK-P10-LABEL: st_align64_int16_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int16_t_double(i8* nocapture %ptr, i64 %off, i16 signext %str) {
+define dso_local void @st_reg_int16_t_double(ptr nocapture %ptr, i64 %off, i16 signext %str) {
; CHECK-LABEL: st_reg_int16_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = sitofp i16 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i16 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i16 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i16 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i16 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i16 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i16 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i16 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i32
ret i32 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lha r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plha r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sext i16 %1 to i32
ret i32 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lha r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lha r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lha r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwa r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwa r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
ret i32 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
ret i32 %1
}
; CHECK-NEXT: lwa r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
ret i32 %0
}
; CHECK-NEXT: lwa r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
ret i32 %0
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i32 %0
}
; CHECK-BE-NEXT: lwa r3, 4(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_unalign16_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_unalign16_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_unalign16_int32_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plwa r3, 1(r3), 0
; CHECK-P8-BE-NEXT: lwax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int32_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lwa r3, 8(r3)
; CHECK-BE-NEXT: lwa r3, 12(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int32_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plwa r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lwax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int32_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lwax r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int32_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lwax r3, r3, r4
; CHECK-BE-NEXT: lwa r3, 4(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i32
ret i32 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i32
ret i32 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: lwa r3, 4084(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: lwa r3, -27104(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-P8-BE-NEXT: lwa r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptosi float %1 to i32
ret i32 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptosi float %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptosi float %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptosi float %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptosi float %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptosi float %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align16_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align16_int32_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align32_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align32_int32_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_align64_int32_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i32 @ld_align64_int32_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i32 @ld_reg_int32_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i32 @ld_reg_int32_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptosi double %1 to i32
ret i32 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptosi double %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptosi double %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptosi double %1 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptosi double %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = zext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = zext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sext i8 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i8 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = zext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = zext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 8(r3)
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plha r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhax r3, r3, r4
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sext i16 %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = sext i16 %1 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: clrldi r3, r3, 32
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i16 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- ret i32 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ ret i32 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
ret i32 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
ret i32 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
ret i32 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
ret i32 %1
}
; CHECK-NEXT: lwz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
ret i32 %0
}
; CHECK-NEXT: lwz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
ret i32 %0
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i32 %0
}
; CHECK-BE-NEXT: lwz r3, 4(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint32_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lwz r3, 8(r3)
; CHECK-BE-NEXT: lwz r3, 12(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint32_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plwz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lwzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint32_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lwzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint32_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lwzx r3, r3, r4
; CHECK-BE-NEXT: lwz r3, 4(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i32
ret i32 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: lwz r3, 4084(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-BE-NEXT: lwz r3, -27104(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-P8-BE-NEXT: lwz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptoui float %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptoui float %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptoui float %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptoui float %1 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptoui float %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui float %0 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align16_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align16_uint32_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align32_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align32_uint32_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_align64_uint32_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i32 @ld_align64_uint32_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i32 @ld_reg_uint32_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i32 @ld_reg_uint32_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i32
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i32
ret i32 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptoui double %1 to i32
ret i32 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptoui double %1 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i32
ret i32 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptoui double %1 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptoui double %0 to i32
ret i32 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui double %0 to i32
ret i32 %conv
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint8_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint8_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint8_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint8_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = trunc i32 %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = trunc i32 %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i32 %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i32 %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i32 %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i32 %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i32 %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint16_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint16_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint16_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint16_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = trunc i32 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = trunc i32 %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i32 %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i32 %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i32 %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i32 %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i32 %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i32 %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- store i32 %str, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %str, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %str, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %str, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint32_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint32_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stwx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %str, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %str, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint32_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint32_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %str, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %str, ptr %add.ptr, align 4
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 4
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 4
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %str, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: stw r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store i32 %str, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: stw r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store i32 %str, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %str, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: stw r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store i32 %str, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_uint64_t(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_uint64_t(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = zext i32 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i32 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i32 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i32 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i32 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i32 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i32 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_float(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_float(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_float(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_float(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = uitofp i32 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = uitofp i32 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i32 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i32 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i32 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i32 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i32 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align16_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-LABEL: st_align16_uint32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align32_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint32_t_double(i8* nocapture %ptr, i32 zeroext %str) {
+define dso_local void @st_align64_uint32_t_double(ptr nocapture %ptr, i32 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint32_t_double(i8* nocapture %ptr, i64 %off, i32 zeroext %str) {
+define dso_local void @st_reg_uint32_t_double(ptr nocapture %ptr, i64 %off, i32 zeroext %str) {
; CHECK-LABEL: st_reg_uint32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = uitofp i32 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i32 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i32 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i32 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i32 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i32 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i32 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-LABEL: st_align16_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align32_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_uint64_t(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_uint64_t(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align64_int32_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_uint64_t(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_uint64_t(ptr nocapture %ptr, i64 %off, i32 signext %str) {
; CHECK-LABEL: st_reg_int32_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = sext i32 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i32 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i32 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i32 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i32 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i32 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i32 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i32 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
; CHECK-LABEL: st_align16_int32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align32_int32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_float(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_float(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align64_int32_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_float(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_float(ptr nocapture %ptr, i64 %off, i32 signext %str) {
; CHECK-LABEL: st_reg_int32_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = sitofp i32 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = sitofp i32 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i32 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i32 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i32 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i32 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i32 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align16_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
; CHECK-LABEL: st_align16_int32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align32_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align32_int32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int32_t_double(i8* nocapture %ptr, i32 signext %str) {
+define dso_local void @st_align64_int32_t_double(ptr nocapture %ptr, i32 signext %str) {
; CHECK-P10-LABEL: st_align64_int32_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int32_t_double(i8* nocapture %ptr, i64 %off, i32 signext %str) {
+define dso_local void @st_reg_int32_t_double(ptr nocapture %ptr, i64 %off, i32 signext %str) {
; CHECK-LABEL: st_reg_int32_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = sitofp i32 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i32 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i32 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i32 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i32 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i32 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i32 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptosi float %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptosi float %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptosi float %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptosi float %1 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptosi float %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_int64_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_int64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_int64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_int64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_int64_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_int64_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptosi double %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptosi double %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptosi double %1 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptosi double %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_unalign16_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 1(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = zext i8 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = zext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 2
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = zext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = zext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, 255(0)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 255 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 255 to ptr), align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, -31073(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 99999 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 99999 to ptr), align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_unalign16_uint64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 1(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int8_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv1 = sext i8 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 2
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
%conv = sext i8 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
%conv = sext i8 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 255 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 255 to ptr), align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 99999 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 99999 to ptr), align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_unalign16_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 1(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plhz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = zext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = zext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = zext i16 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = zext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = zext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = zext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 255(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 255 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 255 to ptr), align 2
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, -31073(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 99999 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 99999 to ptr), align 2
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lhz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lhz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lha r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_unalign16_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 1(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lha r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plha r3, 99999(r3), 0
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plha r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_int16_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lhax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_int16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = sext i16 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = sext i16 %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sext i16 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = sext i16 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = sext i16 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = sext i16 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lha r3, 255(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 255 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 255 to ptr), align 2
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lha r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lha r3, -31073(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 99999 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 99999 to ptr), align 2
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lha r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lha r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lha r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign16_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_unalign16_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 1(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align16_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign32_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Zero-extending i32 load at byte offset 1000000000001. The offset exceeds the
; 34-bit prefixed-displacement range, so it is materialized into r4 (pli shown
; truncated) and an indexed lwzx is used on all subtargets.
-define dso_local i64 @ld_unalign64_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Same pattern at offset 1000000000000 (= 244140625 * 4096): still too large
; for a prefixed displacement, so the constant is built in r4 and lwzx used.
-define dso_local i64 @ld_align64_uint64_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Variable (register) offset: expects the indexed form lwzx on every subtarget.
-define dso_local i64 @ld_reg_uint64_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = zext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = zext i32 %0 to i64
 ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = zext i32 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = zext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = zext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = zext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 255(0)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 255 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 255 to ptr), align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, -31073(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 99999 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 99999 to ptr), align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Sign-extending i32 load (lwa family). Offset 1 is not a multiple of 4, so the
; DS-form lwa cannot encode it: P10 uses prefixed plwa, older CPUs indexed lwax.
-define dso_local i64 @ld_unalign16_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwa r3, 1(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 8 is DS-form encodable: a plain lwa 8(r3) suffices on all subtargets.
-define dso_local i64 @ld_align16_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwa r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999 (odd, too big for DS form): P10 folds it into plwa; pre-P10
; materializes the constant and uses indexed lwax.
-define dso_local i64 @ld_unalign32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwa r3, 99999(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999000 fits the 34-bit prefixed displacement on P10 (plwa);
; pre-P10 builds it in a register for lwax.
-define dso_local i64 @ld_align32_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plwa r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000001 exceeds even the prefixed range: the constant is
; materialized (pli shown truncated) and indexed lwax used.
-define dso_local i64 @ld_unalign64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000000 (= 244140625 * 4096): also materialized, then lwax.
-define dso_local i64 @ld_align64_uint64_t_int32_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_int32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_int32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lwax r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Variable (register) offset: indexed-form lwax on every subtarget.
-define dso_local i64 @ld_reg_uint64_t_int32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_int32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_int32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwax r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = sext i32 %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = sext i32 %0 to i64
 ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = sext i32 %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = sext i32 %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = sext i32 %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = sext i32 %1 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 255 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 255 to ptr), align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 99999 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 99999 to ptr), align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: lwa r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: lwa r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Direct i64 load (ld family). Offset 1 is not DS-form encodable: P10 uses
; prefixed pld, older CPUs indexed ldx.
-define dso_local i64 @ld_unalign16_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 1(r3), 0
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 8 fits DS form: plain ld 8(r3).
-define dso_local i64 @ld_align16_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999: P10 folds it into pld; pre-P10 materializes it for ldx.
-define dso_local i64 @ld_unalign32_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999(r3), 0
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999000 fits the prefixed displacement on P10 (pld).
-define dso_local i64 @ld_align32_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pld r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000001 exceeds the prefixed range: materialized, then ldx.
-define dso_local i64 @ld_unalign64_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000000 (= 244140625 * 4096): materialized, then ldx.
-define dso_local i64 @ld_align64_uint64_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: ldx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Variable (register) offset: indexed-form ldx on every subtarget.
-define dso_local i64 @ld_reg_uint64_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- ret i64 %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ ret i64 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
ret i64 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
ret i64 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
ret i64 %1
}
; CHECK-NEXT: ld r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 255 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 255 to ptr), align 8
ret i64 %0
}
; CHECK-NEXT: ld r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
ret i64 %0
}
; CHECK-PREP10-NEXT: ld r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 99999 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 99999 to ptr), align 8
ret i64 %0
}
; CHECK-NEXT: ld r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
ret i64 %0
}
; CHECK-PREP10-NEXT: ld r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000001 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 1000000000001 to ptr), align 8
ret i64 %0
}
; CHECK-PREP10-NEXT: ld r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i64 %0
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; float load + fptoui->i64 (lfs family; the converted result reaches r3 via
; mffprd per the checks). Offset 1: P10 uses prefixed plfs.
-define dso_local i64 @ld_unalign16_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 1(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 8 fits the D-form displacement: plain lfs 8(r3).
-define dso_local i64 @ld_align16_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999: P10 folds it into plfs.
-define dso_local i64 @ld_unalign32_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999000 fits the prefixed displacement on P10 (plfs).
-define dso_local i64 @ld_align32_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000001 exceeds the prefixed range: materialized in r4 first.
-define dso_local i64 @ld_unalign64_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 1000000000000 (= 244140625 * 4096): materialized constant path.
-define dso_local i64 @ld_align64_uint64_t_float(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Variable (register) offset: indexed-form lfsx.
-define dso_local i64 @ld_reg_uint64_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i64
 ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptoui float %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptoui float %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptoui float %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptoui float %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptoui float %1 to i64
ret i64 %conv
}
; CHECK-P8-NEXT: mffprd r3, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 255 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 255 to ptr), align 4
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-P8-NEXT: mffprd r3, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 99999 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 99999 to ptr), align 4
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000001 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 1000000000001 to ptr), align 4
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui float %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; double load + fptoui->i64 (lfd family; result moved to r3 via mffprd per the
; checks). Offset 1: P10 uses prefixed plfd.
-define dso_local i64 @ld_unalign16_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign16_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 1(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 8 fits the D-form displacement: plain lfd 8(r3).
-define dso_local i64 @ld_align16_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align16_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
; Offset 99999: P10 folds it into plfd.
-define dso_local i64 @ld_unalign32_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign32_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
 ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align32_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align32_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_unalign64_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_unalign64_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_align64_uint64_t_double(i8* nocapture readonly %ptr) {
+define dso_local i64 @ld_align64_uint64_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local i64 @ld_reg_uint64_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local i64 @ld_reg_uint64_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i64
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i64
ret i64 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptoui double %1 to i64
ret i64 %conv1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptoui double %1 to i64
ret i64 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptoui double %1 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i64
ret i64 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptoui double %1 to i64
ret i64 %conv
}
; CHECK-P8-NEXT: mffprd r3, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 255 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 255 to ptr), align 8
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-P8-NEXT: mffprd r3, f0
; CHECK-P8-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 99999 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 99999 to ptr), align 8
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000001 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 1000000000001 to ptr), align 8
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-PREP10-NEXT: mffprd r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui double %0 to i64
ret i64 %conv
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- %0 = inttoptr i64 %ptr to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint8_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint8_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint8_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint8_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %conv, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %conv, ptr %add.ptr, align 1
ret void
}
%conv = trunc i64 %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%conv = trunc i64 %str to i8
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
entry:
%conv = trunc i64 %str to i8
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i64 %str to i8
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i64 %str to i8
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i64 %str to i8
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i64 %str to i8
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 1
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i64 %str to i8
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %conv, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store i8 %conv, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store i8 %conv, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i8
- store i8 %conv, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint16_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint16_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = trunc i64 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%conv = trunc i64 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i64 %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i64 %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i64 %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_int16_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_int16_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_int16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_int16_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_int16_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_int16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = trunc i64 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%conv = trunc i64 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i64 %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i64 %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i64 %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i64 %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint32_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint32_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint32_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint32_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = trunc i64 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%conv = trunc i64 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = trunc i64 %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = trunc i64 %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = trunc i64 %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = trunc i64 %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = trunc i64 %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = trunc i64 %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = trunc i64 %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: std r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %str, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %str, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_uint64_t(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_uint64_t(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stdx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %str, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %str, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_uint64_t(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_uint64_t(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %str, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %str, ptr %add.ptr, align 8
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 8
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %str, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: std r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store i64 %str, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: std r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store i64 %str, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %str, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: std r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store i64 %str, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_float(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_float(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = uitofp i64 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%conv = uitofp i64 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = uitofp i64 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i64 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i64 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i64 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i64 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i64 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_uint64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_uint64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_uint64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_uint64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_uint64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint64_t_double(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_uint64_t_double(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_uint64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = uitofp i64 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%conv = uitofp i64 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i64 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i64 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i64 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i64 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i64 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i64 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_int64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_int64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_int64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_int64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int64_t_float(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_int64_t_float(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_int64_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int64_t_float(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_int64_t_float(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_int64_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = sitofp i64 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%conv = sitofp i64 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = sitofp i64 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i64 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i64 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i64 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i64 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i64 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align16_int64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-LABEL: st_align16_int64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align32_int64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align32_int64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int64_t_double(i8* nocapture %ptr, i64 %str) {
+define dso_local void @st_align64_int64_t_double(ptr nocapture %ptr, i64 %str) {
; CHECK-P10-LABEL: st_align64_int64_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprd f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int64_t_double(i8* nocapture %ptr, i64 %off, i64 %str) {
+define dso_local void @st_reg_int64_t_double(ptr nocapture %ptr, i64 %off, i64 %str) {
; CHECK-LABEL: st_reg_int64_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = sitofp i64 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%conv = sitofp i64 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %and, %conv1
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i64 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i64 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i64 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i64 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i64 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i64 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
ret i8 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
ret i8 %1
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
ret i8 %0
}
; CHECK-NEXT: extsb r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
ret i8 %0
}
; CHECK-PREP10-NEXT: extsb r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i8 %0
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int8_t_uint16_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint16_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint16_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int8_t_uint16_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = trunc i16 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int8_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int8_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = trunc i32 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_int8_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_int8_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_int8_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_int8_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: extsb r3, r3
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: extsb r3, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_float(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptosi float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptosi float %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptosi float %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptosi float %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptosi float %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptosi float %1 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptosi float %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align16_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align16_int8_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_int8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align32_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align32_int8_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_int8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_align64_int8_t_double(i8* nocapture readonly %ptr) {
+define dso_local signext i8 @ld_align64_int8_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_int8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local signext i8 @ld_reg_int8_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local signext i8 @ld_reg_int8_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_int8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptosi double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptosi double %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptosi double %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptosi double %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptosi double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptosi double %1 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptosi double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptosi double %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: extsw r3, r3
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptosi double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: lbz r3, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r3, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plbz r3, 99999000(r3), 0
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint8_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: lbzx r3, r3, r4
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint8_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 8
ret i8 %1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 16
ret i8 %1
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 1
ret i8 %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- %1 = load i8, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i8, ptr %0, align 4096
ret i8 %1
}
; CHECK-NEXT: lbz r3, 4080(0)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 4080 to i8*), align 16
+ %0 = load i8, ptr inttoptr (i64 4080 to ptr), align 16
ret i8 %0
}
; CHECK-NEXT: lbz r3, -27108(r3)
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 9999900 to i8*), align 4
+ %0 = load i8, ptr inttoptr (i64 9999900 to ptr), align 4
ret i8 %0
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000001 to i8*), align 1
+ %0 = load i8, ptr inttoptr (i64 1000000000001 to ptr), align 1
ret i8 %0
}
; CHECK-PREP10-NEXT: lbz r3, 0(r3)
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load i8, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ %0 = load i8, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret i8 %0
}
; CHECK-BE-NEXT: lbz r3, 1(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint8_t_uint16_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: lbz r3, 9(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint16_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint16_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 232
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint16_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint16_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint16_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint8_t_uint16_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: lbz r3, 1(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- %1 = load i16, i16* %0, align 2
- %conv = trunc i16 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i16, ptr %add.ptr, align 2
+ %conv = trunc i16 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv1 = trunc i16 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 8
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 16
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 2
%conv = trunc i16 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- %1 = load i16, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i16, ptr %0, align 4096
%conv = trunc i16 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, 4081(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 4080 to i16*), align 16
+ %0 = load i16, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, -27107(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 9999900 to i16*), align 4
+ %0 = load i16, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000001 to i16*), align 2
+ %0 = load i16, ptr inttoptr (i64 1000000000001 to ptr), align 2
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i16, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ %0 = load i16, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i16 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, 3(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint8_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: lbz r3, 11(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 232
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint32_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint32_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint32_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint8_t_uint32_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: lbz r3, 3(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- %1 = load i32, i32* %0, align 4
- %conv = trunc i32 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i32, ptr %add.ptr, align 4
+ %conv = trunc i32 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv1 = trunc i32 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 8
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 16
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4
%conv = trunc i32 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- %1 = load i32, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i32, ptr %0, align 4096
%conv = trunc i32 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, 4083(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 4080 to i32*), align 16
+ %0 = load i32, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, -27105(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 9999900 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000001 to i32*), align 4
+ %0 = load i32, ptr inttoptr (i64 1000000000001 to ptr), align 4
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i32, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ %0 = load i32, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i32 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, 7(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-LE-LABEL: ld_align16_uint8_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbz r3, 8(r3)
; CHECK-BE-NEXT: lbz r3, 15(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align32_uint8_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: plbz r3, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_unalign64_uint8_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 232
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_uint64_t(ptr nocapture readonly %ptr) {
; CHECK-P10-LE-LABEL: ld_align64_uint8_t_uint64_t:
; CHECK-P10-LE: # %bb.0: # %entry
; CHECK-P10-LE-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lbzx r3, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_uint64_t(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LE-LABEL: ld_reg_uint8_t_uint64_t:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: lbzx r3, r3, r4
; CHECK-BE-NEXT: lbz r3, 7(r3)
; CHECK-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %conv = trunc i64 %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load i64, ptr %add.ptr, align 8
+ %conv = trunc i64 %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv1 = trunc i64 %1 to i8
ret i8 %conv1
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 16
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 8
%conv = trunc i64 %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- %1 = load i64, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load i64, ptr %0, align 4096
%conv = trunc i64 %1 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, 4087(0)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 4080 to i64*), align 16
+ %0 = load i64, ptr inttoptr (i64 4080 to ptr), align 16
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-BE-NEXT: lbz r3, -27101(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 9999900 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000001 to i64*), align 8
+ %0 = load i64, ptr inttoptr (i64 1000000000001 to ptr), align 8
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-P8-BE-NEXT: lbz r3, 0(r3)
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load i64, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ %0 = load i64, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = trunc i64 %0 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_float(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfs f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_float(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_float(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_float(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_float(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfsx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- %1 = load float, float* %0, align 4
- %conv = fptoui float %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load float, ptr %add.ptr, align 4
+ %conv = fptoui float %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv1 = fptoui float %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 8
%conv = fptoui float %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 16
%conv = fptoui float %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4
%conv = fptoui float %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- %1 = load float, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load float, ptr %0, align 4096
%conv = fptoui float %1 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 4080 to float*), align 16
+ %0 = load float, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 9999900 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 9999900 to ptr), align 4
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000001 to float*), align 4
+ %0 = load float, ptr inttoptr (i64 1000000000001 to ptr), align 4
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load float, float* inttoptr (i64 1000000000000 to float*), align 4096
+ %0 = load float, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui float %0 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align16_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align16_uint8_t_double(ptr nocapture readonly %ptr) {
; CHECK-LABEL: ld_align16_uint8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 8(r3)
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align32_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align32_uint8_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_uint8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plfd f0, 99999000(r3), 0
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_unalign64_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_unalign64_uint8_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_uint8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_align64_uint8_t_double(i8* nocapture readonly %ptr) {
+define dso_local zeroext i8 @ld_align64_uint8_t_double(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_uint8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local zeroext i8 @ld_reg_uint8_t_double(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local zeroext i8 @ld_reg_uint8_t_double(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_uint8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfdx f0, r3, r4
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %conv = fptoui double %1 to i8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load double, ptr %add.ptr, align 8
+ %conv = fptoui double %0 to i8
ret i8 %conv
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv1 = fptoui double %1 to i8
ret i8 %conv1
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 16
%conv = fptoui double %1 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 8
%conv = fptoui double %1 to i8
ret i8 %conv
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- %1 = load double, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load double, ptr %0, align 4096
%conv = fptoui double %1 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 4080 to double*), align 16
+ %0 = load double, ptr inttoptr (i64 4080 to ptr), align 16
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 9999900 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 9999900 to ptr), align 8
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000001 to double*), align 8
+ %0 = load double, ptr inttoptr (i64 1000000000001 to ptr), align 8
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-PREP10-NEXT: mffprwz r3, f0
; CHECK-PREP10-NEXT: blr
entry:
- %0 = load double, double* inttoptr (i64 1000000000000 to double*), align 4096
+ %0 = load double, ptr inttoptr (i64 1000000000000 to ptr), align 4096
%conv = fptoui double %0 to i8
ret i8 %conv
}
; CHECK-NEXT: stb r4, 0(r3)
; CHECK-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to i8*
- store i8 %str, i8* %0, align 1
+ %0 = inttoptr i64 %ptr to ptr
+ store i8 %str, ptr %0, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stb r4, 8(r3)
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- store i8 %str, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i8 %str, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstb r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- store i8 %str, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i8 %str, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint8_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint8_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint8_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: stbx r4, r3, r5
; CHECK-PREP10-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- store i8 %str, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i8 %str, ptr %add.ptr, align 1
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint8_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint8_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint8_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stbx r5, r3, r4
; CHECK-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- store i8 %str, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i8 %str, ptr %add.ptr, align 1
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 1
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 1
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 8
ret void
}
; CHECK-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 1
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 16
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 1
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 1
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i8*
- store i8 %str, i8* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i8 %str, ptr %0, align 4096
ret void
}
; CHECK-NEXT: stb r3, 4080(0)
; CHECK-NEXT: blr
entry:
- store i8 %str, i8* inttoptr (i64 4080 to i8*), align 16
+ store i8 %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: stb r3, -27108(r4)
; CHECK-NEXT: blr
entry:
- store i8 %str, i8* inttoptr (i64 9999900 to i8*), align 4
+ store i8 %str, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: stb r3, 0(r4)
; CHECK-PREP10-NEXT: blr
entry:
- store i8 %str, i8* inttoptr (i64 1000000000000 to i8*), align 4096
+ store i8 %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint16_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint16_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = zext i8 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = zext i8 %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i8 %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i8 %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i8 %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i8 %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint32_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint32_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = zext i8 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = zext i8 %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i8 %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i8 %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i8 %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i8 %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_uint64_t(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_uint64_t(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = zext i8 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i8 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = zext i8 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = zext i8 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = zext i8 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = zext i8 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = zext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_float(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_float(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_float(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_float(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = uitofp i8 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = uitofp i8 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i8 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i8 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i8 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i8 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i8 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align16_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-LABEL: st_align16_uint8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r4
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align32_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align32_uint8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_uint8_t_double(i8* nocapture %ptr, i8 zeroext %str) {
+define dso_local void @st_align64_uint8_t_double(ptr nocapture %ptr, i8 zeroext %str) {
; CHECK-P10-LABEL: st_align64_uint8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwz f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_uint8_t_double(i8* nocapture %ptr, i64 %off, i8 zeroext %str) {
+define dso_local void @st_reg_uint8_t_double(ptr nocapture %ptr, i64 %off, i8 zeroext %str) {
; CHECK-LABEL: st_reg_uint8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r5
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = uitofp i8 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i8 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = uitofp i8 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = uitofp i8 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = uitofp i8 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = uitofp i8 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = uitofp i8 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- %0 = inttoptr i64 %ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %ptr to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sth r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: psth r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint16_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint16_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint16_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint16_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint16_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint16_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sthx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i16*
- store i16 %conv, i16* %0, align 2
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i16 %conv, ptr %add.ptr, align 2
ret void
}
%conv = sext i8 %str to i16
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
entry:
%conv = sext i8 %str to i16
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i8 %str to i16
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i8 %str to i16
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i8 %str to i16
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i8 %str to i16
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 2
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 2
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i8 %str to i16
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i16*
- store i16 %conv, i16* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i16 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 4080 to i16*), align 16
+ store i16 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 9999900 to i16*), align 4
+ store i16 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i16
- store i16 %conv, i16* inttoptr (i64 1000000000000 to i16*), align 4096
+ store i16 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- %0 = inttoptr i64 %ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstw r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint32_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint32_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint32_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint32_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint32_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint32_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i32*
- store i32 %conv, i32* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i32 %conv, ptr %add.ptr, align 4
ret void
}
%conv = sext i8 %str to i32
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
entry:
%conv = sext i8 %str to i32
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i8 %str to i32
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i8 %str to i32
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i8 %str to i32
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i8 %str to i32
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i8 %str to i32
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i32*
- store i32 %conv, i32* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i32 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 4080 to i32*), align 16
+ store i32 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 9999900 to i32*), align 4
+ store i32 %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i32
- store i32 %conv, i32* inttoptr (i64 1000000000000 to i32*), align 4096
+ store i32 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- %0 = inttoptr i64 %ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std r4, 8(r3)
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstd r4, 99999000(r3), 0
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_uint64_t(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_uint64_t(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_uint64_t:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r5, 244140625
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_uint64_t(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_uint64_t(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_uint64_t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdx r5, r3, r4
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to i64*
- store i64 %conv, i64* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store i64 %conv, ptr %add.ptr, align 8
ret void
}
%conv = sext i8 %str to i64
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i8 %str to i64
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sext i8 %str to i64
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
entry:
%conv = sext i8 %str to i64
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sext i8 %str to i64
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 16
ret void
}
entry:
%conv = sext i8 %str to i64
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sext i8 %str to i64
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to i64*
- store i64 %conv, i64* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store i64 %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 4080 to i64*), align 16
+ store i64 %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 9999900 to i64*), align 8
+ store i64 %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-PREP10-NEXT: blr
entry:
%conv = sext i8 %str to i64
- store i64 %conv, i64* inttoptr (i64 1000000000000 to i64*), align 4096
+ store i64 %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- %0 = inttoptr i64 %ptr to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %ptr to ptr
+ store float %conv, ptr %0, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_float(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_float(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_float:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store float %conv, ptr %add.ptr, align 4
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_float(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_float(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to float*
- store float %conv, float* %0, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store float %conv, ptr %add.ptr, align 4
ret void
}
%conv = sitofp i8 %str to float
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
entry:
%conv = sitofp i8 %str to float
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i8 %str to float
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i8 %str to float
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i8 %str to float
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i8 %str to float
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i8 %str to float
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to float*
- store float %conv, float* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store float %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- store float %conv, float* inttoptr (i64 4080 to float*), align 16
+ store float %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- store float %conv, float* inttoptr (i64 9999900 to float*), align 4
+ store float %conv, ptr inttoptr (i64 9999900 to ptr), align 4
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to float
- store float %conv, float* inttoptr (i64 1000000000000 to float*), align 4096
+ store float %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- %0 = inttoptr i64 %ptr to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %ptr to ptr
+ store double %conv, ptr %0, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align16_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
; CHECK-LABEL: st_align16_int8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r4
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align32_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align32_int8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_int8_t_double(i8* nocapture %ptr, i8 signext %str) {
+define dso_local void @st_align64_int8_t_double(ptr nocapture %ptr, i8 signext %str) {
; CHECK-P10-LABEL: st_align64_int8_t_double:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: mtfprwa f0, r4
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store double %conv, ptr %add.ptr, align 8
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_int8_t_double(i8* nocapture %ptr, i64 %off, i8 signext %str) {
+define dso_local void @st_reg_int8_t_double(ptr nocapture %ptr, i64 %off, i8 signext %str) {
; CHECK-LABEL: st_reg_int8_t_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r5
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to double*
- store double %conv, double* %0, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store double %conv, ptr %add.ptr, align 8
ret void
}
%conv = sitofp i8 %str to double
%conv1 = zext i8 %off to i64
%or = or i64 %conv1, %ptr
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i8 %str to double
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -4096
%conv = sitofp i8 %str to double
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
entry:
%conv = sitofp i8 %str to double
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1000341504
%conv = sitofp i8 %str to double
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 16
ret void
}
entry:
%conv = sitofp i8 %str to double
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 8
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 8
ret void
}
%and = and i64 %ptr, -1099511627776
%conv = sitofp i8 %str to double
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to double*
- store double %conv, double* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store double %conv, ptr %0, align 4096
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- store double %conv, double* inttoptr (i64 4080 to double*), align 16
+ store double %conv, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- store double %conv, double* inttoptr (i64 9999900 to double*), align 8
+ store double %conv, ptr inttoptr (i64 9999900 to ptr), align 8
ret void
}
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i8 %str to double
- store double %conv, double* inttoptr (i64 1000000000000 to double*), align 4096
+ store double %conv, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test1(i64* nocapture readonly %int64, <2 x i64> %vec) {
+define <2 x i64> @s2v_test1(ptr nocapture readonly %int64, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test1:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 0(r3)
; P8BE-NEXT: blr
entry:
- %0 = load i64, i64* %int64, align 8
+ %0 = load i64, ptr %int64, align 8
%vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test2(i64* nocapture readonly %int64, <2 x i64> %vec) {
+define <2 x i64> @s2v_test2(ptr nocapture readonly %int64, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test2:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 8(r3)
; P8BE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %int64, i64 1
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %int64, i64 1
+ %0 = load i64, ptr %arrayidx, align 8
%vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test3(i64* nocapture readonly %int64, <2 x i64> %vec, i32 signext %Idx) {
+define <2 x i64> @s2v_test3(ptr nocapture readonly %int64, <2 x i64> %vec, i32 signext %Idx) {
; P9LE-LABEL: s2v_test3:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi r4, r7, 3
entry:
%idxprom = sext i32 %Idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %int64, i64 %idxprom
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %int64, i64 %idxprom
+ %0 = load i64, ptr %arrayidx, align 8
%vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test4(i64* nocapture readonly %int64, <2 x i64> %vec) {
+define <2 x i64> @s2v_test4(ptr nocapture readonly %int64, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test4:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 8(r3)
; P8BE-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i64, i64* %int64, i64 1
- %0 = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %int64, i64 1
+ %0 = load i64, ptr %arrayidx, align 8
%vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test5(<2 x i64> %vec, i64* nocapture readonly %ptr1) {
+define <2 x i64> @s2v_test5(<2 x i64> %vec, ptr nocapture readonly %ptr1) {
; P9LE-LABEL: s2v_test5:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 0(r5)
; P8BE-NEXT: blr
entry:
- %0 = load i64, i64* %ptr1, align 8
+ %0 = load i64, ptr %ptr1, align 8
%vecins = insertelement <2 x i64> %vec, i64 %0, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f1(double* nocapture readonly %f64, <2 x double> %vec) {
+define <2 x double> @s2v_test_f1(ptr nocapture readonly %f64, <2 x double> %vec) {
; P9LE-LABEL: s2v_test_f1:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 0(r3)
entry:
- %0 = load double, double* %f64, align 8
+ %0 = load double, ptr %f64, align 8
%vecins = insertelement <2 x double> %vec, double %0, i32 0
ret <2 x double> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f2(double* nocapture readonly %f64, <2 x double> %vec) {
+define <2 x double> @s2v_test_f2(ptr nocapture readonly %f64, <2 x double> %vec) {
; P9LE-LABEL: s2v_test_f2:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 8(r3)
entry:
- %arrayidx = getelementptr inbounds double, double* %f64, i64 1
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %f64, i64 1
+ %0 = load double, ptr %arrayidx, align 8
%vecins = insertelement <2 x double> %vec, double %0, i32 0
ret <2 x double> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f3(double* nocapture readonly %f64, <2 x double> %vec, i32 signext %Idx) {
+define <2 x double> @s2v_test_f3(ptr nocapture readonly %f64, <2 x double> %vec, i32 signext %Idx) {
; P9LE-LABEL: s2v_test_f3:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi r4, r7, 3
entry:
%idxprom = sext i32 %Idx to i64
- %arrayidx = getelementptr inbounds double, double* %f64, i64 %idxprom
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %f64, i64 %idxprom
+ %0 = load double, ptr %arrayidx, align 8
%vecins = insertelement <2 x double> %vec, double %0, i32 0
ret <2 x double> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f4(double* nocapture readonly %f64, <2 x double> %vec) {
+define <2 x double> @s2v_test_f4(ptr nocapture readonly %f64, <2 x double> %vec) {
; P9LE-LABEL: s2v_test_f4:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 8(r3)
entry:
- %arrayidx = getelementptr inbounds double, double* %f64, i64 1
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %f64, i64 1
+ %0 = load double, ptr %arrayidx, align 8
%vecins = insertelement <2 x double> %vec, double %0, i32 0
ret <2 x double> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @s2v_test_f5(<2 x double> %vec, double* nocapture readonly %ptr1) {
+define <2 x double> @s2v_test_f5(<2 x double> %vec, ptr nocapture readonly %ptr1) {
; P9LE-LABEL: s2v_test_f5:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfd f0, 0(r5)
entry:
- %0 = load double, double* %ptr1, align 8
+ %0 = load double, ptr %ptr1, align 8
%vecins = insertelement <2 x double> %vec, double %0, i32 0
ret <2 x double> %vecins
}
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
; RUN: -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
-define void @test_liwzx1(<1 x float>* %A, <1 x float>* %B, <1 x float>* %C) {
+define void @test_liwzx1(ptr %A, ptr %B, ptr %C) {
; P9LE-LABEL: test_liwzx1:
; P9LE: # %bb.0:
; P9LE-NEXT: lfs f0, 0(r3)
- %a = load <1 x float>, <1 x float>* %A
- %b = load <1 x float>, <1 x float>* %B
+ %a = load <1 x float>, ptr %A
+ %b = load <1 x float>, ptr %B
%X = fadd <1 x float> %a, %b
- store <1 x float> %X, <1 x float>* %C
+ store <1 x float> %X, ptr %C
ret void
}
-define <1 x float>* @test_liwzx2(<1 x float>* %A, <1 x float>* %B, <1 x float>* %C) {
+define ptr @test_liwzx2(ptr %A, ptr %B, ptr %C) {
; P9LE-LABEL: test_liwzx2:
; P9LE: # %bb.0:
; P9LE-NEXT: lfs f0, 0(r3)
- %a = load <1 x float>, <1 x float>* %A
- %b = load <1 x float>, <1 x float>* %B
+ %a = load <1 x float>, ptr %A
+ %b = load <1 x float>, ptr %B
%X = fsub <1 x float> %a, %b
- store <1 x float> %X, <1 x float>* %C
- ret <1 x float>* %C
+ store <1 x float> %X, ptr %C
+ ret ptr %C
}
; RUN: -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test1(i32* nocapture readonly %int32, <2 x i64> %vec) {
+define <2 x i64> @s2v_test1(ptr nocapture readonly %int32, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test1:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfiwax f0, 0, r3
entry:
- %0 = load i32, i32* %int32, align 4
+ %0 = load i32, ptr %int32, align 4
%conv = sext i32 %0 to i64
%vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test2(i32* nocapture readonly %int32, <2 x i64> %vec) {
+define <2 x i64> @s2v_test2(ptr nocapture readonly %int32, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test2:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: addi r3, r3, 4
entry:
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
%conv = sext i32 %0 to i64
%vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test3(i32* nocapture readonly %int32, <2 x i64> %vec, i32 signext %Idx) {
+define <2 x i64> @s2v_test3(ptr nocapture readonly %int32, <2 x i64> %vec, i32 signext %Idx) {
; P9LE-LABEL: s2v_test3:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi r4, r7, 2
entry:
%idxprom = sext i32 %Idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%conv = sext i32 %0 to i64
%vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test4(i32* nocapture readonly %int32, <2 x i64> %vec) {
+define <2 x i64> @s2v_test4(ptr nocapture readonly %int32, <2 x i64> %vec) {
; P9LE-LABEL: s2v_test4:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: addi r3, r3, 4
entry:
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
%conv = sext i32 %0 to i64
%vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test5(<2 x i64> %vec, i32* nocapture readonly %ptr1) {
+define <2 x i64> @s2v_test5(<2 x i64> %vec, ptr nocapture readonly %ptr1) {
; P9LE-LABEL: s2v_test5:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfiwax f0, 0, r5
entry:
- %0 = load i32, i32* %ptr1, align 4
+ %0 = load i32, ptr %ptr1, align 4
%conv = sext i32 %0 to i64
%vecins = insertelement <2 x i64> %vec, i64 %conv, i32 0
ret <2 x i64> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test6(i32* nocapture readonly %ptr) {
+define <2 x i64> @s2v_test6(ptr nocapture readonly %ptr) {
; P9LE-LABEL: s2v_test6:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfiwax f0, 0, r3
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%conv = sext i32 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @s2v_test7(i32* nocapture readonly %ptr) {
+define <2 x i64> @s2v_test7(ptr nocapture readonly %ptr) {
; P9LE-LABEL: s2v_test7:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfiwax f0, 0, r3
entry:
- %0 = load i32, i32* %ptr, align 4
+ %0 = load i32, ptr %ptr, align 4
%conv = sext i32 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
; RUN: --check-prefixes=AIX,P8-AIX-32
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test1(i32* nocapture readonly %int32, <4 x i32> %vec) {
+define <4 x i32> @s2v_test1(ptr nocapture readonly %int32, <4 x i32> %vec) {
; P9LE-LABEL: s2v_test1:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lwz r3, 0(r3)
; P8-AIX-32-NEXT: vperm v2, v4, v2, v3
; P8-AIX-32-NEXT: blr
entry:
- %0 = load i32, i32* %int32, align 4
+ %0 = load i32, ptr %int32, align 4
%vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
ret <4 x i32> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test2(i32* nocapture readonly %int32, <4 x i32> %vec) {
+define <4 x i32> @s2v_test2(ptr nocapture readonly %int32, <4 x i32> %vec) {
; P9LE-LABEL: s2v_test2:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lwz r3, 4(r3)
; P8-AIX-32-NEXT: vperm v2, v4, v2, v3
; P8-AIX-32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
%vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
ret <4 x i32> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test3(i32* nocapture readonly %int32, <4 x i32> %vec, i32 signext %Idx) {
+define <4 x i32> @s2v_test3(ptr nocapture readonly %int32, <4 x i32> %vec, i32 signext %Idx) {
; P9LE-LABEL: s2v_test3:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi r4, r7, 2
; P8-AIX-32-NEXT: blr
entry:
%idxprom = sext i32 %Idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
ret <4 x i32> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test4(i32* nocapture readonly %int32, <4 x i32> %vec) {
+define <4 x i32> @s2v_test4(ptr nocapture readonly %int32, <4 x i32> %vec) {
; P9LE-LABEL: s2v_test4:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lwz r3, 4(r3)
; P8-AIX-32-NEXT: vperm v2, v4, v2, v3
; P8-AIX-32-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %int32, i64 1
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %int32, i64 1
+ %0 = load i32, ptr %arrayidx, align 4
%vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
ret <4 x i32> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @s2v_test5(<4 x i32> %vec, i32* nocapture readonly %ptr1) {
+define <4 x i32> @s2v_test5(<4 x i32> %vec, ptr nocapture readonly %ptr1) {
; P9LE-LABEL: s2v_test5:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lwz r3, 0(r5)
; P8-AIX-32-NEXT: vperm v2, v4, v2, v3
; P8-AIX-32-NEXT: blr
entry:
- %0 = load i32, i32* %ptr1, align 4
+ %0 = load i32, ptr %ptr1, align 4
%vecins = insertelement <4 x i32> %vec, i32 %0, i32 0
ret <4 x i32> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <4 x float> @s2v_test_f1(float* nocapture readonly %f64, <4 x float> %vec) {
+define <4 x float> @s2v_test_f1(ptr nocapture readonly %f64, <4 x float> %vec) {
; P9LE-LABEL: s2v_test_f1:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lwz r3, 0(r3)
; P8-AIX-32-NEXT: vperm v2, v3, v2, v4
; P8-AIX-32-NEXT: blr
entry:
- %0 = load float, float* %f64, align 4
+ %0 = load float, ptr %f64, align 4
%vecins = insertelement <4 x float> %vec, float %0, i32 0
ret <4 x float> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f2(float* nocapture readonly %f64, <2 x float> %vec) {
+define <2 x float> @s2v_test_f2(ptr nocapture readonly %f64, <2 x float> %vec) {
; P9LE-LABEL: s2v_test_f2:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: addi r3, r3, 4
; AIX-NEXT: vmrgow v2, v3, v2
; AIX-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %f64, i64 1
- %0 = load float, float* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds float, ptr %f64, i64 1
+ %0 = load float, ptr %arrayidx, align 8
%vecins = insertelement <2 x float> %vec, float %0, i32 0
ret <2 x float> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f3(float* nocapture readonly %f64, <2 x float> %vec, i32 signext %Idx) {
+define <2 x float> @s2v_test_f3(ptr nocapture readonly %f64, <2 x float> %vec, i32 signext %Idx) {
; P9LE-LABEL: s2v_test_f3:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: sldi r4, r7, 2
; P8-AIX-32-NEXT: blr
entry:
%idxprom = sext i32 %Idx to i64
- %arrayidx = getelementptr inbounds float, float* %f64, i64 %idxprom
- %0 = load float, float* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds float, ptr %f64, i64 %idxprom
+ %0 = load float, ptr %arrayidx, align 8
%vecins = insertelement <2 x float> %vec, float %0, i32 0
ret <2 x float> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f4(float* nocapture readonly %f64, <2 x float> %vec) {
+define <2 x float> @s2v_test_f4(ptr nocapture readonly %f64, <2 x float> %vec) {
; P9LE-LABEL: s2v_test_f4:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: addi r3, r3, 4
; AIX-NEXT: vmrgow v2, v3, v2
; AIX-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds float, float* %f64, i64 1
- %0 = load float, float* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds float, ptr %f64, i64 1
+ %0 = load float, ptr %arrayidx, align 8
%vecins = insertelement <2 x float> %vec, float %0, i32 0
ret <2 x float> %vecins
}
; Function Attrs: norecurse nounwind readonly
-define <2 x float> @s2v_test_f5(<2 x float> %vec, float* nocapture readonly %ptr1) {
+define <2 x float> @s2v_test_f5(<2 x float> %vec, ptr nocapture readonly %ptr1) {
; P9LE-LABEL: s2v_test_f5:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lfiwzx f0, 0, r5
; AIX-NEXT: vmrgow v2, v3, v2
; AIX-NEXT: blr
entry:
- %0 = load float, float* %ptr1, align 8
+ %0 = load float, ptr %ptr1, align 8
%vecins = insertelement <2 x float> %vec, float %0, i32 0
ret <2 x float> %vecins
}
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
; RUN: -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
-define i8 @scalar_to_vector_half(i16* nocapture readonly %ad) {
+define i8 @scalar_to_vector_half(ptr nocapture readonly %ad) {
; P9LE-LABEL: scalar_to_vector_half:
; P9LE: # %bb.0: # %entry
; P9LE-NEXT: lhz r3, 0(r3)
; P8BE-NEXT: rldicl r3, r3, 8, 56
; P8BE-NEXT: blr
entry:
- %0 = bitcast i16* %ad to <2 x i8>*
- %1 = load <2 x i8>, <2 x i8>* %0, align 1
- %2 = extractelement <2 x i8> %1, i32 0
- ret i8 %2
+ %0 = load <2 x i8>, ptr %ad, align 1
+ %1 = extractelement <2 x i8> %0, i32 0
+ ret i8 %1
}
; RUN: llc -mtriple=powerpc64le-- -verify-machineinstrs \
; RUN: -ppc-asm-full-reg-names -mcpu=pwr8 < %s | FileCheck --check-prefix=LE %s
-define dso_local void @test1(<2 x double>* %v, i64 %a) local_unnamed_addr #0 {
+define dso_local void @test1(ptr %v, i64 %a) local_unnamed_addr #0 {
; AIX64-LABEL: test1:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: mtvsrd vs34, r4
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i64 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i64 %a, ptr %v)
ret void
}
-define dso_local void @test2(<2 x double>* %v, i32 signext %a) local_unnamed_addr #0 {
+define dso_local void @test2(ptr %v, i32 signext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test2:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: clrldi r4, r4, 32
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, ptr %v)
ret void
}
-define dso_local void @test3(<2 x double>* %v, i16 signext %a) local_unnamed_addr #0 {
+define dso_local void @test3(ptr %v, i16 signext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test3:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: clrldi r4, r4, 48
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, ptr %v)
ret void
}
-define dso_local void @test4(<2 x double>* %v, i8 signext %a) local_unnamed_addr #0 {
+define dso_local void @test4(ptr %v, i8 signext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test4:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: clrldi r4, r4, 56
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, ptr %v)
ret void
}
-define dso_local void @test6(<2 x double>* %v, i32 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test6(ptr %v, i32 zeroext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test6:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: mtvsrd vs34, r4
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i32 %a, ptr %v)
ret void
}
-define dso_local void @test7(<2 x double>* %v, i16 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test7(ptr %v, i16 zeroext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test7:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: mtvsrd vs34, r4
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i16 %a, ptr %v)
ret void
}
-define dso_local void @test8(<2 x double>* %v, i8 zeroext %a) local_unnamed_addr #0 {
+define dso_local void @test8(ptr %v, i8 zeroext %a) local_unnamed_addr #0 {
; AIX64-LABEL: test8:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: mtvsrd vs34, r4
; LE-NEXT: #NO_APP
; LE-NEXT: blr
entry:
- tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, <2 x double>* %v)
+ tail call void asm sideeffect "stvx $0,0,$1", "v,r,~{memory}"(i8 %a, ptr %v)
ret void
}
@scalars = common dso_local local_unnamed_addr global %_type_of_scalars zeroinitializer, align 16
-define dso_local void @test([0 x %_elem_type_of_x]* noalias %.x, [0 x %_elem_type_of_a]* %.a, i64* noalias %.n) {
+define dso_local void @test(ptr noalias %.x, ptr %.a, ptr noalias %.n) {
; CHECK-P9-LABEL: test:
; CHECK-P9: # %bb.0: # %entry
; CHECK-P9-NEXT: ld 5, 0(5)
; CHECK-P9-NO-HEURISTIC-NEXT: # %bb.2: # %return.block
; CHECK-P9-NO-HEURISTIC-NEXT: blr
entry:
- %x_rvo_based_addr_3 = getelementptr inbounds [0 x %_elem_type_of_x], [0 x %_elem_type_of_x]* %.x, i64 0, i64 -1
- %a_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_a], [0 x %_elem_type_of_a]* %.a, i64 0, i64 -1
- %_val_n_ = load i64, i64* %.n, align 8
- %_val_c1_ = load double, double* getelementptr inbounds (%_type_of_scalars, %_type_of_scalars* @scalars, i64 0, i32 1), align 16
+ %x_rvo_based_addr_3 = getelementptr inbounds [0 x %_elem_type_of_x], ptr %.x, i64 0, i64 -1
+ %a_rvo_based_addr_5 = getelementptr inbounds [0 x %_elem_type_of_a], ptr %.a, i64 0, i64 -1
+ %_val_n_ = load i64, ptr %.n, align 8
+ %_val_c1_ = load double, ptr getelementptr inbounds (%_type_of_scalars, ptr @scalars, i64 0, i32 1), align 16
%n.vec = and i64 %_val_n_, -32
%broadcast.splatinsert26 = insertelement <4 x double> undef, double %_val_c1_, i32 0
%broadcast.splat27 = shufflevector <4 x double> %broadcast.splatinsert26, <4 x double> undef, <4 x i32> zeroinitializer
vector.body:
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%offset.idx = or i64 %index, 1
- %0 = getelementptr %_elem_type_of_x, %_elem_type_of_x* %x_rvo_based_addr_3, i64 %offset.idx, i32 0
- %1 = getelementptr %_elem_type_of_a, %_elem_type_of_a* %a_rvo_based_addr_5, i64 %offset.idx, i32 0
- %2 = bitcast double* %1 to <4 x double>*
- %wide.load = load <4 x double>, <4 x double>* %2, align 8
- %3 = getelementptr double, double* %1, i64 4
- %4 = bitcast double* %3 to <4 x double>*
- %wide.load19 = load <4 x double>, <4 x double>* %4, align 8
- %5 = getelementptr double, double* %1, i64 8
- %6 = bitcast double* %5 to <4 x double>*
- %wide.load20 = load <4 x double>, <4 x double>* %6, align 8
- %7 = getelementptr double, double* %1, i64 12
- %8 = bitcast double* %7 to <4 x double>*
- %wide.load21 = load <4 x double>, <4 x double>* %8, align 8
- %9 = getelementptr double, double* %1, i64 16
- %10 = bitcast double* %9 to <4 x double>*
- %wide.load22 = load <4 x double>, <4 x double>* %10, align 8
- %11 = getelementptr double, double* %1, i64 20
- %12 = bitcast double* %11 to <4 x double>*
- %wide.load23 = load <4 x double>, <4 x double>* %12, align 8
- %13 = getelementptr double, double* %1, i64 24
- %14 = bitcast double* %13 to <4 x double>*
- %wide.load24 = load <4 x double>, <4 x double>* %14, align 8
- %15 = getelementptr double, double* %1, i64 28
- %16 = bitcast double* %15 to <4 x double>*
- %wide.load25 = load <4 x double>, <4 x double>* %16, align 8
- %17 = fmul fast <4 x double> %wide.load, %broadcast.splat27
- %18 = fmul fast <4 x double> %wide.load19, %broadcast.splat27
- %19 = fmul fast <4 x double> %wide.load20, %broadcast.splat27
- %20 = fmul fast <4 x double> %wide.load21, %broadcast.splat27
- %21 = fmul fast <4 x double> %wide.load22, %broadcast.splat27
- %22 = fmul fast <4 x double> %wide.load23, %broadcast.splat27
- %23 = fmul fast <4 x double> %wide.load24, %broadcast.splat27
- %24 = fmul fast <4 x double> %wide.load25, %broadcast.splat27
- %25 = bitcast double* %0 to <4 x double>*
- store <4 x double> %17, <4 x double>* %25, align 8
- %26 = getelementptr double, double* %0, i64 4
- %27 = bitcast double* %26 to <4 x double>*
- store <4 x double> %18, <4 x double>* %27, align 8
- %28 = getelementptr double, double* %0, i64 8
- %29 = bitcast double* %28 to <4 x double>*
- %30 = getelementptr double, double* %0, i64 12
- %31 = bitcast double* %30 to <4 x double>*
- %32 = getelementptr double, double* %0, i64 16
- %33 = bitcast double* %32 to <4 x double>*
- %34 = getelementptr double, double* %0, i64 20
- %35 = bitcast double* %34 to <4 x double>*
- %36 = getelementptr double, double* %0, i64 24
- %37 = bitcast double* %36 to <4 x double>*
- %38 = getelementptr double, double* %0, i64 28
- %39 = bitcast double* %38 to <4 x double>*
- store <4 x double> %24, <4 x double>* %39, align 8
+ %0 = getelementptr %_elem_type_of_x, ptr %x_rvo_based_addr_3, i64 %offset.idx, i32 0
+ %1 = getelementptr %_elem_type_of_a, ptr %a_rvo_based_addr_5, i64 %offset.idx, i32 0
+ %wide.load = load <4 x double>, ptr %1, align 8
+ %2 = getelementptr double, ptr %1, i64 4
+ %wide.load19 = load <4 x double>, ptr %2, align 8
+ %3 = getelementptr double, ptr %1, i64 8
+ %wide.load20 = load <4 x double>, ptr %3, align 8
+ %4 = getelementptr double, ptr %1, i64 12
+ %wide.load21 = load <4 x double>, ptr %4, align 8
+ %5 = getelementptr double, ptr %1, i64 16
+ %wide.load22 = load <4 x double>, ptr %5, align 8
+ %6 = getelementptr double, ptr %1, i64 20
+ %wide.load23 = load <4 x double>, ptr %6, align 8
+ %7 = getelementptr double, ptr %1, i64 24
+ %wide.load24 = load <4 x double>, ptr %7, align 8
+ %8 = getelementptr double, ptr %1, i64 28
+ %wide.load25 = load <4 x double>, ptr %8, align 8
+ %9 = fmul fast <4 x double> %wide.load, %broadcast.splat27
+ %10 = fmul fast <4 x double> %wide.load19, %broadcast.splat27
+ %11 = fmul fast <4 x double> %wide.load20, %broadcast.splat27
+ %12 = fmul fast <4 x double> %wide.load21, %broadcast.splat27
+ %13 = fmul fast <4 x double> %wide.load22, %broadcast.splat27
+ %14 = fmul fast <4 x double> %wide.load23, %broadcast.splat27
+ %15 = fmul fast <4 x double> %wide.load24, %broadcast.splat27
+ %16 = fmul fast <4 x double> %wide.load25, %broadcast.splat27
+ store <4 x double> %9, ptr %0, align 8
+ %17 = getelementptr double, ptr %0, i64 4
+ store <4 x double> %10, ptr %17, align 8
+ %18 = getelementptr double, ptr %0, i64 8
+ %19 = getelementptr double, ptr %0, i64 12
+ %20 = getelementptr double, ptr %0, i64 16
+ %21 = getelementptr double, ptr %0, i64 20
+ %22 = getelementptr double, ptr %0, i64 24
+ %23 = getelementptr double, ptr %0, i64 28
+ store <4 x double> %16, ptr %23, align 8
%index.next = add i64 %index, 32
%cm = icmp eq i64 %index.next, %n.vec
br i1 %cm, label %return.block, label %vector.body
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s --check-prefix=CHECK-P9
-define i64 @store_disjoint_memory(i64* nocapture %P, i64 %v) {
+define i64 @store_disjoint_memory(ptr nocapture %P, i64 %v) {
entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: store_disjoint_memory:%bb.0
; CHECK:SU([[REG3:[0-9]+]]): STD renamable $x{{[0-9]+}}, 16, renamable $x[[REG5]]
; CHECK: Predecessors:
; CHECK-NOT: SU([[REG2]]): Ord Latency=0 Memory
- %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
- store i64 %v, i64* %arrayidx
- %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
- store i64 %v, i64* %arrayidx1
+ %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+ store i64 %v, ptr %arrayidx
+ %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+ store i64 %v, ptr %arrayidx1
ret i64 %v
}
define double @test_lxsd_no_barrier(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, double %j, double %k, double %l, double %m) {
entry:
- %0 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 10), align 8
- %1 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 17), align 8
- %2 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 87), align 8
- %3 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 97), align 8
- %4 = load double, double* getelementptr inbounds ([500 x double], [500 x double]* @gd, i64 0, i64 77), align 8
+ %0 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 10), align 8
+ %1 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 17), align 8
+ %2 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 87), align 8
+ %3 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 97), align 8
+ %4 = load double, ptr getelementptr inbounds ([500 x double], ptr @gd, i64 0, i64 77), align 8
%add = fadd double %a, %b
%add1 = fadd double %add, %c
%add2 = fadd double %add1, %d
br i1 undef, label %if, label %else
; CHECK: cmplwi 3, 0
if: ; preds = %entry
- store { ppc_fp128, ppc_fp128 } zeroinitializer, { ppc_fp128, ppc_fp128 }* undef
+ store { ppc_fp128, ppc_fp128 } zeroinitializer, ptr undef
ret void
else: ; preds = %entry
; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
; Function Attrs: norecurse nounwind readonly
-define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+define float @testSingleAccess(ptr nocapture readonly %arr) local_unnamed_addr #0 {
; CHECK-LABEL: testSingleAccess:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi 3, 3, 8
; CHECK-NEXT: xscvsxdsp 1, 0
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 2
+ %0 = load i32, ptr %arrayidx, align 4
%conv = sitofp i32 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind readonly
-define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 {
+define float @testMultipleAccess(ptr nocapture readonly %arr) local_unnamed_addr #0 {
; CHECK-LABEL: testMultipleAccess:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 4, 8(3)
; CHECK-NEXT: xscvsxdsp 1, 0
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 2
- %0 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 3
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 2
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 3
+ %1 = load i32, ptr %arrayidx1, align 4
%add = add nsw i32 %1, %0
%conv = sitofp i32 %add to float
ret float %conv
; Check that llc does not crash due to an illegal APInt operation
-define i1 @f(i8* %ptr) {
+define i1 @f(ptr %ptr) {
entry:
- %val = load i8, i8* %ptr, align 8, !range !0
+ %val = load i8, ptr %ptr, align 8, !range !0
%tobool = icmp eq i8 %val, 0
ret i1 %tobool
}
declare void @g(i32 signext)
-define void @foo(i8* %p) {
+define void @foo(ptr %p) {
entry:
br label %while.body
while.body:
- %0 = load i8, i8* %p, align 1
+ %0 = load i8, ptr %p, align 1
%conv = zext i8 %0 to i32
%cmp = icmp sgt i8 %0, 0
br i1 %cmp, label %if.then, label %while.body
}
; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
-define i32 @all_bits_clear_branch(i32* %P, i32* %Q) {
+define i32 @all_bits_clear_branch(ptr %P, ptr %Q) {
; CHECK-LABEL: all_bits_clear_branch:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or. 3, 3, 4
; CHECK-NEXT: li 3, 192
; CHECK-NEXT: blr
entry:
- %a = icmp eq i32* %P, null
- %b = icmp eq i32* %Q, null
+ %a = icmp eq ptr %P, null
+ %b = icmp eq ptr %Q, null
%c = and i1 %a, %b
br i1 %c, label %bb1, label %return
}
; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
-define i32 @any_bits_set_branch(i32* %P, i32* %Q) {
+define i32 @any_bits_set_branch(ptr %P, ptr %Q) {
; CHECK-LABEL: any_bits_set_branch:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: or. 3, 3, 4
; CHECK-NEXT: li 3, 192
; CHECK-NEXT: blr
entry:
- %a = icmp ne i32* %P, null
- %b = icmp ne i32* %Q, null
+ %a = icmp ne ptr %P, null
+ %b = icmp ne ptr %Q, null
%c = or i1 %a, %b
br i1 %c, label %bb1, label %return
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff -mcpu=pwr8 \
; RUN: < %s -vec-extabi | FileCheck %s
-%class.PB2 = type { [1 x i32], %class.PB1* }
+%class.PB2 = type { [1 x i32], ptr }
%class.PB1 = type { [1 x i32], i64, i64, i32 }
; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test1(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 1, 63
; CHECK-NEXT: blr
entry:
- %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
- %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+ %0 = load i32, ptr %s_a, align 8, !tbaa !1
%and.i = and i32 %0, 8
- %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
- %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+ %1 = load i32, ptr %s_b, align 8, !tbaa !1
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ult i32 %and.i, %and.i4
ret i1 %cmp.i5
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test2(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 1, 63
; CHECK-NEXT: blr
entry:
- %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
- %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+ %0 = load i32, ptr %s_a, align 8, !tbaa !1
%and.i = and i32 %0, 8
- %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
- %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+ %1 = load i32, ptr %s_b, align 8, !tbaa !1
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ule i32 %and.i, %and.i4
ret i1 %cmp.i5
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test3(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 1, 63
; CHECK-NEXT: blr
entry:
- %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
- %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+ %0 = load i32, ptr %s_a, align 8, !tbaa !1
%and.i = and i32 %0, 8
- %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
- %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+ %1 = load i32, ptr %s_b, align 8, !tbaa !1
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ugt i32 %and.i, %and.i4
ret i1 %cmp.i5
}
; Function Attrs: norecurse nounwind readonly
-define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+define zeroext i1 @test4(ptr %s_a, ptr %s_b) local_unnamed_addr #0 {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-NEXT: rldicl 3, 3, 1, 63
; CHECK-NEXT: blr
entry:
- %arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
- %0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
+ %0 = load i32, ptr %s_a, align 8, !tbaa !1
%and.i = and i32 %0, 8
- %arrayidx.i37 = bitcast %class.PB2* %s_b to i32*
- %1 = load i32, i32* %arrayidx.i37, align 8, !tbaa !1
+ %1 = load i32, ptr %s_b, align 8, !tbaa !1
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp uge i32 %and.i, %and.i4
ret i1 %cmp.i5
; default at the default CodeOpt level.
; XFAIL: *
-define i32 @setcc_one_or_zero(i32* %a) {
+define i32 @setcc_one_or_zero(ptr %a) {
entry:
- %tmp.1 = icmp ne i32* %a, null ; <i1> [#uses=1]
+ %tmp.1 = icmp ne ptr %a, null ; <i1> [#uses=1]
%inc.1 = zext i1 %tmp.1 to i32 ; <i32> [#uses=1]
ret i32 %inc.1
}
; Function Attrs: nounwind
define void @fn1() #0 {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp ne i32 %0, 1
%conv = zext i1 %cmp to i32
- %1 = load i32, i32* @b, align 4
+ %1 = load i32, ptr @b, align 4
%cmp1 = icmp ne i32 0, %1
%conv2 = zext i1 %cmp1 to i32
%or = or i32 %conv, %conv2
%xor = xor i32 1, %or
%call = call signext i32 @fn2(i32 signext %xor)
%conv4 = zext i1 undef to i32
- store i32 %conv4, i32* @b, align 4
+ store i32 %conv4, ptr @b, align 4
ret void
; CHECK-LABEL: @fn1
; CHECK-NEXT: blr
top:
%1 = alloca i16, align 4
- %2 = bitcast i16* %1 to i8*
- store i16 0, i16* %1, align 4
- %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
- %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+ store i16 0, ptr %1, align 4
+ %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+ %rv.i2 = load atomic i16, ptr %1 acquire, align 16
ret i16 %rv.i2
}
; CHECK-NEXT: blr
top:
%1 = alloca i16, align 4
- %2 = bitcast i16* %1 to i8*
- store i16 0, i16* %1, align 4
- %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
- %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+ store i16 0, ptr %1, align 4
+ %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+ %rv.i2 = load atomic i16, ptr %1 acquire, align 16
ret i16 %rv.i2
}
-define i16 @noSEXTLoad(i16 *%p) #0 {
+define i16 @noSEXTLoad(ptr %p) #0 {
; CHECK-LABEL: noSEXTLoad:
; CHECK: # %bb.0: # %top
; CHECK-NEXT: lha 3, 0(3)
; CHECK-NEXT: isync
; CHECK-NEXT: blr
top:
- %0 = load i16, i16* %p, align 2
+ %0 = load i16, ptr %p, align 2
%1 = alloca i16, align 4
- %2 = bitcast i16* %1 to i8*
- store i16 0, i16* %1, align 4
- %rv.i = atomicrmw min i16* %1, i16 %0 acq_rel
- %rv.i2 = load atomic i16, i16* %1 acquire, align 16
+ store i16 0, ptr %1, align 4
+ %rv.i = atomicrmw min ptr %1, i16 %0 acq_rel
+ %rv.i2 = load atomic i16, ptr %1 acquire, align 16
ret i16 %rv.i2
}
attributes #0 = { nounwind }
test2.exit.us.unr-lcssa: ; preds = %if.end.i.us.1, %for.body.i62.us.preheader
%c.addr.036.i.us.unr = phi i64 [ 0, %for.body.i62.us.preheader ], [ %c.addr.1.i.us.1, %if.end.i.us.1 ]
- %1 = load i64, i64* undef, align 8
+ %1 = load i64, ptr undef, align 8
%tobool.i61.us.epil = icmp eq i64 %c.addr.036.i.us.unr, 0
%add.neg.i.us.epil.pn = select i1 %tobool.i61.us.epil, i64 %1, i64 0
%storemerge269 = sub i64 %add.neg.i.us.epil.pn, 0
- store i64 %storemerge269, i64* undef, align 8
+ store i64 %storemerge269, ptr undef, align 8
unreachable
test3.exit.split: ; preds = %cond.end.i
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64le -mcpu=pwr9 -verify-machineinstrs < %s | FileCheck %s
-define double @zot(i32* %arg, float* %arg1, i16* %arg2) {
+define double @zot(ptr %arg, ptr %arg1, ptr %arg2) {
; CHECK-LABEL: zot:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: bc 12, 20, .LBB0_2
; CHECK-NEXT: xsmuldp 1, 1, 0
; CHECK-NEXT: b .LBB0_3
bb:
- %tmp = load i32, i32* %arg, align 8
+ %tmp = load i32, ptr %arg, align 8
br i1 undef, label %bb9, label %bb3
bb3:
- %tmp4 = load i16, i16* %arg2, align 4
+ %tmp4 = load i16, ptr %arg2, align 4
%tmp5 = lshr i16 %tmp4, 4
%tmp6 = and i16 %tmp5, 3
%tmp7 = zext i16 %tmp6 to i32
br label %bb13
bb13:
- %tmp14 = load float, float* %arg1, align 4
+ %tmp14 = load float, ptr %arg1, align 4
%tmp15 = fpext float %tmp14 to double
br label %bb16
for.body: ; preds = %for.cond, %for.body.lr.ph
%i.032 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.cond ]
- %0 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag.1.15.17.21.25.49.53.55]* @env_sigill to i8*))
+ %0 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
%inc = add nsw i32 %i.032, 1
br i1 false, label %if.else, label %for.cond
}
; Function Attrs: nounwind
-declare i32 @llvm.eh.sjlj.setjmp(i8*) #0
+declare i32 @llvm.eh.sjlj.setjmp(ptr) #0
attributes #0 = { nounwind }
define void @foo() #0 {
entry:
- call void @llvm.eh.sjlj.longjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+ call void @llvm.eh.sjlj.longjmp(ptr @env_sigill)
unreachable
; CHECK: @foo
ret void
}
-declare void @llvm.eh.sjlj.longjmp(i8*) #1
+declare void @llvm.eh.sjlj.longjmp(ptr) #1
define signext i32 @main() #0 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = call i8* @llvm.frameaddress(i32 0)
- store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
- %1 = call i8* @llvm.stacksave()
- store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
- %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+ store i32 0, ptr %retval
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ store ptr %0, ptr @env_sigill
+ %1 = call ptr @llvm.stacksave()
+ store ptr %1, ptr getelementptr (ptr, ptr @env_sigill, i32 2)
+ %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 1, i32* %retval
+ store i32 1, ptr %retval
br label %return
if.else: ; preds = %entry
br label %if.end
if.end: ; preds = %if.else
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
br label %return
return: ; preds = %if.end, %if.then
- %3 = load i32, i32* %retval
+ %3 = load i32, ptr %retval
ret i32 %3
define signext i32 @main2() #0 {
entry:
%a = alloca i8, align 64
- call void @bar(i8* %a)
+ call void @bar(ptr %a)
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = call i8* @llvm.frameaddress(i32 0)
- store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
- %1 = call i8* @llvm.stacksave()
- store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
- %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+ store i32 0, ptr %retval
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ store ptr %0, ptr @env_sigill
+ %1 = call ptr @llvm.stacksave()
+ store ptr %1, ptr getelementptr (ptr, ptr @env_sigill, i32 2)
+ %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 1, i32* %retval
+ store i32 1, ptr %retval
br label %return
if.else: ; preds = %entry
br label %if.end
if.end: ; preds = %if.else
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
br label %return
return: ; preds = %if.end, %if.then
- %3 = load i32, i32* %retval
+ %3 = load i32, ptr %retval
ret i32 %3
; CHECK-LABEL: main2:
define void @test_sjlj_setjmp() #0 {
entry:
- %0 = load i8, i8* @cond, align 1
+ %0 = load i8, ptr @cond, align 1
%tobool = trunc i8 %0 to i1
br i1 %tobool, label %return, label %end
end:
- %1 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
+ %1 = call i32 @llvm.eh.sjlj.setjmp(ptr @env_sigill)
br label %return
return:
; CHECK-NOT: bl _setjmp
}
-declare void @bar(i8*) #3
+declare void @bar(ptr) #3
-declare i8* @llvm.frameaddress(i32) #2
+declare ptr @llvm.frameaddress(i32) #2
-declare i8* @llvm.stacksave() #3
+declare ptr @llvm.stacksave() #3
-declare i32 @llvm.eh.sjlj.setjmp(i8*) #3
+declare i32 @llvm.eh.sjlj.setjmp(ptr) #3
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { noreturn nounwind }
; Function Attrs: noinline nounwind
define void @_Z23BuiltinLongJmpFunc1_bufv() #0 {
entry:
- call void @llvm.eh.sjlj.longjmp(i8* bitcast (void ()* @_Z23BuiltinLongJmpFunc1_bufv to i8*))
+ call void @llvm.eh.sjlj.longjmp(ptr @_Z23BuiltinLongJmpFunc1_bufv)
unreachable
; CHECK: @_Z23BuiltinLongJmpFunc1_bufv
}
; Function Attrs: noreturn nounwind
-declare void @llvm.eh.sjlj.longjmp(i8*) #1
+declare void @llvm.eh.sjlj.longjmp(ptr) #1
ret void
}
-define i32 @test4(i16* %P) {
- %tmp.1 = load i16, i16* %P
+define i32 @test4(ptr %P) {
+ %tmp.1 = load i16, ptr %P
%tmp.2 = zext i16 %tmp.1 to i32
%tmp.3 = and i32 %tmp.2, 255
ret i32 %tmp.3
}
-define i32 @test5(i16* %P) {
- %tmp.1 = load i16, i16* %P
+define i32 @test5(ptr %P) {
+ %tmp.1 = load i16, ptr %P
%tmp.2 = bitcast i16 %tmp.1 to i16
%tmp.3 = zext i16 %tmp.2 to i32
%tmp.4 = and i32 %tmp.3, 255
ret i32 %tmp.4
}
-define i32 @test6(i32* %P) {
- %tmp.1 = load i32, i32* %P
+define i32 @test6(ptr %P) {
+ %tmp.1 = load i32, ptr %P
%tmp.2 = and i32 %tmp.1, 255
ret i32 %tmp.2
}
; CHECK-NEXT: li 5, 0
; CHECK-NEXT: bl printf
; CHECK-NEXT: nop
- %1 = load i32, i32* undef, align 4
+ %1 = load i32, ptr undef, align 4
%2 = add i32 %1, -1
%3 = zext i32 %2 to i64
%4 = zext i32 3 to i64
%8 = trunc i64 %6 to i32
%9 = sub i32 0, %8
%10 = zext i32 %9 to i64
- %11 = getelementptr inbounds i8, i8* null, i64 %10
- %12 = load i8, i8* %11, align 1
+ %11 = getelementptr inbounds i8, ptr null, i64 %10
+ %12 = load i8, ptr %11, align 1
%13 = icmp eq i8 %12, 84
%14 = zext i1 %13 to i32
%15 = add i32 %7, %14
20: ; preds = %5
%21 = trunc i64 %16 to i32
- call void (i8*, ...) @printf(i8* getelementptr inbounds ([69 x i8], [69 x i8]* @.str.28, i64 0, i64 0), i32 zeroext 3, i32 zeroext undef, i32 zeroext %15, i32 zeroext undef, i32 zeroext 3, i8* undef, i32 zeroext undef, i32 zeroext 3, i32 zeroext %21, i8* undef, i32 zeroext undef) #1
+ call void (ptr, ...) @printf(ptr @.str.28, i32 zeroext 3, i32 zeroext undef, i32 zeroext %15, i32 zeroext undef, i32 zeroext 3, ptr undef, i32 zeroext undef, i32 zeroext 3, i32 zeroext %21, ptr undef, i32 zeroext undef) #1
unreachable
}
-declare void @printf(i8*, ...) local_unnamed_addr #0
+declare void @printf(ptr, ...) local_unnamed_addr #0
1: ; preds = %1, %0
%2 = phi i64 [ 0, %0 ], [ %13, %1 ]
- %3 = load i16, i16* null, align 2
- %4 = load i16, i16* undef, align 2
+ %3 = load i16, ptr null, align 2
+ %4 = load i16, ptr undef, align 2
%5 = sext i16 %3 to i32
%6 = sext i16 %4 to i32
%7 = add nsw i32 0, %5
%10 = sdiv i32 %8, 2
%11 = trunc i32 %9 to i16
%12 = trunc i32 %10 to i16
- store i16 %11, i16* null, align 2
- store i16 %12, i16* undef, align 2
+ store i16 %11, ptr null, align 2
+ store i16 %12, ptr undef, align 2
%13 = add i64 %2, 4
%14 = icmp eq i64 %13, 0
br i1 %14, label %15, label %1
%6 = phi i64 [ %12, %3 ], [ undef, %2 ]
%7 = add nsw i64 %4, -1
%8 = fmul fast double %5, 1.000000e+07
- %9 = getelementptr inbounds %0, %0* null, i64 1, i32 1, i64 %7
- %10 = load double, double* %9, align 8
+ %9 = getelementptr inbounds %0, ptr null, i64 1, i32 1, i64 %7
+ %10 = load double, ptr %9, align 8
%11 = fadd fast double %10, %8
%12 = add i64 %6, -1
%13 = icmp eq i64 %12, 0
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs\
; RUN: -mcpu=pwr9 --ppc-enable-pipeliner --pipeliner-force-ii=15 2>&1 | FileCheck %s
-define void @phi2(i32, i32, i8*) local_unnamed_addr {
+define void @phi2(i32, i32, ptr) local_unnamed_addr {
; CHECK-LABEL: phi2:
; CHECK: # %bb.0:
; CHECK-NEXT: divw 8, 3, 4
%12 = trunc i32 %10 to i8
%13 = select i1 %11, i8 48, i8 55
%14 = add i8 %13, %12
- %15 = getelementptr inbounds i8, i8* %2, i64 %7
- store i8 %14, i8* %15, align 1
+ %15 = getelementptr inbounds i8, ptr %2, i64 %7
+ store i8 %14, ptr %15, align 1
%16 = icmp sgt i64 %5, 1
br i1 %16, label %4, label %17
]
1: ; preds = %0, %0, %0, %0
- %2 = load i16, i16* undef, align 2
+ %2 = load i16, ptr undef, align 2
br label %3
3: ; preds = %3, %1
%2 = phi i64 [ 0, %0 ], [ %12, %1 ]
%3 = phi i64 [ undef, %0 ], [ %11, %1 ]
%4 = phi i64 [ undef, %0 ], [ %3, %1 ]
- %5 = getelementptr inbounds [80 x i64], [80 x i64]* null, i64 0, i64 %2
- %6 = load i64, i64* %5, align 8
+ %5 = getelementptr inbounds [80 x i64], ptr null, i64 0, i64 %2
+ %6 = load i64, ptr %5, align 8
%7 = add i64 0, %6
%8 = and i64 %3, %4
%9 = or i64 0, %8
14: ; preds = %1
%15 = add i64 %4, 0
- store i64 %15, i64* undef, align 8
+ store i64 %15, ptr undef, align 8
ret void
}
@x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
@y = dso_local global [1024 x i32] zeroinitializer, align 4
-define dso_local i32* @foo() local_unnamed_addr {
+define dso_local ptr @foo() local_unnamed_addr {
;ENABLED: Schedule found with Initiation Interval
;ENABLED: Pipelined succesfully!
;DISABLED-NOT: remark
entry:
- %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
+ %.pre = load i32, ptr @y, align 4
br label %for.body
for.cond.cleanup: ; preds = %for.body
- ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)
+ ret ptr @y
for.body: ; preds = %for.body, %entry
%0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
- %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx2, align 4
%mul = mul nsw i32 %1, %1
%add = add nsw i32 %mul, %0
- %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
- store i32 %add, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx6, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
- %2 = load i32, i32* %arrayidx2.1, align 4
+ %arrayidx2.1 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next
+ %2 = load i32, ptr %arrayidx2.1, align 4
%mul.1 = mul nsw i32 %2, %2
%add.1 = add nsw i32 %mul.1, %add
- %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
- store i32 %add.1, i32* %arrayidx6.1, align 4
+ %arrayidx6.1 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next
+ store i32 %add.1, ptr %arrayidx6.1, align 4
%indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
- %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
- %3 = load i32, i32* %arrayidx2.2, align 4
+ %arrayidx2.2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next.1
+ %3 = load i32, ptr %arrayidx2.2, align 4
%mul.2 = mul nsw i32 %3, %3
%add.2 = add nsw i32 %mul.2, %add.1
- %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
- store i32 %add.2, i32* %arrayidx6.2, align 4
+ %arrayidx6.2 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next.1
+ store i32 %add.2, ptr %arrayidx6.2, align 4
%indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
%exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
br i1 %exitcond.2, label %for.cond.cleanup, label %for.body
@x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
@y = dso_local global [1024 x i32] zeroinitializer, align 4
-define dso_local i32* @foo() local_unnamed_addr {
+define dso_local ptr @foo() local_unnamed_addr {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, y@toc@ha
; CHECK-NEXT: stw r5, 8(r4)
; CHECK-NEXT: blr
entry:
- %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
+ %.pre = load i32, ptr @y, align 4
br label %for.body
for.cond.cleanup: ; preds = %for.body
- ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)
+ ret ptr @y
for.body: ; preds = %for.body, %entry
%0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
- %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx2, align 4
%mul = mul nsw i32 %1, %1
%add = add nsw i32 %mul, %0
- %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
- store i32 %add, i32* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx6, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
- %2 = load i32, i32* %arrayidx2.1, align 4
+ %arrayidx2.1 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next
+ %2 = load i32, ptr %arrayidx2.1, align 4
%mul.1 = mul nsw i32 %2, %2
%add.1 = add nsw i32 %mul.1, %add
- %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
- store i32 %add.1, i32* %arrayidx6.1, align 4
+ %arrayidx6.1 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next
+ store i32 %add.1, ptr %arrayidx6.1, align 4
%indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
- %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
- %3 = load i32, i32* %arrayidx2.2, align 4
+ %arrayidx2.2 = getelementptr inbounds [1024 x i32], ptr @x, i64 0, i64 %indvars.iv.next.1
+ %3 = load i32, ptr %arrayidx2.2, align 4
%mul.2 = mul nsw i32 %3, %3
%add.2 = add nsw i32 %mul.2, %add.1
- %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
- store i32 %add.2, i32* %arrayidx6.2, align 4
+ %arrayidx6.2 = getelementptr inbounds [1024 x i32], ptr @y, i64 0, i64 %indvars.iv.next.1
+ store i32 %add.2, ptr %arrayidx6.2, align 4
%indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
%exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
br i1 %exitcond.2, label %for.cond.cleanup, label %for.body
%tobool = fcmp une double %mul, 0.000000e+00
%cond = select i1 %tobool, double %conv, double 0.000000e+00
%conv3 = fptosi double %cond to i16
- store i16 %conv3, i16* undef
+ store i16 %conv3, ptr undef
ret void
}
; CHECK-NEXT: blr
entry:
%x.addr = alloca double, align 8
- store double %x, double* %x.addr, align 8
- %0 = load double, double* %x.addr, align 8
+ store double %x, ptr %x.addr, align 8
+ %0 = load double, ptr %x.addr, align 8
%1 = call i32 asm sideeffect "efdctsi $0, $1", "=d,d"(double %0)
ret i32 %1
}
%c = fcmp ogt float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ugt float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ole float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ule float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp oeq float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp une float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp olt float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp oge float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp uge float %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
; CHECK-NEXT: blr
entry:
%x.addr = alloca float, align 8
- store float %x, float* %x.addr, align 8
- %0 = load float, float* %x.addr, align 8
+ store float %x, ptr %x.addr, align 8
+ %0 = load float, ptr %x.addr, align 8
%1 = call i32 asm sideeffect "efsctsi $0, $1", "=f,f"(float %0)
ret i32 %1
; Check that it's not loading a double
ret float %v
}
-define void @test_double_abs(double * %aa) #0 {
+define void @test_double_abs(ptr %aa) #0 {
; SPE-LABEL: test_double_abs:
; SPE: # %bb.0: # %entry
; SPE-NEXT: evldd 4, 0(3)
; EFPU2-NEXT: stw 4, 0(3)
; EFPU2-NEXT: blr
entry:
- %0 = load double, double * %aa
+ %0 = load double, ptr %aa
%1 = tail call double @llvm.fabs.f64(double %0) #2
- store double %1, double * %aa
+ store double %1, ptr %aa
ret void
}
; Function Attrs: nounwind readnone
declare double @llvm.fabs.f64(double) #1
-define void @test_dnabs(double * %aa) #0 {
+define void @test_dnabs(ptr %aa) #0 {
; SPE-LABEL: test_dnabs:
; SPE: # %bb.0: # %entry
; SPE-NEXT: evldd 4, 0(3)
; EFPU2-NEXT: stw 4, 0(3)
; EFPU2-NEXT: blr
entry:
- %0 = load double, double * %aa
+ %0 = load double, ptr %aa
%1 = tail call double @llvm.fabs.f64(double %0) #2
%sub = fsub double -0.000000e+00, %1
- store double %sub, double * %aa
+ store double %sub, ptr %aa
ret void
}
%c = fcmp ogt double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ugt double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ule double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ule double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp oeq double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ueq double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp une double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp olt double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp ult double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
%c = fcmp uge double %a, %b
br i1 %c, label %tr, label %fa
tr:
- store i32 1, i32* %r, align 4
+ store i32 1, ptr %r, align 4
br label %ret
fa:
- store i32 0, i32* %r, align 4
+ store i32 0, ptr %r, align 4
br label %ret
ret:
- %0 = load i32, i32* %r, align 4
+ %0 = load i32, ptr %r, align 4
ret i32 %0
}
ret void
}
-declare void @test_memset(i8* nocapture writeonly, i8, i32, i1)
+declare void @test_memset(ptr nocapture writeonly, i8, i32, i1)
@global_var1 = global i32 0, align 4
-define double @test_spill(double %a, i32 %a1, i64 %a2, i8 * %a3, i32 *%a4, i32* %a5) #0 {
+define double @test_spill(double %a, i32 %a1, i64 %a2, ptr %a3, ptr %a4, ptr %a5) #0 {
; SPE-LABEL: test_spill:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr 0
%0 = fadd double %a, %a
call void asm sideeffect "","~{s0},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19},~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29},~{s30},~{s31}"() nounwind
%1 = fadd double %0, 3.14159
- %2 = bitcast [13 x i32]* %v1 to i8*
- call void @test_memset(i8* align 4 %2, i8 0, i32 24, i1 true)
- store i32 0, i32* %a5, align 4
+ call void @test_memset(ptr align 4 %v1, i8 0, i32 24, i1 true)
+ store i32 0, ptr %a5, align 4
call void @test_func2()
- %3 = bitcast [11 x i32]* %v2 to i8*
- call void @test_memset(i8* align 4 %3, i8 0, i32 20, i1 true)
+ call void @test_memset(ptr align 4 %v2, i8 0, i32 20, i1 true)
br label %return
return:
declare i32 @foo(double)
-define void @d(%struct.a* %e, %struct.a* %f) #0 {
+define void @d(ptr %e, ptr %f) #0 {
; SPE-LABEL: d:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr 0
; EFPU2-NEXT: mtlr 0
; EFPU2-NEXT: blr
entry:
- %0 = getelementptr %struct.a, %struct.a* %f, i32 0, i32 0
- %1 = load float, float* undef
- %conv = fpext float %1 to double
- %2 = load float, float* %0
- %g = fpext float %2 to double
- %3 = call i32 @foo(double %g)
+ %0 = load float, ptr undef
+ %conv = fpext float %0 to double
+ %1 = load float, ptr %f
+ %g = fpext float %1 to double
+ %2 = call i32 @foo(double %g)
%h = call i32 @foo(double %conv)
- %n = sitofp i32 %3 to double
+ %n = sitofp i32 %2 to double
%k = fmul double %g, %n
%l = fptrunc double %k to float
- store float %l, float* undef
+ store float %l, ptr undef
ret void
}
attributes #0 = { nounwind }
ret void
if.end: ; preds = %entry
- %0 = call i64 asm sideeffect "mr 3,$1\0A\09mr 4,$2\0A\09rotldi 0,0,3 ; rotldi 0,0,13\0A\09rotldi 0,0,61 ; rotldi 0,0,51\0A\09or 1,1,1\0A\09mr $0,3", "=b,b,b,~{cc},~{memory},~{r3},~{r4}"(i32 0, i64* undef) #0
+ %0 = call i64 asm sideeffect "mr 3,$1\0A\09mr 4,$2\0A\09rotldi 0,0,3 ; rotldi 0,0,13\0A\09rotldi 0,0,61 ; rotldi 0,0,51\0A\09or 1,1,1\0A\09mr $0,3", "=b,b,b,~{cc},~{memory},~{r3},~{r4}"(i32 0, ptr undef) #0
br i1 undef, label %end0, label %end1 ; need successor blocks to force spill
end0:
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -disable-auto-paired-vec-st=false < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-BE
-define dso_local void @test(<256 x i1>* %vpp, <256 x i1>* %vp2) local_unnamed_addr #0 {
+define dso_local void @test(ptr %vpp, ptr %vp2) local_unnamed_addr #0 {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stdu r1, -400(r1)
; CHECK-BE-NEXT: addi r1, r1, 416
; CHECK-BE-NEXT: blr
entry:
- %0 = bitcast <256 x i1>* %vpp to i8*
- %1 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* %0)
+ %0 = tail call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %vpp)
tail call void asm sideeffect "nop", "~{memory},~{vs0},~{vs1},~{vs2},~{vs3},~{vs4},~{vs5},~{vs6},~{vs7},~{vs8},~{vs9},~{vs10},~{vs11},~{vs12},~{vs13},~{vs14},~{vs15},~{vs16},~{vs17},~{vs18},~{vs19},~{vs20},~{vs21},~{vs22},~{vs23},~{vs24},~{vs25},~{vs26},~{vs27},~{vs28},~{vs29},~{vs30},~{vs31},~{vs32},~{vs33},~{vs34},~{vs35},~{vs36},~{vs37},~{vs38},~{vs39},~{vs40},~{vs41},~{vs42},~{vs43},~{vs44},~{vs45},~{vs46},~{vs47},~{vs48},~{vs49},~{vs50},~{vs51},~{vs52},~{vs53},~{vs54},~{vs55},~{vs56},~{vs57},~{vs58},~{vs59},~{vs60},~{vs61},~{vs62},~{vs63}"()
- %2 = bitcast <256 x i1>* %vp2 to i8*
- tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %1, i8* %2)
+ tail call void @llvm.ppc.vsx.stxvp(<256 x i1> %0, ptr %vp2)
ret void
}
-declare <256 x i1> @llvm.ppc.vsx.lxvp(i8*) #1
+declare <256 x i1> @llvm.ppc.vsx.lxvp(ptr) #1
-declare void @llvm.ppc.vsx.stxvp(<256 x i1>, i8*) #2
+declare void @llvm.ppc.vsx.stxvp(<256 x i1>, ptr) #2
attributes #0 = { nounwind }
br i1 undef, label %if.end, label %if.then
if.then: ; preds = %entry
- %call = tail call signext i32 bitcast (i32 (...)* @fn_call to i32 ()*)()
+ %call = tail call signext i32 @fn_call()
%cmp1 = icmp ne i32 %call, 0
br label %if.end
define void @foo() nounwind ssp {
; CHECK: foo:
- store <16 x i8> <i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16>, <16 x i8>* @a
+ store <16 x i8> <i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16>, ptr @a
; CHECK: vspltish [[REG:[0-9]+]], 8
; CHECK: vadduhm {{[0-9]+}}, [[REG]], [[REG]]
ret void
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-%"class.llvm::MachineOperand" = type { i8, [3 x i8], i64, i64*, i64 }
+%"class.llvm::MachineOperand" = type { i8, [3 x i8], i64, ptr, i64 }
; Function Attrs: nounwind
define void @_ZN4llvm17ScheduleDAGInstrs14addPhysRegDepsEPNS_5SUnitEj() #0 align 2 {
; CHECK-NOT: lhzu
entry:
- %0 = load %"class.llvm::MachineOperand"*, %"class.llvm::MachineOperand"** undef, align 8
+ %0 = load ptr, ptr undef, align 8
br i1 undef, label %_ZNK4llvm14MachineOperand6getRegEv.exit, label %cond.false.i123
cond.false.i123: ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
unreachable
_ZNK4llvm14MachineOperand6getRegEv.exit: ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
- %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
- %1 = bitcast [3 x i8]* %IsDef.i to i24*
- %bf.load.i = load i24, i24* %1, align 1
- %2 = and i24 %bf.load.i, 128
+ %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", ptr %0, i64 undef, i32 1
+ %bf.load.i = load i24, ptr %IsDef.i, align 1
+ %1 = and i24 %bf.load.i, 128
br i1 undef, label %for.cond.cleanup, label %for.body.lr.ph
for.body.lr.ph: ; preds = %_ZNK4llvm14MachineOperand6getRegEv.exit
- %3 = zext i24 %2 to i32
+ %2 = zext i24 %1 to i32
br i1 undef, label %cond.false.i134, label %_ZNK4llvm18MCRegAliasIteratordeEv.exit
for.cond.cleanup: ; preds = %_ZNK4llvm14MachineOperand6getRegEv.exit
unreachable
_ZNK4llvm14MachineOperand6isDeadEv.exit262: ; preds = %if.end55
- %bf.load.i259 = load i24, i24* %1, align 1
+ %bf.load.i259 = load i24, ptr %IsDef.i, align 1
br i1 undef, label %if.then57, label %if.else59
if.then57: ; preds = %_ZNK4llvm14MachineOperand6isDeadEv.exit262
;
; RUN: opt -S -mtriple=powerpc64le -codegenprepare -force-split-store < %s | FileCheck %s
-define void @fun(i16* %Src, i16* %Dst) {
-; CHECK: store volatile i16 %8, i16* %Dst
- %1 = load i16, i16* %Src
+define void @fun(ptr %Src, ptr %Dst) {
+; CHECK: store volatile i16 %8, ptr %Dst
+ %1 = load i16, ptr %Src
%2 = trunc i16 %1 to i8
%3 = lshr i16 %1, 8
%4 = trunc i16 %3 to i8
%6 = zext i8 %4 to i16
%7 = shl nuw i16 %6, 8
%8 = or i16 %7, %5
- store volatile i16 %8, i16* %Dst
+ store volatile i16 %8, ptr %Dst
ret void
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 %n, align 16
- %b = getelementptr inbounds i32, i32* %a, i64 1198
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i64 1198
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: blr
%a = alloca i32, i32 %n, align 16
%i = add i32 %n, 1024
- %b = getelementptr inbounds i32, i32* %a, i32 %i
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i32 %i
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 %n, align 16
- %b = getelementptr inbounds i32, i32* %a, i64 1198
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i64 1198
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 64
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 4096
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1048576
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1073741824
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1000000007
- %b = getelementptr inbounds i8, i8* %a, i64 101
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 101
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 64
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 4096
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 65536
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1048576
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1073741824
- %b = getelementptr inbounds i8, i8* %a, i64 63
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 63
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: blr
entry:
%a = alloca i8, i64 1000000007
- %b = getelementptr inbounds i8, i8* %a, i64 101
- store volatile i8 3, i8* %a
- %c = load volatile i8, i8* %a
+ %b = getelementptr inbounds i8, ptr %a, i64 101
+ store volatile i8 3, ptr %a
+ %c = load volatile i8, ptr %a
ret i8 %c
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 200, align 64
- %b = getelementptr inbounds i32, i32* %a, i64 %i
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i64 %i
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 2000, align 2048
- %b = getelementptr inbounds i32, i32* %a, i64 %i
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i64 %i
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 1000, align 1024
- %b = getelementptr inbounds i32, i32* %a, i64 %i
- store volatile i32 1, i32* %b
- %c = load volatile i32, i32* %a
+ %b = getelementptr inbounds i32, ptr %a, i64 %i
+ store volatile i32 1, ptr %b
+ %c = load volatile i32, ptr %a
ret i32 %c
}
; CHECK-32-NEXT: mr r31, r0
; CHECK-32-NEXT: blr
%a = alloca i32, i32 4096, align 32768
- %b = getelementptr inbounds i32, i32* %a, i64 %i
- store volatile i32 1, i32* %b
+ %b = getelementptr inbounds i32, ptr %a, i64 %i
+ store volatile i32 1, ptr %b
%1 = zext i32 %vla_size to i64
%vla = alloca i8, i64 %1, align 2048
- %2 = load volatile i8, i8* %vla, align 2048
+ %2 = load volatile i8, ptr %vla, align 2048
ret void
}
; AIX-NOT: __ssp_canary_word
define i32 @in_bounds() #0 {
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %gep = getelementptr inbounds i32, i32* %var, i32 0
- %ret = load i32, i32* %gep, align 4
+ store i32 0, ptr %var, align 4
+ %ret = load i32, ptr %var, align 4
ret i32 %ret
}
; AIX: __ssp_canary_word
define i32 @constant_out_of_bounds() #0 {
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %gep = getelementptr inbounds i32, i32* %var, i32 1
- %ret = load i32, i32* %gep, align 4
+ store i32 0, ptr %var, align 4
+ %gep = getelementptr inbounds i32, ptr %var, i32 1
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
; AIX: __ssp_canary_word
define i32 @nonconstant_out_of_bounds(i32 %n) #0 {
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %gep = getelementptr inbounds i32, i32* %var, i32 %n
- %ret = load i32, i32* %gep, align 4
+ store i32 0, ptr %var, align 4
+ %gep = getelementptr inbounds i32, ptr %var, i32 %n
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
br label %then
then:
- %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
- %gep = getelementptr inbounds i32, i32* %ptr, i32 0
- %ret = load i32, i32* %gep, align 4
+ %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
br label %then
then:
- %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
- %gep = getelementptr inbounds i32, i32* %ptr, i32 1
- %ret = load i32, i32* %gep, align 4
+ %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+ %gep = getelementptr inbounds i32, ptr %ptr, i32 1
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
br label %then
then:
- %ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
- %gep = getelementptr inbounds i32, i32* %ptr, i32 %n
- %ret = load i32, i32* %gep, align 4
+ %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+ %gep = getelementptr inbounds i32, ptr %ptr, i32 %n
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
- %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
- %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
- %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
- %ret = load i32, i32* %ptr, align 4
+ %ptr = phi ptr [ %var1, %if ], [ %var2, %else ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
- %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
- %gep2 = getelementptr inbounds i32, i32* %var2, i32 1
+ %gep2 = getelementptr inbounds i32, ptr %var2, i32 1
br label %then
then:
- %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
- %ret = load i32, i32* %ptr, align 4
+ %ptr = phi ptr [ %var1, %if ], [ %gep2, %else ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
- %gep1 = getelementptr inbounds i32, i32* %var1, i32 1
+ %gep1 = getelementptr inbounds i32, ptr %var1, i32 1
br label %then
else:
- %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
- %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
- %ret = load i32, i32* %ptr, align 4
+ %ptr = phi ptr [ %gep1, %if ], [ %var2, %else ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
entry:
%var1 = alloca i64, align 4
%var2 = alloca i32, align 4
- store i64 0, i64* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i64 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
- %bitcast = bitcast i32* %var2 to i64*
br label %then
then:
- %ptr = phi i64* [ %var1, %entry ], [ %bitcast, %if ]
- %ret = load i64, i64* %ptr, align 4
+ %ptr = phi ptr [ %var1, %entry ], [ %var2, %if ]
+ %ret = load i64, ptr %ptr, align 4
ret i64 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i64, align 4
- store i32 0, i32* %var1, align 4
- store i64 0, i64* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i64 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
- %bitcast = bitcast i32* %var1 to i64*
br label %then
then:
- %ptr = phi i64* [ %var2, %entry ], [ %bitcast, %if ]
- %ret = load i64, i64* %ptr, align 4
+ %ptr = phi ptr [ %var2, %entry ], [ %var1, %if ]
+ %ret = load i64, ptr %ptr, align 4
ret i64 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
- %gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
- %gep2 = getelementptr inbounds i32, i32* %var2, i32 %n
+ %gep2 = getelementptr inbounds i32, ptr %var2, i32 %n
br label %then
then:
- %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
- %ret = load i32, i32* %ptr, align 4
+ %ptr = phi ptr [ %var1, %if ], [ %gep2, %else ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
- store i32 0, i32* %var1, align 4
- store i32 0, i32* %var2, align 4
+ store i32 0, ptr %var1, align 4
+ store i32 0, ptr %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
- %gep1 = getelementptr inbounds i32, i32* %var1, i32 %n
+ %gep1 = getelementptr inbounds i32, ptr %var1, i32 %n
br label %then
else:
- %gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
- %ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
- %ret = load i32, i32* %ptr, align 4
+ %ptr = phi ptr [ %gep1, %if ], [ %var2, %else ]
+ %ret = load i32, ptr %ptr, align 4
ret i32 %ret
}
; AIX-NOT: __ssp_canary_word
define void @struct_in_bounds() #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 1
- store i32 0, i32* %innergep, align 4
+ %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 0, i32 1
+ %innergep = getelementptr inbounds %struct.inner, ptr %outergep, i32 0, i32 1
+ store i32 0, ptr %innergep, align 4
ret void
}
; AIX: __ssp_canary_word
define void @struct_constant_out_of_bounds_a() #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 1, i32 0
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
- store i32 0, i32* %innergep, align 4
+ %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 1, i32 0
+ store i32 0, ptr %outergep, align 4
ret void
}
; AIX-NOT: __ssp_canary_word
define void @struct_constant_out_of_bounds_b() #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
- store i32 0, i32* %innergep, align 4
+ %innergep = getelementptr inbounds %struct.inner, ptr %var, i32 1, i32 0
+ store i32 0, ptr %innergep, align 4
ret void
}
; AIX: __ssp_canary_word
define void @struct_constant_out_of_bounds_c() #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
- store i32 0, i32* %innergep, align 4
+ %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 0, i32 1
+ %innergep = getelementptr inbounds %struct.inner, ptr %outergep, i32 1, i32 0
+ store i32 0, ptr %innergep, align 4
ret void
}
; AIX: __ssp_canary_word
define void @struct_nonconstant_out_of_bounds_a(i32 %n) #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 %n, i32 0
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
- store i32 0, i32* %innergep, align 4
+ %outergep = getelementptr inbounds %struct.outer, ptr %var, i32 %n, i32 0
+ store i32 0, ptr %outergep, align 4
ret void
}
; AIX: __ssp_canary_word
define void @struct_nonconstant_out_of_bounds_b(i32 %n) #0 {
%var = alloca %struct.outer, align 4
- %outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
- %innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 %n, i32 0
- store i32 0, i32* %innergep, align 4
+ %innergep = getelementptr inbounds %struct.inner, ptr %var, i32 %n, i32 0
+ store i32 0, ptr %innergep, align 4
ret void
}
; AIX-NOT: __ssp_canary_word
define i32 @bitcast_smaller_load() #0 {
%var = alloca i64, align 4
- store i64 0, i64* %var, align 4
- %bitcast = bitcast i64* %var to i32*
- %ret = load i32, i32* %bitcast, align 4
+ store i64 0, ptr %var, align 4
+ %ret = load i32, ptr %var, align 4
ret i32 %ret
}
; AIX-NOT: __ssp_canary_word
define i32 @bitcast_same_size_load() #0 {
%var = alloca i64, align 4
- store i64 0, i64* %var, align 4
- %bitcast = bitcast i64* %var to %struct.inner*
- %gep = getelementptr inbounds %struct.inner, %struct.inner* %bitcast, i32 0, i32 1
- %ret = load i32, i32* %gep, align 4
+ store i64 0, ptr %var, align 4
+ %gep = getelementptr inbounds %struct.inner, ptr %var, i32 0, i32 1
+ %ret = load i32, ptr %gep, align 4
ret i32 %ret
}
; AIX: __ssp_canary_word
define i64 @bitcast_larger_load() #0 {
%var = alloca i32, align 4
- store i32 0, i32* %var, align 4
- %bitcast = bitcast i32* %var to i64*
- %ret = load i64, i64* %bitcast, align 4
+ store i32 0, ptr %var, align 4
+ %ret = load i64, ptr %var, align 4
ret i64 %ret
}
; AIX: __ssp_canary_word
define i32 @bitcast_larger_store() #0 {
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i64*
- store i64 0, i64* %bitcast, align 4
- %ret = load i32, i32* %var, align 4
+ store i64 0, ptr %var, align 4
+ %ret = load i32, ptr %var, align 4
ret i32 %ret
}
; AIX: __ssp_canary_word
define i64 @bitcast_larger_cmpxchg(i64 %desired, i64 %new) #0 {
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i64*
- %pair = cmpxchg i64* %bitcast, i64 %desired, i64 %new seq_cst monotonic
+ %pair = cmpxchg ptr %var, i64 %desired, i64 %new seq_cst monotonic
%ret = extractvalue { i64, i1 } %pair, 0
ret i64 %ret
}
; AIX: __ssp_canary_word
define i64 @bitcast_larger_atomic_rmw() #0 {
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i64*
- %ret = atomicrmw add i64* %bitcast, i64 1 monotonic
+ %ret = atomicrmw add ptr %var, i64 1 monotonic
ret i64 %ret
}
; AIX: __ssp_canary_word
define i32 @bitcast_overlap() #0 {
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to %struct.packed*
- %gep = getelementptr inbounds %struct.packed, %struct.packed* %bitcast, i32 0, i32 1
- %ret = load i32, i32* %gep, align 2
+ %gep = getelementptr inbounds %struct.packed, ptr %var, i32 0, i32 1
+ %ret = load i32, ptr %gep, align 2
ret i32 %ret
}
; AIX: __ssp_canary_word
define i32 @multi_dimensional_array() #0 {
%var = alloca %struct.multi_dimensional, align 4
- %gep1 = getelementptr inbounds %struct.multi_dimensional, %struct.multi_dimensional* %var, i32 0, i32 0
- %gep2 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* %gep1, i32 0, i32 10
- %gep3 = getelementptr inbounds [10 x i32], [10 x i32]* %gep2, i32 0, i32 5
- %ret = load i32, i32* %gep3, align 4
+ %gep2 = getelementptr inbounds [10 x [10 x i32]], ptr %var, i32 0, i32 10
+ %gep3 = getelementptr inbounds [10 x i32], ptr %gep2, i32 0, i32 5
+ %ret = load i32, ptr %gep3, align 4
ret i32 %ret
}
; CHECK-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
define i32 @test_n() local_unnamed_addr #0 {
entry:
- %t0 = tail call i32 bitcast (i32 (...)* @bar0 to i32 ()*)() #0
+ %t0 = tail call i32 @bar0() #0
ret i32 %t0
}
define i32 @test_a() local_unnamed_addr #0 {
entry:
%t0 = alloca i32, align 128
- %t1 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
+ %t1 = tail call i32 @bar1(ptr %t0) #0
ret i32 %t1
}
; CHECK-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
define i32 @test_d(i32 %p0) local_unnamed_addr #0 {
%t0 = alloca i32, i32 %p0, align 4
- %t1 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
+ %t1 = tail call i32 @bar1(ptr %t0) #0
ret i32 %t1
}
define i32 @test_s(i32 %p0) local_unnamed_addr #0 {
entry:
%t0 = alloca [16384 x i32]
- %t1 = getelementptr [16384 x i32], [16384 x i32]* %t0, i32 0, i32 0
- %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+ %t2 = tail call i32 @bar1(ptr %t0) #0
ret i32 %t2
}
define i32 @test_ad(i32 %p0) local_unnamed_addr #0 {
%t0 = alloca i32, align 128
%t1 = alloca i32, i32 %p0, align 4
- %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
- %t3 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+ %t2 = tail call i32 @bar1(ptr %t0) #0
+ %t3 = tail call i32 @bar1(ptr %t1) #0
%t4 = add i32 %t2, %t3
ret i32 %t4
}
define i32 @test_as() local_unnamed_addr #0 {
%t0 = alloca i32, align 128
%t1 = alloca [16384 x i32]
- %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
- %t3 = getelementptr [16384 x i32], [16384 x i32]* %t1, i32 0, i32 0
- %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t3) #0
+ %t2 = tail call i32 @bar1(ptr %t0) #0
+ %t4 = tail call i32 @bar1(ptr %t1) #0
%t5 = add i32 %t2, %t4
ret i32 %t5
}
define i32 @test_ds(i32 %p0) local_unnamed_addr #0 {
%t0 = alloca i32, i32 %p0, align 4
%t1 = alloca [16384 x i32]
- %t2 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
- %t3 = getelementptr [16384 x i32], [16384 x i32]* %t1, i32 0, i32 0
- %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t3) #0
+ %t2 = tail call i32 @bar1(ptr %t0) #0
+ %t4 = tail call i32 @bar1(ptr %t1) #0
%t5 = add i32 %t2, %t4
ret i32 %t5
}
%t1 = alloca i32, i32 %p0, align 4
%t2 = alloca [16384 x i32]
- %t3 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t0) #0
- %t4 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t1) #0
+ %t3 = tail call i32 @bar1(ptr %t0) #0
+ %t4 = tail call i32 @bar1(ptr %t1) #0
%t5 = add i32 %t3, %t4
- %t6 = getelementptr [16384 x i32], [16384 x i32]* %t2, i32 0, i32 0
- %t7 = tail call i32 bitcast (i32 (...)* @bar1 to i32 (i32*)*)(i32* %t6) #0
+ %t7 = tail call i32 @bar1(ptr %t2) #0
%t8 = add i32 %t5, %t7
ret i32 %t7
}
; FREEBSD32: bl __stack_chk_fail
; FREEBSD64: bl __stack_chk_fail
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <[11 x i8]*> [#uses=1]
+@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <ptr> [#uses=1]
-define void @test(i8* %a) nounwind ssp {
+define void @test(ptr %a) nounwind ssp {
entry:
- %a_addr = alloca i8* ; <i8**> [#uses=2]
- %buf = alloca [8 x i8] ; <[8 x i8]*> [#uses=2]
+ %a_addr = alloca ptr ; <ptr> [#uses=2]
+ %buf = alloca [8 x i8] ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i8* %a, i8** %a_addr
- %buf1 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %0 = load i8*, i8** %a_addr, align 4 ; <i8*> [#uses=1]
- %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind ; <i8*> [#uses=0]
- %buf2 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; <i32> [#uses=0]
+ store ptr %a, ptr %a_addr
+ %0 = load ptr, ptr %a_addr, align 4 ; <ptr> [#uses=1]
+ %1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind ; <ptr> [#uses=0]
+ %2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind ; <i32> [#uses=0]
br label %return
return: ; preds = %entry
ret void
}
-declare i8* @strcpy(i8*, i8*) nounwind
+declare ptr @strcpy(ptr, ptr) nounwind
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
%struct.s = type { i32, i32 }
-declare void @bar(i32*)
+declare void @bar(ptr)
@barbaz = external global i32
-define void @goo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @goo(ptr byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %2 = load i32, i32* @barbaz, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %2, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx)
+ %0 = load i32, ptr %a, align 4
+ store i32 %0, ptr %x, align 32
+ %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+ %1 = load i32, ptr %b, align 4
+ %2 = load i32, ptr @barbaz, align 4
+ %arrayidx2 = getelementptr inbounds [2 x i32], ptr %x, i64 0, i64 1
+ store i32 %2, ptr %arrayidx2, align 4
+ call void @bar(ptr %x)
ret void
}
; CHECK-32-PIC: addic 29, 0, 12
; The large-frame-size case.
-define void @hoo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @hoo(ptr byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [200000 x i32], align 32
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx)
+ %0 = load i32, ptr %a, align 4
+ store i32 %0, ptr %x, align 32
+ %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+ %1 = load i32, ptr %b, align 4
+ %arrayidx2 = getelementptr inbounds [200000 x i32], ptr %x, i64 0, i64 1
+ store i32 %1, ptr %arrayidx2, align 4
+ call void @bar(ptr %x)
ret void
}
; Make sure that the FP save area is still allocated correctly relative to
; where r30 is saved.
-define void @loo(%struct.s* byval(%struct.s) nocapture readonly %a) {
+define void @loo(ptr byval(%struct.s) nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx)
+ %0 = load i32, ptr %a, align 4
+ store i32 %0, ptr %x, align 32
+ %b = getelementptr inbounds %struct.s, ptr %a, i64 0, i32 1
+ %1 = load i32, ptr %b, align 4
+ %arrayidx2 = getelementptr inbounds [2 x i32], ptr %x, i64 0, i64 1
+ store i32 %1, ptr %arrayidx2, align 4
+ call void @bar(ptr %x)
call void asm sideeffect "", "~{f30}"() nounwind
ret void
}
@.str = private unnamed_addr constant [33 x i8] c"Successfully returned from main\0A\00", align 1
; Function Attrs: nounwind
-define dso_local signext i32 @main(i32 signext %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 {
+define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mfocrf 12, 32
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- %0 = bitcast [1 x %struct.__jmp_buf_tag]* %env_buffer to i8*
- call void @llvm.lifetime.start.p0i8(i64 656, i8* nonnull %0) #5
- %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %env_buffer, i64 0, i64 0
- %call = call signext i32 @_setjmp(%struct.__jmp_buf_tag* nonnull %arraydecay) #6
+ call void @llvm.lifetime.start.p0(i64 656, ptr nonnull %env_buffer) #5
+ %call = call signext i32 @_setjmp(ptr nonnull %env_buffer) #6
%cmp1 = icmp ne i32 %argc, 2
%cmp2 = icmp eq i32 %call, 0
%or.cond = and i1 %cmp1, %cmp2
br i1 %or.cond, label %if.then3, label %if.end5
if.then3: ; preds = %if.end
- %1 = alloca [8 x i8], align 16
- %.sub = getelementptr inbounds [8 x i8], [8 x i8]* %1, i64 0, i64 0
- store i8 -1, i8* %.sub, align 16
- call void @test(i8* nonnull %.sub, %struct.__jmp_buf_tag* nonnull %arraydecay) #7
+ %0 = alloca [8 x i8], align 16
+ store i8 -1, ptr %0, align 16
+ call void @test(ptr nonnull %0, ptr nonnull %env_buffer) #7
unreachable
if.end5: ; preds = %if.end
- %call6 = call signext i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([33 x i8], [33 x i8]* @.str, i64 0, i64 0))
- call void @llvm.lifetime.end.p0i8(i64 656, i8* nonnull %0) #5
+ %call6 = call signext i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str)
+ call void @llvm.lifetime.end.p0(i64 656, ptr nonnull %env_buffer) #5
br label %return
return: ; preds = %entry, %if.end5
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
; Function Attrs: nounwind returns_twice
-declare signext i32 @_setjmp(%struct.__jmp_buf_tag*) local_unnamed_addr
+declare signext i32 @_setjmp(ptr) local_unnamed_addr
; Function Attrs: noreturn
-declare void @test(i8*, %struct.__jmp_buf_tag*) local_unnamed_addr
+declare void @test(ptr, ptr) local_unnamed_addr
; Function Attrs: nofree nounwind
-declare noundef signext i32 @printf(i8* nocapture noundef readonly, ...) local_unnamed_addr
+declare noundef signext i32 @printf(ptr nocapture noundef readonly, ...) local_unnamed_addr
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
attributes #0 = { nounwind }
attributes #6 = { nounwind returns_twice }
define void @caller_meta_leaf() {
entry:
%metadata = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata
- store i64 12, i64* %metadata
- store i64 13, i64* %metadata
+ store i64 11, ptr %metadata
+ store i64 12, ptr %metadata
+ store i64 13, ptr %metadata
; ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; ISEL-NEXT: STACKMAP
; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
; FAST-ISEL: ADJCALLSTACKDOWN 0, 0, implicit-def
; FAST-ISEL-NEXT: STACKMAP
; FAST-ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
br label %CF
CF: ; preds = %CF80, %CF, %BB
- %L5 = load i64, i64* undef
- store i8 %0, i8* %A4
+ %L5 = load i64, ptr undef
+ store i8 %0, ptr %A4
%Shuff7 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> %Shuff, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 undef, i32 20, i32 22, i32 24, i32 26>
- %PC10 = bitcast i8* %A4 to ppc_fp128*
br i1 undef, label %CF, label %CF77
CF77: ; preds = %CF81, %CF83, %CF77, %CF
br i1 undef, label %CF77, label %CF82
CF82: ; preds = %CF82, %CF77
- %L19 = load i64, i64* undef
- store <1 x ppc_fp128> zeroinitializer, <1 x ppc_fp128>* %A
- store i8 -65, i8* %A4
+ %L19 = load i64, ptr undef
+ store <1 x ppc_fp128> zeroinitializer, ptr %A
+ store i8 -65, ptr %A4
br i1 undef, label %CF82, label %CF83
CF83: ; preds = %CF82
- %L34 = load i64, i64* undef
+ %L34 = load i64, ptr undef
br i1 undef, label %CF77, label %CF81
CF81: ; preds = %CF83
%Shuff43 = shufflevector <16 x i32> %Shuff7, <16 x i32> undef, <16 x i32> <i32 15, i32 17, i32 19, i32 21, i32 23, i32 undef, i32 undef, i32 29, i32 31, i32 undef, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
- store ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128* %PC10
+ store ppc_fp128 0xM00000000000000000000000000000000, ptr %A4
br i1 undef, label %CF77, label %CF78
CF78: ; preds = %CF78, %CF81
br i1 undef, label %CF79, label %CF80
CF80: ; preds = %CF79
- store i64 %L19, i64* undef
+ store i64 %L19, ptr undef
%Cmp75 = icmp uge i32 206779, undef
br i1 %Cmp75, label %CF, label %CF76
CF76: ; preds = %CF80
- store i64 %L5, i64* undef
- store i64 %L34, i64* undef
+ store i64 %L5, ptr undef
+ store i64 %L34, ptr undef
ret void
}
-define void @autogen_SD88042(i8*, i32*, i8) {
+define void @autogen_SD88042(ptr, ptr, i8) {
BB:
%A4 = alloca <2 x i1>
%A = alloca <16 x float>
- %L = load i8, i8* %0
- %Sl = select i1 false, <16 x float>* %A, <16 x float>* %A
- %PC = bitcast <2 x i1>* %A4 to i64*
+ %L = load i8, ptr %0
+ %Sl = select i1 false, ptr %A, ptr %A
%Sl27 = select i1 false, i8 undef, i8 %L
br label %CF
CF: ; preds = %CF78, %CF, %BB
- %PC33 = bitcast i32* %1 to i32*
br i1 undef, label %CF, label %CF77
CF77: ; preds = %CF80, %CF77, %CF
- store <16 x float> zeroinitializer, <16 x float>* %Sl
- %L58 = load i32, i32* %PC33
- store i8 0, i8* %0
+ store <16 x float> zeroinitializer, ptr %Sl
+ %L58 = load i32, ptr %1
+ store i8 0, ptr %0
br i1 undef, label %CF77, label %CF80
CF80: ; preds = %CF77
- store i64 0, i64* %PC
+ store i64 0, ptr %A4
%E67 = extractelement <8 x i1> zeroinitializer, i32 1
br i1 %E67, label %CF77, label %CF78
br i1 %Cmp73, label %CF, label %CF76
CF76: ; preds = %CF78
- store i8 %2, i8* %0
- store i8 %Sl27, i8* %0
+ store i8 %2, ptr %0
+ store i8 %Sl27, ptr %0
ret void
}
-define void @autogen_SD37497(i8*, i32*, i64*) {
+define void @autogen_SD37497(ptr, ptr, ptr) {
BB:
%A1 = alloca i1
%I8 = insertelement <1 x i32> <i32 -1>, i32 454855, i32 0
%Cmp = icmp ult <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, undef
- %L10 = load i64, i64* %2
+ %L10 = load i64, ptr %2
%E11 = extractelement <4 x i1> %Cmp, i32 2
br label %CF72
CF72: ; preds = %CF74, %CF72, %BB
- store double 0xB47BB29A53790718, double* undef
+ store double 0xB47BB29A53790718, ptr undef
%E18 = extractelement <1 x i32> <i32 -1>, i32 0
%FC22 = sitofp <1 x i32> %I8 to <1 x float>
br i1 undef, label %CF72, label %CF74
CF74: ; preds = %CF72
- store i8 0, i8* %0
- %PC = bitcast i1* %A1 to i64*
- %L31 = load i64, i64* %PC
- store i64 477323, i64* %PC
- %Sl37 = select i1 false, i32* undef, i32* %1
+ store i8 0, ptr %0
+ %L31 = load i64, ptr %A1
+ store i64 477323, ptr %A1
+ %Sl37 = select i1 false, ptr undef, ptr %1
%Cmp38 = icmp ugt i1 undef, undef
br i1 %Cmp38, label %CF72, label %CF73
CF73: ; preds = %CF74
- store i64 %L31, i64* %PC
+ store i64 %L31, ptr %A1
%B55 = fdiv <1 x float> undef, %FC22
- %Sl63 = select i1 %E11, i32* undef, i32* %Sl37
- store i32 %E18, i32* %Sl63
- store i64 %L10, i64* %PC
+ %Sl63 = select i1 %E11, ptr undef, ptr %Sl37
+ store i32 %E18, ptr %Sl63
+ store i64 %L10, ptr %A1
ret void
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i32 @test1(i64 %add, i64* %ptr) nounwind {
+define i32 @test1(i64 %add, ptr %ptr) nounwind {
entry:
- %p1 = getelementptr i64, i64* %ptr, i64 144115188075855
+ %p1 = getelementptr i64, ptr %ptr, i64 144115188075855
br label %for.cond2.preheader
for.cond2.preheader:
for.body4:
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
- %i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
- %i6 = getelementptr i64, i64* %i0, i64 400000
- %i7 = getelementptr i64, i64* %i6, i64 300000
- %i8 = getelementptr i64, i64* %i6, i64 200000
- %i9 = getelementptr i64, i64* %i6, i64 100000
- store i64 %add, i64* %i6, align 32
- store i64 %add, i64* %i7, align 32
- store i64 %add, i64* %i8, align 32
- store i64 %add, i64* %i9, align 32
+ %i0 = phi ptr [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
+ %i6 = getelementptr i64, ptr %i0, i64 400000
+ %i7 = getelementptr i64, ptr %i6, i64 300000
+ %i8 = getelementptr i64, ptr %i6, i64 200000
+ %i9 = getelementptr i64, ptr %i6, i64 100000
+ store i64 %add, ptr %i6, align 32
+ store i64 %add, ptr %i7, align 32
+ store i64 %add, ptr %i8, align 32
+ store i64 %add, ptr %i9, align 32
%lsr.iv.next = add i32 %lsr.iv, -16
%exitcond.15 = icmp eq i32 %lsr.iv.next, 0
br i1 %exitcond.15, label %for.end, label %for.body4
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 | FileCheck %s
-define void @test(float %F, i8* %P) {
+define void @test(float %F, ptr %P) {
%I = fptosi float %F to i32
%X = trunc i32 %I to i8
- store i8 %X, i8* %P
+ store i8 %X, ptr %P
ret void
; CHECK: fctiwz 0, 1
; CHECK: stfiwx 0, 0, 4
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mattr=stfiwx | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mattr=-stfiwx | FileCheck -check-prefix=CHECK-LS %s
; fptosi f32->i32 followed by a store. Run with +stfiwx and -stfiwx (RUN lines
; above): with the feature the check expects `stwu` in the output; without it
; only `blr` is checked (CHECK-LS prefix).
-define void @test1(float %a, i32* %b) nounwind {
+define void @test1(float %a, ptr %b) nounwind {
; CHECK-LABEL: @test1
; CHECK-LS-LABEL: @test1
%tmp.2 = fptosi float %a to i32 ; <i32> [#uses=1]
- store i32 %tmp.2, i32* %b
+ store i32 %tmp.2, ptr %b
ret void
; CHECK: stwu
; CHECK-LS: blr
}
-define void @test2(float %a, i32* %b, i32 %i) nounwind {
+define void @test2(float %a, ptr %b, i32 %i) nounwind {
; CHECK-LABEL: @test2
; CHECK-LS-LABEL: @test2
- %tmp.2 = getelementptr i32, i32* %b, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = getelementptr i32, i32* %b, i32 %i ; <i32*> [#uses=1]
+ %tmp.2 = getelementptr i32, ptr %b, i32 1 ; <ptr> [#uses=1]
+ %tmp.5 = getelementptr i32, ptr %b, i32 %i ; <ptr> [#uses=1]
%tmp.7 = fptosi float %a to i32 ; <i32> [#uses=3]
- store i32 %tmp.7, i32* %tmp.5
- store i32 %tmp.7, i32* %tmp.2
- store i32 %tmp.7, i32* %b
+ store i32 %tmp.7, ptr %tmp.5
+ store i32 %tmp.7, ptr %tmp.2
+ store i32 %tmp.7, ptr %b
ret void
; CHECK: stwu
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK-PPC64LE
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK-PPC64
-; i8* p;
+; ptr p;
; i32 m;
; p[0] = (m >> 0) & 0xFF;
; p[1] = (m >> 8) & 0xFF;
; p[2] = (m >> 16) & 0xFF;
; p[3] = (m >> 24) & 0xFF;
; Four byte stores of i32 %m in little-endian order (LSB at p[0]); on ppc64le
; the store-merging combine folds them into a single `stw 3, 0(4)`.
-define void @store_i32_by_i8(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: stw 3, 0(4)
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i32 %m to i8
- store i8 %conv, i8* %p, align 1
+ store i8 %conv, ptr %p, align 1
%0 = lshr i32 %m, 8
%conv3 = trunc i32 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv11, ptr %arrayidx12, align 1
ret void
}
-; i8* p;
+; ptr p;
; i32 m;
; p[0] = (m >> 24) & 0xFF;
; p[1] = (m >> 16) & 0xFF;
; p[2] = (m >> 8) & 0xFF;
; p[3] = (m >> 0) & 0xFF;
; Same four byte stores but in byte-swapped order (MSB at p[0]); on ppc64le
; this merges into the byte-reversed store `stwbrx 3, 0, 4`.
-define void @store_i32_by_i8_bswap(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: stwbrx 3, 0, 4
entry:
%0 = lshr i32 %m, 24
%conv = trunc i32 %0 to i8
- store i8 %conv, i8* %p, align 1
+ store i8 %conv, ptr %p, align 1
%1 = lshr i32 %m, 16
%conv3 = trunc i32 %1 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv3, ptr %arrayidx4, align 1
%2 = lshr i32 %m, 8
%conv7 = trunc i32 %2 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv7, ptr %arrayidx8, align 1
%conv11 = trunc i32 %m to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv11, ptr %arrayidx12, align 1
ret void
}
-; i8 *p;
+; ptr p;
; i64 m;
; p[0] = (m >> 0) & 0xFF;
; p[1] = (m >> 8) & 0xFF;
; p[5] = (m >> 40) & 0xFF;
; p[6] = (m >> 48) & 0xFF;
; p[7] = (m >> 56) & 0xFF;
; Eight byte stores of i64 %m in little-endian order (LSB at p[0]); on ppc64le
; they merge into a single `std 3, 0(4)`.
-define void @store_i64_by_i8(i64 %m, i8* %p) {
+define void @store_i64_by_i8(i64 %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i64_by_i8:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: std 3, 0(4)
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i64 %m to i8
- store i8 %conv, i8* %p, align 1
+ store i8 %conv, ptr %p, align 1
%0 = lshr i64 %m, 8
%conv3 = trunc i64 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i64 %m, 16
%conv7 = trunc i64 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i64 %m, 24
%conv11 = trunc i64 %2 to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv11, ptr %arrayidx12, align 1
%3 = lshr i64 %m, 32
%conv15 = trunc i64 %3 to i8
- %arrayidx16 = getelementptr inbounds i8, i8* %p, i64 4
- store i8 %conv15, i8* %arrayidx16, align 1
+ %arrayidx16 = getelementptr inbounds i8, ptr %p, i64 4
+ store i8 %conv15, ptr %arrayidx16, align 1
%4 = lshr i64 %m, 40
%conv19 = trunc i64 %4 to i8
- %arrayidx20 = getelementptr inbounds i8, i8* %p, i64 5
- store i8 %conv19, i8* %arrayidx20, align 1
+ %arrayidx20 = getelementptr inbounds i8, ptr %p, i64 5
+ store i8 %conv19, ptr %arrayidx20, align 1
%5 = lshr i64 %m, 48
%conv23 = trunc i64 %5 to i8
- %arrayidx24 = getelementptr inbounds i8, i8* %p, i64 6
- store i8 %conv23, i8* %arrayidx24, align 1
+ %arrayidx24 = getelementptr inbounds i8, ptr %p, i64 6
+ store i8 %conv23, ptr %arrayidx24, align 1
%6 = lshr i64 %m, 56
%conv27 = trunc i64 %6 to i8
- %arrayidx28 = getelementptr inbounds i8, i8* %p, i64 7
- store i8 %conv27, i8* %arrayidx28, align 1
+ %arrayidx28 = getelementptr inbounds i8, ptr %p, i64 7
+ store i8 %conv27, ptr %arrayidx28, align 1
ret void
}
-; i8 *p;
+; ptr p;
; i64 m;
; p[7] = (m >> 0) & 0xFF;
; p[6] = (m >> 8) & 0xFF;
; p[2] = (m >> 40) & 0xFF;
; p[1] = (m >> 48) & 0xFF;
; p[0] = (m >> 56) & 0xFF;
; Eight byte stores of i64 %m in byte-swapped order (LSB at p[7]); on ppc64le
; they merge into the byte-reversed doubleword store `stdbrx 3, 0, 4`.
-define void @store_i64_by_i8_bswap(i64 %m, i8* %p) {
+define void @store_i64_by_i8_bswap(i64 %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i64_by_i8_bswap:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: stdbrx 3, 0, 4
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i64 %m to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 7
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 7
+ store i8 %conv, ptr %arrayidx, align 1
%0 = lshr i64 %m, 8
%conv3 = trunc i64 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 6
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 6
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i64 %m, 16
%conv7 = trunc i64 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 5
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 5
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i64 %m, 24
%conv11 = trunc i64 %2 to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 4
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 4
+ store i8 %conv11, ptr %arrayidx12, align 1
%3 = lshr i64 %m, 32
%conv15 = trunc i64 %3 to i8
- %arrayidx16 = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv15, i8* %arrayidx16, align 1
+ %arrayidx16 = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv15, ptr %arrayidx16, align 1
%4 = lshr i64 %m, 40
%conv19 = trunc i64 %4 to i8
- %arrayidx20 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv19, i8* %arrayidx20, align 1
+ %arrayidx20 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv19, ptr %arrayidx20, align 1
%5 = lshr i64 %m, 48
%conv23 = trunc i64 %5 to i8
- %arrayidx24 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv23, i8* %arrayidx24, align 1
+ %arrayidx24 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv23, ptr %arrayidx24, align 1
%6 = lshr i64 %m, 56
%conv27 = trunc i64 %6 to i8
- store i8 %conv27, i8* %p, align 1
+ store i8 %conv27, ptr %p, align 1
ret void
}
-; i32 t; i8 *p;
+; i32 t; ptr p;
; i64 m = t * 7;
; p[7] = (m >> 0) & 0xFF;
; p[6] = (m >> 8) & 0xFF;
; p[2] = (m >> 40) & 0xFF;
; p[1] = (m >> 48) & 0xFF;
; p[0] = (m >> 56) & 0xFF;
; Like store_i64_by_i8_bswap, but the stored value (%conv = sext of t*7) has
; additional uses and the high bytes come from ashr rather than lshr. Only
; `slwi 5, 3, 3` is checked in the visible portion (part of the *7 lowering);
; whether the stores still fully merge is established by elided check lines.
-define void @store_i64_by_i8_bswap_uses(i32 signext %t, i8* %p) {
+define void @store_i64_by_i8_bswap_uses(i32 signext %t, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i64_by_i8_bswap_uses:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: slwi 5, 3, 3
%mul = mul nsw i32 %t, 7
%conv = sext i32 %mul to i64
%conv1 = trunc i32 %mul to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 7
- store i8 %conv1, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 7
+ store i8 %conv1, ptr %arrayidx, align 1
%0 = lshr i64 %conv, 8
%conv4 = trunc i64 %0 to i8
- %arrayidx5 = getelementptr inbounds i8, i8* %p, i64 6
- store i8 %conv4, i8* %arrayidx5, align 1
+ %arrayidx5 = getelementptr inbounds i8, ptr %p, i64 6
+ store i8 %conv4, ptr %arrayidx5, align 1
%1 = lshr i64 %conv, 16
%conv8 = trunc i64 %1 to i8
- %arrayidx9 = getelementptr inbounds i8, i8* %p, i64 5
- store i8 %conv8, i8* %arrayidx9, align 1
+ %arrayidx9 = getelementptr inbounds i8, ptr %p, i64 5
+ store i8 %conv8, ptr %arrayidx9, align 1
%2 = lshr i64 %conv, 24
%conv12 = trunc i64 %2 to i8
- %arrayidx13 = getelementptr inbounds i8, i8* %p, i64 4
- store i8 %conv12, i8* %arrayidx13, align 1
+ %arrayidx13 = getelementptr inbounds i8, ptr %p, i64 4
+ store i8 %conv12, ptr %arrayidx13, align 1
%shr14 = ashr i64 %conv, 32
%conv16 = trunc i64 %shr14 to i8
- %arrayidx17 = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv16, i8* %arrayidx17, align 1
+ %arrayidx17 = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv16, ptr %arrayidx17, align 1
%shr18 = ashr i64 %conv, 40
%conv20 = trunc i64 %shr18 to i8
- %arrayidx21 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv20, i8* %arrayidx21, align 1
+ %arrayidx21 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv20, ptr %arrayidx21, align 1
%shr22 = ashr i64 %conv, 48
%conv24 = trunc i64 %shr22 to i8
- %arrayidx25 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv24, i8* %arrayidx25, align 1
+ %arrayidx25 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv24, ptr %arrayidx25, align 1
%shr26 = ashr i64 %conv, 56
%conv28 = trunc i64 %shr26 to i8
- store i8 %conv28, i8* %p, align 1
+ store i8 %conv28, ptr %p, align 1
ret void
}
; One of the stores is volatile
-; i8 *p;
+; ptr p;
; p0 = volatile *p;
; p[3] = (m >> 0) & 0xFF;
; p[2] = (m >> 8) & 0xFF;
; p[1] = (m >> 16) & 0xFF;
; *p0 = (m >> 24) & 0xFF;
; Negative test: the final byte store is volatile, so the combiner must not
; fold all four stores into one stwbrx (the volatile store has to remain a
; distinct access). The visible `li 5, 2` is part of the expected partial
; lowering; full expectations are in elided check lines.
-define void @store_i32_by_i8_bswap_volatile(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_volatile(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_volatile:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: li 5, 2
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i32 %m to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv, ptr %arrayidx, align 1
%0 = lshr i32 %m, 8
%conv3 = trunc i32 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- store volatile i8 %conv11, i8* %p, align 1
+ store volatile i8 %conv11, ptr %p, align 1
ret void
}
; There is a store in between individual stores
-; i8* p, q;
+; ptr p, q;
; p[3] = (m >> 0) & 0xFF;
; p[2] = (m >> 8) & 0xFF;
; *q = 3;
; p[1] = (m >> 16) & 0xFF;
; p[0] = (m >> 24) & 0xFF;
; Negative test: a store to %q (which may alias %p) is interleaved between the
; byte stores, so the four stores must not be blindly merged across it.
; Visible `li 6, 2` is part of the expected lowering; remaining check lines
; are elided in this chunk.
-define void @store_i32_by_i8_bswap_store_in_between(i32 signext %m, i8* %p, i8* %q) {
+define void @store_i32_by_i8_bswap_store_in_between(i32 signext %m, ptr %p, ptr %q) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_store_in_between:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: li 6, 2
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i32 %m to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv, ptr %arrayidx, align 1
%0 = lshr i32 %m, 8
%conv3 = trunc i32 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv3, i8* %arrayidx4, align 1
- store i8 3, i8* %q, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv3, ptr %arrayidx4, align 1
+ store i8 3, ptr %q, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- store i8 %conv11, i8* %p, align 1
+ store i8 %conv11, ptr %p, align 1
ret void
}
; Negative test: the second byte goes to %q instead of %p, so the bytes written
; to %p do not form a contiguous i32 and a full stwbrx merge is not possible.
; The visible `srwi 6, 3, 8` extracts the (m >> 8) byte individually.
-define void @store_i32_by_i8_bswap_unrelated_store(i32 signext %m, i8* %p, i8* %q) {
+define void @store_i32_by_i8_bswap_unrelated_store(i32 signext %m, ptr %p, ptr %q) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_unrelated_store:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: srwi 6, 3, 8
; CHECK-PPC64-NEXT: blr
entry:
%conv = trunc i32 %m to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv, ptr %arrayidx, align 1
%0 = lshr i32 %m, 8
%conv3 = trunc i32 %0 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %q, i64 2
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %q, i64 2
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- store i8 %conv11, i8* %p, align 1
+ store i8 %conv11, ptr %p, align 1
ret void
}
; i32 m;
-; i8* p;
+; ptr p;
; p[3] = (m >> 8) & 0xFF;
; p[4] = (m >> 0) & 0xFF;
; p[2] = (m >> 16) & 0xFF;
; p[1] = (m >> 24) & 0xFF;
; Byte-swapped i32 store pattern at a non-zero base offset (bytes land at
; p[1..4]); expected lowering starts with `addi 4, 4, 1` to rebase the pointer
; (the following merged-store check lines are elided in this chunk).
-define void @store_i32_by_i8_bswap_nonzero_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_nonzero_offset(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_nonzero_offset:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: addi 4, 4, 1
entry:
%0 = lshr i32 %m, 8
%conv = trunc i32 %0 to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 3
+ store i8 %conv, ptr %arrayidx, align 1
%conv3 = trunc i32 %m to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 4
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 4
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 2
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv11, ptr %arrayidx12, align 1
ret void
}
; i32 m;
-; i8* p;
+; ptr p;
; p[-3] = (m >> 8) & 0xFF;
; p[-4] = (m >> 0) & 0xFF;
; p[-2] = (m >> 16) & 0xFF;
; p[-1] = (m >> 24) & 0xFF;
; Little-endian byte-wise i32 store at negative offsets (bytes at p[-4..-1],
; LSB at p[-4]); merges into a single `stw 3, -4(4)`.
-define void @store_i32_by_i8_neg_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_neg_offset(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_neg_offset:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: stw 3, -4(4)
entry:
%0 = lshr i32 %m, 8
%conv = trunc i32 %0 to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 -3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 -3
+ store i8 %conv, ptr %arrayidx, align 1
%conv3 = trunc i32 %m to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 -4
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 -4
+ store i8 %conv3, ptr %arrayidx4, align 1
%1 = lshr i32 %m, 16
%conv7 = trunc i32 %1 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 -2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 -2
+ store i8 %conv7, ptr %arrayidx8, align 1
%2 = lshr i32 %m, 24
%conv11 = trunc i32 %2 to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 -1
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 -1
+ store i8 %conv11, ptr %arrayidx12, align 1
ret void
}
; i32 m;
-; i8* p;
+; ptr p;
; p[-3] = (m >> 16) & 0xFF;
; p[-4] = (m >> 24) & 0xFF;
; p[-2] = (m >> 8) & 0xFF;
; p[-1] = (m >> 0) & 0xFF;
; Byte-swapped i32 store pattern at negative offsets (MSB at p[-4]); expected
; lowering rebases with `addi 4, 4, -4` (merged-store check lines elided here).
-define void @store_i32_by_i8_bswap_neg_offset(i32 signext %m, i8* %p) {
+define void @store_i32_by_i8_bswap_neg_offset(i32 signext %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_neg_offset:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: addi 4, 4, -4
entry:
%0 = lshr i32 %m, 16
%conv = trunc i32 %0 to i8
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 -3
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 -3
+ store i8 %conv, ptr %arrayidx, align 1
%1 = lshr i32 %m, 24
%conv3 = trunc i32 %1 to i8
- %arrayidx4 = getelementptr inbounds i8, i8* %p, i64 -4
- store i8 %conv3, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %p, i64 -4
+ store i8 %conv3, ptr %arrayidx4, align 1
%2 = lshr i32 %m, 8
%conv7 = trunc i32 %2 to i8
- %arrayidx8 = getelementptr inbounds i8, i8* %p, i64 -2
- store i8 %conv7, i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, ptr %p, i64 -2
+ store i8 %conv7, ptr %arrayidx8, align 1
%conv11 = trunc i32 %m to i8
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 -1
- store i8 %conv11, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 -1
+ store i8 %conv11, ptr %arrayidx12, align 1
ret void
}
; i32 m, i;
-; i8* p;
+; ptr p;
; p[i-3] = (m >> 16) & 0xFF;
; p[i-4] = (m >> 24) & 0xFF;
; p[i-2] = (m >> 8) & 0xFF;
; p[i-1] = (m >> 0) & 0xFF;
; Byte-swapped i32 store where each address is base + sign-extended (i - k)
; index; the `extsw 4, 4` check covers the i32->i64 index extension.
; NOTE(review): the entry label and the definition of %0 (presumably
; `lshr i32 %m, 16`, matching the p[i-3] pseudo-C above) are elided in this
; chunk of the file — confirm against the full test.
-define void @store_i32_by_i8_bswap_base_index_offset(i32 %m, i32 %i, i8* %p) {
+define void @store_i32_by_i8_bswap_base_index_offset(i32 %m, i32 %i, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_base_index_offset:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: extsw 4, 4
%conv = trunc i32 %0 to i8
%sub = add nsw i32 %i, -3
%idxprom = sext i32 %sub to i64
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 %idxprom
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 %idxprom
+ store i8 %conv, ptr %arrayidx, align 1
%1 = lshr i32 %m, 24
%conv3 = trunc i32 %1 to i8
%sub4 = add nsw i32 %i, -4
%idxprom5 = sext i32 %sub4 to i64
- %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 %idxprom5
- store i8 %conv3, i8* %arrayidx6, align 1
+ %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 %idxprom5
+ store i8 %conv3, ptr %arrayidx6, align 1
%2 = lshr i32 %m, 8
%conv9 = trunc i32 %2 to i8
%sub10 = add nsw i32 %i, -2
%idxprom11 = sext i32 %sub10 to i64
- %arrayidx12 = getelementptr inbounds i8, i8* %p, i64 %idxprom11
- store i8 %conv9, i8* %arrayidx12, align 1
+ %arrayidx12 = getelementptr inbounds i8, ptr %p, i64 %idxprom11
+ store i8 %conv9, ptr %arrayidx12, align 1
%conv15 = trunc i32 %m to i8
%sub16 = add nsw i32 %i, -1
%idxprom17 = sext i32 %sub16 to i64
- %arrayidx18 = getelementptr inbounds i8, i8* %p, i64 %idxprom17
- store i8 %conv15, i8* %arrayidx18, align 1
+ %arrayidx18 = getelementptr inbounds i8, ptr %p, i64 %idxprom17
+ store i8 %conv15, ptr %arrayidx18, align 1
ret void
}
-; i8* p;
+; ptr p;
; i32 i, m;
-; i8* p0 = p + i;
-; i8* p1 = p + i + 1;
-; i8* p2 = p + i + 2;
-; i8 *p3 = p + i + 3;
+; ptr p0 = p + i;
+; ptr p1 = p + i + 1;
+; ptr p2 = p + i + 2;
+; ptr p3 = p + i + 3;
; p0[3] = (m >> 24) & 0xFF;
; p1[3] = (m >> 16) & 0xFF;
; p2[3] = (m >> 8) & 0xFF;
; p3[3] = (m >> 0) & 0xFF;
; Byte-swapped i32 store where each byte's address is built through a different
; intermediate pointer (p+i, p+i+1, p+i+2, p+i+3, each indexed by +3), so the
; bytes land contiguously at p+i+3..p+i+6 but the combiner must see through
; the pointer chains. `extsw 4, 4` covers the index sign-extension.
-define void @store_i32_by_i8_bswap_complicated(i32 %m, i32 %i, i8* %p) {
+define void @store_i32_by_i8_bswap_complicated(i32 %m, i32 %i, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i32_by_i8_bswap_complicated:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: extsw 4, 4
; CHECK-PPC64-NEXT: blr
entry:
%idx.ext = sext i32 %i to i64
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 %idx.ext
- %add.ptr3 = getelementptr inbounds i8, i8* %add.ptr, i64 1
- %add.ptr6 = getelementptr inbounds i8, i8* %add.ptr, i64 2
- %add.ptr9 = getelementptr inbounds i8, i8* %add.ptr, i64 3
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 %idx.ext
+ %add.ptr3 = getelementptr inbounds i8, ptr %add.ptr, i64 1
+ %add.ptr6 = getelementptr inbounds i8, ptr %add.ptr, i64 2
+ %add.ptr9 = getelementptr inbounds i8, ptr %add.ptr, i64 3
%0 = lshr i32 %m, 24
%conv = trunc i32 %0 to i8
- store i8 %conv, i8* %add.ptr9, align 1
+ store i8 %conv, ptr %add.ptr9, align 1
%1 = lshr i32 %m, 16
%conv12 = trunc i32 %1 to i8
- %arrayidx13 = getelementptr inbounds i8, i8* %add.ptr3, i64 3
- store i8 %conv12, i8* %arrayidx13, align 1
+ %arrayidx13 = getelementptr inbounds i8, ptr %add.ptr3, i64 3
+ store i8 %conv12, ptr %arrayidx13, align 1
%2 = lshr i32 %m, 8
%conv16 = trunc i32 %2 to i8
- %arrayidx17 = getelementptr inbounds i8, i8* %add.ptr6, i64 3
- store i8 %conv16, i8* %arrayidx17, align 1
+ %arrayidx17 = getelementptr inbounds i8, ptr %add.ptr6, i64 3
+ store i8 %conv16, ptr %arrayidx17, align 1
%conv20 = trunc i32 %m to i8
- %arrayidx21 = getelementptr inbounds i8, i8* %add.ptr9, i64 3
- store i8 %conv20, i8* %arrayidx21, align 1
+ %arrayidx21 = getelementptr inbounds i8, ptr %add.ptr9, i64 3
+ store i8 %conv20, ptr %arrayidx21, align 1
ret void
}
-; i8* p; i32 m;
+; ptr p; i32 m;
; p[0] = (m >> 8) & 0xFF;
; p[1] = (m >> 0) & 0xFF;
; Two byte stores of i16 %m in byte-swapped order (MSB at p[0]); merges into
; the byte-reversed halfword store `sthbrx 3, 0, 4` on ppc64le.
-define void @store_i16_by_i8_bswap(i16 %m, i8* %p) {
+define void @store_i16_by_i8_bswap(i16 %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_i16_by_i8_bswap:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: sthbrx 3, 0, 4
entry:
%0 = lshr i16 %m, 8
%conv1 = trunc i16 %0 to i8
- store i8 %conv1, i8* %p, align 1
+ store i8 %conv1, ptr %p, align 1
%conv5 = trunc i16 %m to i8
- %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv5, i8* %arrayidx6, align 1
+ %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv5, ptr %arrayidx6, align 1
ret void
}
-; i8* p; i32 m;
+; ptr p; i32 m;
; p[0] = (m >> 0) & 0xFF;
; p[1] = (m >> 8) & 0xFF;
; Two byte stores of i16 %m in little-endian order (LSB at p[0]); merges into
; a plain `sth 3, 0(4)` on ppc64le.
-define void @store_16_by_i8(i16 %m, i8* %p) {
+define void @store_16_by_i8(i16 %m, ptr %p) {
; CHECK-PPC64LE-LABEL: store_16_by_i8:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: sth 3, 0(4)
; CHECK-PPC64-NEXT: blr
entry:
%conv1 = trunc i16 %m to i8
- store i8 %conv1, i8* %p, align 1
+ store i8 %conv1, ptr %p, align 1
%0 = lshr i16 %m, 8
%conv5 = trunc i16 %0 to i8
- %arrayidx6 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %conv5, i8* %arrayidx6, align 1
+ %arrayidx6 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %conv5, ptr %arrayidx6, align 1
ret void
}
; This was found when testing the hexxagon in testsuite
-; i8* p; i8 v;
+; ptr p; i8 v;
; p[0] = v;
; p[1] = v;
; Regression case (found via hexxagon in the test suite): the same i8 value is
; stored to two consecutive bytes. Checks expect the stores to stay as two
; separate `stb` instructions (stb 4, 0(3) / stb 4, 1(3)).
-define void @store_same_value_to_consecutive_mem(i8* %p, i8 zeroext %v) {
+define void @store_same_value_to_consecutive_mem(ptr %p, i8 zeroext %v) {
; CHECK-PPC64LE-LABEL: store_same_value_to_consecutive_mem:
; CHECK-PPC64LE: # %bb.0: # %entry
; CHECK-PPC64LE-NEXT: stb 4, 0(3)
; CHECK-PPC64-NEXT: stb 4, 1(3)
; CHECK-PPC64-NEXT: blr
entry:
- store i8 %v, i8* %p, align 1
- %arrayidx1 = getelementptr inbounds i8, i8* %p, i64 1
- store i8 %v, i8* %arrayidx1, align 1
+ store i8 %v, ptr %p, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %p, i64 1
+ store i8 %v, ptr %arrayidx1, align 1
ret void
}
@IVal = external local_unnamed_addr global i32, align 4
@LVal = external local_unnamed_addr global i64, align 8
@USVal = external local_unnamed_addr global i16, align 2
-@arr = external local_unnamed_addr global i64*, align 8
-@arri = external local_unnamed_addr global i32*, align 8
+@arr = external local_unnamed_addr global ptr, align 8
+@arri = external local_unnamed_addr global ptr, align 8
; Test the same constant can be used by different stores.
%struct.S = type { i64, i8, i16, i32 }
-define void @foo(%struct.S* %p) {
+define void @foo(ptr %p) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: sth 4, 10(3)
; CHECK-NEXT: stw 4, 12(3)
; CHECK-NEXT: blr
- %l4 = bitcast %struct.S* %p to i64*
- store i64 0, i64* %l4, align 8
- %c = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
- store i8 0, i8* %c, align 8
- %s = getelementptr %struct.S, %struct.S* %p, i64 0, i32 2
- store i16 0, i16* %s, align 2
- %i = getelementptr %struct.S, %struct.S* %p, i64 0, i32 3
- store i32 0, i32* %i, align 4
+ store i64 0, ptr %p, align 8
+ %c = getelementptr %struct.S, ptr %p, i64 0, i32 1
+ store i8 0, ptr %c, align 8
+ %s = getelementptr %struct.S, ptr %p, i64 0, i32 2
+ store i16 0, ptr %s, align 2
+ %i = getelementptr %struct.S, ptr %p, i64 0, i32 3
+ store i32 0, ptr %i, align 4
ret void
}
-define void @bar(%struct.S* %p) {
+define void @bar(ptr %p) {
; CHECK-LABEL: bar:
; CHECK: # %bb.0:
; CHECK-NEXT: li 4, 2
; CHECK-NEXT: std 4, 0(3)
; CHECK-NEXT: stb 4, 8(3)
; CHECK-NEXT: blr
- %i = getelementptr %struct.S, %struct.S* %p, i64 0, i32 3
- store i32 2, i32* %i, align 4
- %s = getelementptr %struct.S, %struct.S* %p, i64 0, i32 2
- store i16 2, i16* %s, align 2
- %c = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
- store i8 2, i8* %c, align 8
- %l4 = bitcast %struct.S* %p to i64*
- store i64 2, i64* %l4, align 8
+ %i = getelementptr %struct.S, ptr %p, i64 0, i32 3
+ store i32 2, ptr %i, align 4
+ %s = getelementptr %struct.S, ptr %p, i64 0, i32 2
+ store i16 2, ptr %s, align 2
+ %c = getelementptr %struct.S, ptr %p, i64 0, i32 1
+ store i8 2, ptr %c, align 8
+ store i64 2, ptr %p, align 8
ret void
}
; CHECK-NEXT: stw 7, 0(5)
; CHECK-NEXT: blr
entry:
- store i8 -7, i8* @CVal, align 1
- store i16 -7, i16* @SVal, align 2
- store i32 -7, i32* @IVal, align 4
- store i64 -7, i64* @LVal, align 8
+ store i8 -7, ptr @CVal, align 1
+ store i16 -7, ptr @SVal, align 2
+ store i32 -7, ptr @IVal, align 4
+ store i64 -7, ptr @LVal, align 8
ret void
}
; CHECK-NEXT: stw 7, 0(5)
; CHECK-NEXT: blr
entry:
- store i8 8, i8* @CVal, align 1
- store i16 8, i16* @SVal, align 2
- store i32 8, i32* @IVal, align 4
- store i64 8, i64* @LVal, align 8
+ store i8 8, ptr @CVal, align 1
+ store i16 8, ptr @SVal, align 2
+ store i32 8, ptr @IVal, align 4
+ store i64 8, ptr @LVal, align 8
ret void
}
; CHECK-NEXT: std 6, 0(5)
; CHECK-NEXT: blr
entry:
- store i16 -32768, i16* @SVal, align 2
- store i32 -32768, i32* @IVal, align 4
- store i64 -32768, i64* @LVal, align 8
+ store i16 -32768, ptr @SVal, align 2
+ store i32 -32768, ptr @IVal, align 4
+ store i64 -32768, ptr @LVal, align 8
ret void
}
; CHECK-NEXT: std 6, 0(5)
; CHECK-NEXT: blr
entry:
- store i16 32767, i16* @SVal, align 2
- store i32 32767, i32* @IVal, align 4
- store i64 32767, i64* @LVal, align 8
+ store i16 32767, ptr @SVal, align 2
+ store i32 32767, ptr @IVal, align 4
+ store i64 32767, ptr @LVal, align 8
ret void
}
; CHECK-NEXT: std 5, 0(4)
; CHECK-NEXT: blr
entry:
- store i32 -32769, i32* @IVal, align 4
- store i64 -32769, i64* @LVal, align 8
+ store i32 -32769, ptr @IVal, align 4
+ store i64 -32769, ptr @LVal, align 8
ret void
}
; CHECK-NEXT: std 6, 0(5)
; CHECK-NEXT: blr
entry:
- store i16 -32768, i16* @USVal, align 2
- store i32 32768, i32* @IVal, align 4
- store i64 32768, i64* @LVal, align 8
+ store i16 -32768, ptr @USVal, align 2
+ store i32 32768, ptr @IVal, align 4
+ store i64 32768, ptr @LVal, align 8
ret void
}
br i1 %cmp7, label %for.body.lr.ph, label %for.cond.cleanup
for.body.lr.ph: ; preds = %entry
- %0 = load i64*, i64** @arr, align 8
- %1 = load i32*, i32** @arri, align 8
+ %0 = load ptr, ptr @arr, align 8
+ %1 = load ptr, ptr @arri, align 8
%wide.trip.count = zext i32 %Len to i64
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i64, i64* %0, i64 %indvars.iv
- store i64 -7, i64* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- store i32 -7, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i64, ptr %0, i64 %indvars.iv
+ store i64 -7, ptr %arrayidx, align 8
+ %arrayidx2 = getelementptr inbounds i32, ptr %1, i64 %indvars.iv
+ store i32 -7, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup, label %for.body
; CHECK-NEXT: stb 5, 0(4)
; CHECK-NEXT: blr
entry:
- store i32 255, i32* @IVal, align 4
- store i8 -1, i8* @CVal, align 1
+ store i32 255, ptr @IVal, align 4
+ store i8 -1, ptr @CVal, align 1
ret void
}
; CHECK-NEXT: sth 5, 0(4)
; CHECK-NEXT: blr
entry:
- store i32 65535, i32* @IVal, align 4
- store i16 -1, i16* @SVal, align 2
+ store i32 65535, ptr @IVal, align 4
+ store i16 -1, ptr @SVal, align 2
ret void
}
%struct.UST = type { i32, i32 }
; Function Attrs: nounwind
-define i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
+define i32 @ustc1(ptr noundef byval(%struct.USST) align 4 %s) {
; CHECK-LABEL: ustc1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 4
+ %0 = load i16, ptr %s, align 4
%conv = zext i16 %0 to i32
%shr = ashr i32 %conv, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 4 %s) {
+define i32 @ustc2(ptr noundef byval(%struct.USST) align 4 %s) {
; CHECK-LABEL: ustc2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 4
+ %0 = load i16, ptr %s, align 4
%conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
+define i32 @stc1(ptr noundef byval(%struct.SST) align 4 %s) {
; CHECK-LABEL: stc1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 4
+ %0 = load i16, ptr %s, align 4
%conv = sext i16 %0 to i32
%shr = ashr i32 %conv, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 4 %s) {
+define i32 @stc2(ptr noundef byval(%struct.SST) align 4 %s) {
; CHECK-LABEL: stc2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 4
+ %0 = load i16, ptr %s, align 4
%conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 4 %s) {
+define i32 @ctc(ptr noundef byval(%struct.CST) align 4 %s) {
; CHECK-LABEL: ctc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.CST, %struct.CST* %s, i32 0, i32 0
- %0 = load i8, i8* %a, align 4
+ %0 = load i8, ptr %s, align 4
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 4 %s) {
+define i32 @sctc(ptr noundef byval(%struct.SCST) align 4 %s) {
; CHECK-LABEL: sctc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: stw 4, 24(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SCST, %struct.SCST* %s, i32 0, i32 0
- %0 = load i8, i8* %a, align 4
+ %0 = load i8, ptr %s, align 4
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define i32 @tc44(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc44(ptr noundef byval(%struct.ST) align 4 %s) {
; CHECK-LABEL: tc44:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
ret i32 %0
}
; Function Attrs: nounwind
-define i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc41(ptr noundef byval(%struct.ST) align 4 %s) {
; CHECK-LABEL: tc41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = ashr i32 %0, 24
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc42(ptr noundef byval(%struct.ST) align 4 %s) {
; CHECK-LABEL: tc42:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = ashr i32 %0, 16
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 4 %s) {
+define i32 @tc43(ptr noundef byval(%struct.ST) align 4 %s) {
; CHECK-LABEL: tc43:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = ashr i32 %0, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @utc44(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc44(ptr noundef byval(%struct.UST) align 4 %s) {
; CHECK-LABEL: utc44:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
ret i32 %0
}
; Function Attrs: nounwind
-define i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc41(ptr noundef byval(%struct.UST) align 4 %s) {
; CHECK-LABEL: utc41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = lshr i32 %0, 24
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc42(ptr noundef byval(%struct.UST) align 4 %s) {
; CHECK-LABEL: utc42:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = lshr i32 %0, 16
ret i32 %shr
}
; Function Attrs: nounwind
-define i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 4 %s) {
+define i32 @utc43(ptr noundef byval(%struct.UST) align 4 %s) {
; CHECK-LABEL: utc43:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stw 3, 24(1)
; CHECK-NEXT: stw 4, 28(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 4
+ %0 = load i32, ptr %s, align 4
%shr = lshr i32 %0, 8
ret i32 %shr
}
%struct.ULST = type { i64, i64 }
; Function Attrs: nounwind
-define zeroext i32 @ustc1(%struct.USST* noundef byval(%struct.USST) align 8 %s) {
+define zeroext i32 @ustc1(ptr noundef byval(%struct.USST) align 8 %s) {
; CHECK-LABEL: ustc1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 8
+ %0 = load i16, ptr %s, align 8
%conv = zext i16 %0 to i32
%shr = ashr i32 %conv, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define zeroext i32 @ustc2(%struct.USST* noundef byval(%struct.USST) align 8 %s) {
+define zeroext i32 @ustc2(ptr noundef byval(%struct.USST) align 8 %s) {
; CHECK-LABEL: ustc2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.USST, %struct.USST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 8
+ %0 = load i16, ptr %s, align 8
%conv = zext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define signext i32 @stc1(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
+define signext i32 @stc1(ptr noundef byval(%struct.SST) align 8 %s) {
; CHECK-LABEL: stc1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: srawi 3, 3, 8
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 8
+ %0 = load i16, ptr %s, align 8
%conv = sext i16 %0 to i32
%shr = ashr i32 %conv, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define signext i32 @stc2(%struct.SST* noundef byval(%struct.SST) align 8 %s) {
+define signext i32 @stc2(ptr noundef byval(%struct.SST) align 8 %s) {
; CHECK-LABEL: stc2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SST, %struct.SST* %s, i32 0, i32 0
- %0 = load i16, i16* %a, align 8
+ %0 = load i16, ptr %s, align 8
%conv = sext i16 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define signext i32 @ctc(%struct.CST* noundef byval(%struct.CST) align 8 %s) {
+define signext i32 @ctc(ptr noundef byval(%struct.CST) align 8 %s) {
; CHECK-LABEL: ctc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.CST, %struct.CST* %s, i32 0, i32 0
- %0 = load i8, i8* %a, align 8
+ %0 = load i8, ptr %s, align 8
%conv = zext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define signext i32 @sctc(%struct.SCST* noundef byval(%struct.SCST) align 8 %s) {
+define signext i32 @sctc(ptr noundef byval(%struct.SCST) align 8 %s) {
; CHECK-LABEL: sctc:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.SCST, %struct.SCST* %s, i32 0, i32 0
- %0 = load i8, i8* %a, align 8
+ %0 = load i8, ptr %s, align 8
%conv = sext i8 %0 to i32
ret i32 %conv
}
; Function Attrs: nounwind
-define signext i32 @tc44(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc44(ptr noundef byval(%struct.ST) align 8 %s) {
; CHECK-LABEL: tc44:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
ret i32 %0
}
; Function Attrs: nounwind
-define signext i32 @tc41(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc41(ptr noundef byval(%struct.ST) align 8 %s) {
; CHECK-LABEL: tc41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = ashr i32 %0, 24
ret i32 %shr
}
; Function Attrs: nounwind
-define signext i32 @tc42(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc42(ptr noundef byval(%struct.ST) align 8 %s) {
; CHECK-LABEL: tc42:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = ashr i32 %0, 16
ret i32 %shr
}
; Function Attrs: nounwind
-define signext i32 @tc43(%struct.ST* noundef byval(%struct.ST) align 8 %s) {
+define signext i32 @tc43(ptr noundef byval(%struct.ST) align 8 %s) {
; CHECK-LABEL: tc43:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ST, %struct.ST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = ashr i32 %0, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define zeroext i32 @utc44(%struct.UST* noundef byval(%struct.UST) align 8 %s) #0 {
+define zeroext i32 @utc44(ptr noundef byval(%struct.UST) align 8 %s) #0 {
; CHECK-LABEL: utc44:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
ret i32 %0
}
; Function Attrs: nounwind
-define zeroext i32 @utc41(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc41(ptr noundef byval(%struct.UST) align 8 %s) {
; CHECK-LABEL: utc41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = lshr i32 %0, 24
ret i32 %shr
}
; Function Attrs: nounwind
-define zeroext i32 @utc42(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc42(ptr noundef byval(%struct.UST) align 8 %s) {
; CHECK-LABEL: utc42:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = lshr i32 %0, 16
ret i32 %shr
}
; Function Attrs: nounwind
-define zeroext i32 @utc43(%struct.UST* noundef byval(%struct.UST) align 8 %s) {
+define zeroext i32 @utc43(ptr noundef byval(%struct.UST) align 8 %s) {
; CHECK-LABEL: utc43:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: std 4, 48(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.UST, %struct.UST* %s, i32 0, i32 0
- %0 = load i32, i32* %a, align 8
+ %0 = load i32, ptr %s, align 8
%shr = lshr i32 %0, 8
ret i32 %shr
}
; Function Attrs: nounwind
-define i64 @ltc88(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc88(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc88:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 8
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc86(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc86(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc86:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 16
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc85(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc85(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc85:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 24
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc84(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc84(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc84:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 32
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc83(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc83(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc83:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 40
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc82(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc82(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc82:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 48
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ltc81(%struct.LST* noundef byval(%struct.LST) align 8 %s) {
+define i64 @ltc81(ptr noundef byval(%struct.LST) align 8 %s) {
; CHECK-LABEL: ltc81:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.LST, %struct.LST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = ashr i64 %0, 56
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc88(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc88(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc88:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: std 3, 48(1)
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
ret i64 %0
}
; Function Attrs: nounwind
-define i64 @ultc87(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc87(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc87:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 8
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc86(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc86(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc86:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 16
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc85(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc85(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc85:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 24
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc84(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc84(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc84:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 32
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc83(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc83(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc83:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 40
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc82(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc82(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc82:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 48
ret i64 %shr
}
; Function Attrs: nounwind
-define i64 @ultc81(%struct.ULST* noundef byval(%struct.ULST) align 8 %s) {
+define i64 @ultc81(ptr noundef byval(%struct.ULST) align 8 %s) {
; CHECK-LABEL: ultc81:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: std 4, 56(1)
; CHECK-NEXT: blr
entry:
- %a = getelementptr inbounds %struct.ULST, %struct.ULST* %s, i32 0, i32 0
- %0 = load i64, i64* %a, align 8
+ %0 = load i64, ptr %s, align 8
%shr = lshr i64 %0, 56
ret i64 %shr
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- | not grep lwz
-define i32 @test(i32* %P) {
- store i32 1, i32* %P
- %V = load i32, i32* %P ; <i32> [#uses=1]
+define i32 @test(ptr %P) {
+ store i32 1, ptr %P
+ %V = load i32, ptr %P ; <i32> [#uses=1]
ret i32 %V
}
; RUN: -mcpu=pwr10 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names \
; RUN: < %s | FileCheck %s --check-prefix=CHECK-BE
-define void @test1(<4 x i32> %A, i32* %a) {
+define void @test1(<4 x i32> %A, ptr %a) {
; CHECK-LE-LABEL: test1:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrwx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <4 x i32> %A, i32 0
- store i32 %vecext, i32* %a, align 4
+ store i32 %vecext, ptr %a, align 4
ret void
}
-define void @test2(<4 x float> %A, float* %a) {
+define void @test2(<4 x float> %A, ptr %a) {
; CHECK-LE-LABEL: test2:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrwx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <4 x float> %A, i32 0
- store float %vecext, float* %a, align 4
+ store float %vecext, ptr %a, align 4
ret void
}
-define void @test3(<2 x double> %A, double* %a) {
+define void @test3(<2 x double> %A, ptr %a) {
; CHECK-LE-LABEL: test3:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrdx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <2 x double> %A, i32 0
- store double %vecext, double* %a, align 8
+ store double %vecext, ptr %a, align 8
ret void
}
-define void @test4(<2 x i64> %A, i64* %a) {
+define void @test4(<2 x i64> %A, ptr %a) {
; CHECK-LE-LABEL: test4:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrdx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <2 x i64> %A, i32 0
- store i64 %vecext, i64* %a, align 8
+ store i64 %vecext, ptr %a, align 8
ret void
}
-define void @test5(<8 x i16> %A, i16* %a) {
+define void @test5(<8 x i16> %A, ptr %a) {
; CHECK-LE-LABEL: test5:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrhx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <8 x i16> %A, i32 0
- store i16 %vecext, i16* %a, align 2
+ store i16 %vecext, ptr %a, align 2
ret void
}
-define void @test6(<16 x i8> %A, i8* %a) {
+define void @test6(<16 x i8> %A, ptr %a) {
; CHECK-LE-LABEL: test6:
; CHECK-LE: # %bb.0: # %entry
; CHECK-LE-NEXT: stxvrbx v2, 0, r5
; CHECK-BE-NEXT: blr
entry:
%vecext = extractelement <16 x i8> %A, i32 0
- store i8 %vecext, i8* %a, align 1
+ store i8 %vecext, ptr %a, align 1
ret void
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define i8* @test_stbu(i8* %base, i8 zeroext %val) nounwind {
+define ptr @test_stbu(ptr %base, i8 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
- store i8 %val, i8* %arrayidx, align 1
- ret i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %base, i64 16
+ store i8 %val, ptr %arrayidx, align 1
+ ret ptr %arrayidx
}
; CHECK: @test_stbu
; CHECK: %entry
; CHECK-NEXT: stbu
; CHECK-NEXT: blr
-define i8* @test_stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind {
+define ptr @test_stbux(ptr %base, i8 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
- store i8 %val, i8* %arrayidx, align 1
- ret i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %base, i64 %offset
+ store i8 %val, ptr %arrayidx, align 1
+ ret ptr %arrayidx
}
; CHECK: @test_stbux
; CHECK: %entry
; CHECK-NEXT: stbux
; CHECK-NEXT: blr
-define i16* @test_sthu(i16* %base, i16 zeroext %val) nounwind {
+define ptr @test_sthu(ptr %base, i16 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
- store i16 %val, i16* %arrayidx, align 2
- ret i16* %arrayidx
+ %arrayidx = getelementptr inbounds i16, ptr %base, i64 16
+ store i16 %val, ptr %arrayidx, align 2
+ ret ptr %arrayidx
}
; CHECK: @test_sthu
; CHECK: %entry
; CHECK-NEXT: sthu
; CHECK-NEXT: blr
-define i16* @test_sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind {
+define ptr @test_sthux(ptr %base, i16 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
- store i16 %val, i16* %arrayidx, align 2
- ret i16* %arrayidx
+ %arrayidx = getelementptr inbounds i16, ptr %base, i64 %offset
+ store i16 %val, ptr %arrayidx, align 2
+ ret ptr %arrayidx
}
; CHECK: @test_sthux
; CHECK: %entry
; CHECK-NEXT: sthux
; CHECK-NEXT: blr
-define i32* @test_stwu(i32* %base, i32 zeroext %val) nounwind {
+define ptr @test_stwu(ptr %base, i32 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
- store i32 %val, i32* %arrayidx, align 4
- ret i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, ptr %base, i64 16
+ store i32 %val, ptr %arrayidx, align 4
+ ret ptr %arrayidx
}
; CHECK: @test_stwu
; CHECK: %entry
; CHECK-NEXT: stwu
; CHECK-NEXT: blr
-define i32* @test_stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind {
+define ptr @test_stwux(ptr %base, i32 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
- store i32 %val, i32* %arrayidx, align 4
- ret i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, ptr %base, i64 %offset
+ store i32 %val, ptr %arrayidx, align 4
+ ret ptr %arrayidx
}
; CHECK: @test_stwux
; CHECK: %entry
; CHECK-NEXT: stwux
; CHECK-NEXT: blr
-define i8* @test_stbu8(i8* %base, i64 %val) nounwind {
+define ptr @test_stbu8(ptr %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
- store i8 %conv, i8* %arrayidx, align 1
- ret i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %base, i64 16
+ store i8 %conv, ptr %arrayidx, align 1
+ ret ptr %arrayidx
}
; CHECK: @test_stbu8
; CHECK: %entry
; CHECK-NEXT: stbu
; CHECK-NEXT: blr
-define i8* @test_stbux8(i8* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stbux8(ptr %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
- store i8 %conv, i8* %arrayidx, align 1
- ret i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %base, i64 %offset
+ store i8 %conv, ptr %arrayidx, align 1
+ ret ptr %arrayidx
}
; CHECK: @test_stbux8
; CHECK: %entry
; CHECK-NEXT: stbux
; CHECK-NEXT: blr
-define i16* @test_sthu8(i16* %base, i64 %val) nounwind {
+define ptr @test_sthu8(ptr %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
- store i16 %conv, i16* %arrayidx, align 2
- ret i16* %arrayidx
+ %arrayidx = getelementptr inbounds i16, ptr %base, i64 16
+ store i16 %conv, ptr %arrayidx, align 2
+ ret ptr %arrayidx
}
; CHECK: @test_sthu
; CHECK: %entry
; CHECK-NEXT: sthu
; CHECK-NEXT: blr
-define i16* @test_sthux8(i16* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_sthux8(ptr %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
- store i16 %conv, i16* %arrayidx, align 2
- ret i16* %arrayidx
+ %arrayidx = getelementptr inbounds i16, ptr %base, i64 %offset
+ store i16 %conv, ptr %arrayidx, align 2
+ ret ptr %arrayidx
}
; CHECK: @test_sthux
; CHECK: %entry
; CHECK-NEXT: sthux
; CHECK-NEXT: blr
-define i32* @test_stwu8(i32* %base, i64 %val) nounwind {
+define ptr @test_stwu8(ptr %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
- store i32 %conv, i32* %arrayidx, align 4
- ret i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, ptr %base, i64 16
+ store i32 %conv, ptr %arrayidx, align 4
+ ret ptr %arrayidx
}
; CHECK: @test_stwu
; CHECK: %entry
; CHECK-NEXT: stwu
; CHECK-NEXT: blr
-define i32* @test_stwux8(i32* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stwux8(ptr %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
- store i32 %conv, i32* %arrayidx, align 4
- ret i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, ptr %base, i64 %offset
+ store i32 %conv, ptr %arrayidx, align 4
+ ret ptr %arrayidx
}
; CHECK: @test_stwux
; CHECK: %entry
; CHECK-NEXT: stwux
; CHECK-NEXT: blr
-define i64* @test_stdu(i64* %base, i64 %val) nounwind {
+define ptr @test_stdu(ptr %base, i64 %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64, i64* %base, i64 16
- store i64 %val, i64* %arrayidx, align 8
- ret i64* %arrayidx
+ %arrayidx = getelementptr inbounds i64, ptr %base, i64 16
+ store i64 %val, ptr %arrayidx, align 8
+ ret ptr %arrayidx
}
; CHECK: @test_stdu
; CHECK: %entry
; CHECK-NEXT: stdu
; CHECK-NEXT: blr
-define i64* @test_stdux(i64* %base, i64 %val, i64 %offset) nounwind {
+define ptr @test_stdux(ptr %base, i64 %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64, i64* %base, i64 %offset
- store i64 %val, i64* %arrayidx, align 8
- ret i64* %arrayidx
+ %arrayidx = getelementptr inbounds i64, ptr %base, i64 %offset
+ store i64 %val, ptr %arrayidx, align 8
+ ret ptr %arrayidx
}
; CHECK: @test_stdux
; CHECK: %entry
; ==========================================
; Function Attrs: norecurse nounwind
-define void @qpConv2sdw(fp128* nocapture readonly %a, i64* nocapture %b) {
+define void @qpConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: qpConv2sdw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv 2, 0(3)
; CHECK-PWR8-NEXT: mtlr 0
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2sw(fp128* nocapture readonly %a, i32* nocapture %b) {
+define void @qpConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: qpConv2sw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv 2, 0(3)
; CHECK-PWR8-NEXT: mtlr 0
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptosi fp128 %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2udw(fp128* nocapture readonly %a, i64* nocapture %b) {
+define void @qpConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: qpConv2udw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv 2, 0(3)
; CHECK-PWR8-NEXT: mtlr 0
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @qpConv2uw(fp128* nocapture readonly %a, i32* nocapture %b) {
+define void @qpConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: qpConv2uw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv 2, 0(3)
; CHECK-PWR8-NEXT: mtlr 0
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load fp128, fp128* %a, align 16
+ %0 = load fp128, ptr %a, align 16
%conv = fptoui fp128 %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sdw(double* nocapture readonly %a, i64* nocapture %b) {
+define void @dpConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2sdw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stxsdx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sw(double* nocapture readonly %a, i32* nocapture %b) {
+define void @dpConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2sw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stfiwx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2shw(double* nocapture readonly %a, i16* nocapture %b) {
+define void @dpConv2shw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2shw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: sth 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i16
- store i16 %conv, i16* %b, align 2
+ store i16 %conv, ptr %b, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sb(double* nocapture readonly %a, i8* nocapture %b) {
+define void @dpConv2sb(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2sb:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stb 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i8
- store i8 %conv, i8* %b, align 1
+ store i8 %conv, ptr %b, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sdw(float* nocapture readonly %a, i64* nocapture %b) {
+define void @spConv2sdw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2sdw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stxsdx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sw(float* nocapture readonly %a, i32* nocapture %b) {
+define void @spConv2sw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2sw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stfiwx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2shw(float* nocapture readonly %a, i16* nocapture %b) {
+define void @spConv2shw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2shw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: sth 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i16
- store i16 %conv, i16* %b, align 2
+ store i16 %conv, ptr %b, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sb(float* nocapture readonly %a, i8* nocapture %b) {
+define void @spConv2sb(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2sb:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stb 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i8
- store i8 %conv, i8* %b, align 1
+ store i8 %conv, ptr %b, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sdw_x(double* nocapture readonly %a, i64* nocapture %b,
+define void @dpConv2sdw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2sdw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i64
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sw_x(double* nocapture readonly %a, i32* nocapture %b,
+define void @dpConv2sw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2sw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i32
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ store i32 %conv, ptr %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2shw_x(double* nocapture readonly %a, i16* nocapture %b,
+define void @dpConv2shw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2shw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i16
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
- store i16 %conv, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+ store i16 %conv, ptr %arrayidx, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2sb_x(double* nocapture readonly %a, i8* nocapture %b,
+define void @dpConv2sb_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2sb_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptosi double %0 to i8
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ store i8 %conv, ptr %arrayidx, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sdw_x(float* nocapture readonly %a, i64* nocapture %b,
+define void @spConv2sdw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2sdw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i64
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sw_x(float* nocapture readonly %a, i32* nocapture %b,
+define void @spConv2sw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2sw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i32
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ store i32 %conv, ptr %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2shw_x(float* nocapture readonly %a, i16* nocapture %b,
+define void @spConv2shw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2shw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i16
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
- store i16 %conv, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+ store i16 %conv, ptr %arrayidx, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2sb_x(float* nocapture readonly %a, i8* nocapture %b,
+define void @spConv2sb_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2sb_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 signext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptosi float %0 to i8
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ store i8 %conv, ptr %arrayidx, align 1
ret void
; ==========================================
; Function Attrs: norecurse nounwind
-define void @dpConv2udw(double* nocapture readonly %a, i64* nocapture %b) {
+define void @dpConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2udw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stxsdx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2uw(double* nocapture readonly %a, i32* nocapture %b) {
+define void @dpConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2uw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stfiwx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2uhw(double* nocapture readonly %a, i16* nocapture %b) {
+define void @dpConv2uhw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2uhw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: sth 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i16
- store i16 %conv, i16* %b, align 2
+ store i16 %conv, ptr %b, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2ub(double* nocapture readonly %a, i8* nocapture %b) {
+define void @dpConv2ub(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: dpConv2ub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: stb 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i8
- store i8 %conv, i8* %b, align 1
+ store i8 %conv, ptr %b, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2udw(float* nocapture readonly %a, i64* nocapture %b) {
+define void @spConv2udw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2udw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stxsdx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i64
- store i64 %conv, i64* %b, align 8
+ store i64 %conv, ptr %b, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2uw(float* nocapture readonly %a, i32* nocapture %b) {
+define void @spConv2uw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2uw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stfiwx 0, 0, 4
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i32
- store i32 %conv, i32* %b, align 4
+ store i32 %conv, ptr %b, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2uhw(float* nocapture readonly %a, i16* nocapture %b) {
+define void @spConv2uhw(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2uhw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: sth 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i16
- store i16 %conv, i16* %b, align 2
+ store i16 %conv, ptr %b, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2ub(float* nocapture readonly %a, i8* nocapture %b) {
+define void @spConv2ub(ptr nocapture readonly %a, ptr nocapture %b) {
; CHECK-LABEL: spConv2ub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: stb 3, 0(4)
; CHECK-PWR8-NEXT: blr
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i8
- store i8 %conv, i8* %b, align 1
+ store i8 %conv, ptr %b, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2udw_x(double* nocapture readonly %a, i64* nocapture %b,
+define void @dpConv2udw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2udw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i64
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2uw_x(double* nocapture readonly %a, i32* nocapture %b,
+define void @dpConv2uw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2uw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i32
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ store i32 %conv, ptr %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2uhw_x(double* nocapture readonly %a, i16* nocapture %b,
+define void @dpConv2uhw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2uhw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i16
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
- store i16 %conv, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+ store i16 %conv, ptr %arrayidx, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @dpConv2ub_x(double* nocapture readonly %a, i8* nocapture %b,
+define void @dpConv2ub_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: dpConv2ub_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load double, double* %a, align 8
+ %0 = load double, ptr %a, align 8
%conv = fptoui double %0 to i8
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ store i8 %conv, ptr %arrayidx, align 1
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2udw_x(float* nocapture readonly %a, i64* nocapture %b,
+define void @spConv2udw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2udw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i64
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i64, i64* %b, i64 %idxprom
- store i64 %conv, i64* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds i64, ptr %b, i64 %idxprom
+ store i64 %conv, ptr %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2uw_x(float* nocapture readonly %a, i32* nocapture %b,
+define void @spConv2uw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2uw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i32
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- store i32 %conv, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+ store i32 %conv, ptr %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2uhw_x(float* nocapture readonly %a, i16* nocapture %b,
+define void @spConv2uhw_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2uhw_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i16
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i16, i16* %b, i64 %idxprom
- store i16 %conv, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds i16, ptr %b, i64 %idxprom
+ store i16 %conv, ptr %arrayidx, align 2
ret void
}
; Function Attrs: norecurse nounwind
-define void @spConv2ub_x(float* nocapture readonly %a, i8* nocapture %b,
+define void @spConv2ub_x(ptr nocapture readonly %a, ptr nocapture %b,
; CHECK-LABEL: spConv2ub_x:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfs 0, 0(3)
; CHECK-PWR8-NEXT: blr
i32 zeroext %idx) {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%conv = fptoui float %0 to i8
%idxprom = zext i32 %idx to i64
- %arrayidx = getelementptr inbounds i8, i8* %b, i64 %idxprom
- store i8 %conv, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %b, i64 %idxprom
+ store i8 %conv, ptr %arrayidx, align 1
ret void
%p5 = alloca %struct.s5, align 4
%p6 = alloca %struct.s6, align 4
%p7 = alloca %struct.s7, align 4
- %0 = bitcast %struct.s1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
- %1 = bitcast %struct.s2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
- %2 = bitcast %struct.s3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
- %3 = bitcast %struct.s4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
- %4 = bitcast %struct.s5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
- %5 = bitcast %struct.s6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
- %6 = bitcast %struct.s7* %p7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
- %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p4, ptr align 4 @caller1.p4, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p5, ptr align 4 @caller1.p5, i64 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p6, ptr align 4 @caller1.p6, i64 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p7, ptr align 4 @caller1.p7, i64 8, i1 false)
+ %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, ptr byval(%struct.s1) %p1, ptr byval(%struct.s2) %p2, ptr byval(%struct.s3) %p3, ptr byval(%struct.s4) %p4, ptr byval(%struct.s5) %p5, ptr byval(%struct.s6) %p6, ptr byval(%struct.s7) %p7)
ret i32 %call
; CHECK: stb {{[0-9]+}}, 119(1)
; CHECK: std {{[0-9]+}}, 160(1)
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
+define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, ptr byval(%struct.s1) %v1, ptr byval(%struct.s2) %v2, ptr byval(%struct.s3) %v3, ptr byval(%struct.s4) %v4, ptr byval(%struct.s5) %v5, ptr byval(%struct.s6) %v6, ptr byval(%struct.s7) %v7) nounwind {
entry:
%z1.addr = alloca i32, align 4
%z2.addr = alloca i32, align 4
%z6.addr = alloca i32, align 4
%z7.addr = alloca i32, align 4
%z8.addr = alloca i32, align 4
- store i32 %z1, i32* %z1.addr, align 4
- store i32 %z2, i32* %z2.addr, align 4
- store i32 %z3, i32* %z3.addr, align 4
- store i32 %z4, i32* %z4.addr, align 4
- store i32 %z5, i32* %z5.addr, align 4
- store i32 %z6, i32* %z6.addr, align 4
- store i32 %z7, i32* %z7.addr, align 4
- store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
- %0 = load i8, i8* %a, align 1
+ store i32 %z1, ptr %z1.addr, align 4
+ store i32 %z2, ptr %z2.addr, align 4
+ store i32 %z3, ptr %z3.addr, align 4
+ store i32 %z4, ptr %z4.addr, align 4
+ store i32 %z5, ptr %z5.addr, align 4
+ store i32 %z6, ptr %z6.addr, align 4
+ store i32 %z7, ptr %z7.addr, align 4
+ store i32 %z8, ptr %z8.addr, align 4
+ %0 = load i8, ptr %v1, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
- %1 = load i16, i16* %a1, align 2
+ %1 = load i16, ptr %v2, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
- %2 = load i16, i16* %a3, align 2
+ %2 = load i16, ptr %v3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
- %3 = load i32, i32* %a6, align 4
+ %3 = load i32, ptr %v4, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
- %4 = load i32, i32* %a8, align 4
+ %4 = load i32, ptr %v5, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
- %5 = load i32, i32* %a10, align 4
+ %5 = load i32, ptr %v6, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
- %6 = load i32, i32* %a12, align 4
+ %6 = load i32, ptr %v7, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
%p5 = alloca %struct.t5, align 1
%p6 = alloca %struct.t6, align 1
%p7 = alloca %struct.t7, align 1
- %0 = bitcast %struct.t1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
- %1 = bitcast %struct.t2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
- %2 = bitcast %struct.t3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
- %3 = bitcast %struct.t4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
- %4 = bitcast %struct.t5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
- %5 = bitcast %struct.t6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
- %6 = bitcast %struct.t7* %p7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
- %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p4, ptr @caller2.p4, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p5, ptr @caller2.p5, i64 5, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p6, ptr @caller2.p6, i64 6, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p7, ptr @caller2.p7, i64 7, i1 false)
+ %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, ptr byval(%struct.t1) %p1, ptr byval(%struct.t2) %p2, ptr byval(%struct.t3) %p3, ptr byval(%struct.t4) %p4, ptr byval(%struct.t5) %p5, ptr byval(%struct.t6) %p6, ptr byval(%struct.t7) %p7)
ret i32 %call
; CHECK: stb {{[0-9]+}}, 119(1)
; CHECK: stw {{[0-9]+}}, 161(1)
}
-define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
+define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, ptr byval(%struct.t1) %v1, ptr byval(%struct.t2) %v2, ptr byval(%struct.t3) %v3, ptr byval(%struct.t4) %v4, ptr byval(%struct.t5) %v5, ptr byval(%struct.t6) %v6, ptr byval(%struct.t7) %v7) nounwind {
entry:
%z1.addr = alloca i32, align 4
%z2.addr = alloca i32, align 4
%z6.addr = alloca i32, align 4
%z7.addr = alloca i32, align 4
%z8.addr = alloca i32, align 4
- store i32 %z1, i32* %z1.addr, align 4
- store i32 %z2, i32* %z2.addr, align 4
- store i32 %z3, i32* %z3.addr, align 4
- store i32 %z4, i32* %z4.addr, align 4
- store i32 %z5, i32* %z5.addr, align 4
- store i32 %z6, i32* %z6.addr, align 4
- store i32 %z7, i32* %z7.addr, align 4
- store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
- %0 = load i8, i8* %a, align 1
+ store i32 %z1, ptr %z1.addr, align 4
+ store i32 %z2, ptr %z2.addr, align 4
+ store i32 %z3, ptr %z3.addr, align 4
+ store i32 %z4, ptr %z4.addr, align 4
+ store i32 %z5, ptr %z5.addr, align 4
+ store i32 %z6, ptr %z6.addr, align 4
+ store i32 %z7, ptr %z7.addr, align 4
+ store i32 %z8, ptr %z8.addr, align 4
+ %0 = load i8, ptr %v1, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
- %1 = load i16, i16* %a1, align 1
+ %1 = load i16, ptr %v2, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
- %2 = load i16, i16* %a3, align 1
+ %2 = load i16, ptr %v3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
- %3 = load i32, i32* %a6, align 1
+ %3 = load i32, ptr %v4, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
- %4 = load i32, i32* %a8, align 1
+ %4 = load i32, ptr %v5, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
- %5 = load i32, i32* %a10, align 1
+ %5 = load i32, ptr %v6, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
- %6 = load i32, i32* %a12, align 1
+ %6 = load i32, ptr %v7, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
%p5 = alloca %struct.s5, align 4
%p6 = alloca %struct.s6, align 4
%p7 = alloca %struct.s7, align 4
- %0 = bitcast %struct.s1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
- %1 = bitcast %struct.s2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
- %2 = bitcast %struct.s3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
- %3 = bitcast %struct.s4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
- %4 = bitcast %struct.s5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
- %5 = bitcast %struct.s6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
- %6 = bitcast %struct.s7* %p7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
- %call = call i32 @callee1(%struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p4, ptr align 4 @caller1.p4, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p5, ptr align 4 @caller1.p5, i64 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p6, ptr align 4 @caller1.p6, i64 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p7, ptr align 4 @caller1.p7, i64 8, i1 false)
+ %call = call i32 @callee1(ptr byval(%struct.s1) %p1, ptr byval(%struct.s2) %p2, ptr byval(%struct.s3) %p3, ptr byval(%struct.s4) %p4, ptr byval(%struct.s5) %p5, ptr byval(%struct.s6) %p6, ptr byval(%struct.s7) %p7)
ret i32 %call
; CHECK-LABEL: caller1
; CHECK: lbz 3, 160(31)
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-define internal i32 @callee1(%struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind {
+define internal i32 @callee1(ptr byval(%struct.s1) %v1, ptr byval(%struct.s2) %v2, ptr byval(%struct.s3) %v3, ptr byval(%struct.s4) %v4, ptr byval(%struct.s5) %v5, ptr byval(%struct.s6) %v6, ptr byval(%struct.s7) %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
- %0 = load i8, i8* %a, align 1
+ %0 = load i8, ptr %v1, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
- %1 = load i16, i16* %a1, align 2
+ %1 = load i16, ptr %v2, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
- %2 = load i16, i16* %a3, align 2
+ %2 = load i16, ptr %v3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
- %3 = load i32, i32* %a6, align 4
+ %3 = load i32, ptr %v4, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
- %4 = load i32, i32* %a8, align 4
+ %4 = load i32, ptr %v5, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
- %5 = load i32, i32* %a10, align 4
+ %5 = load i32, ptr %v6, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
- %6 = load i32, i32* %a12, align 4
+ %6 = load i32, ptr %v7, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
%p5 = alloca %struct.t5, align 1
%p6 = alloca %struct.t6, align 1
%p7 = alloca %struct.t7, align 1
- %0 = bitcast %struct.t1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
- %1 = bitcast %struct.t2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
- %2 = bitcast %struct.t3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
- %3 = bitcast %struct.t4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
- %4 = bitcast %struct.t5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
- %5 = bitcast %struct.t6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false)
- %6 = bitcast %struct.t7* %p7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false)
- %call = call i32 @callee2(%struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p4, ptr @caller2.p4, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p5, ptr @caller2.p5, i64 5, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p6, ptr @caller2.p6, i64 6, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %p7, ptr @caller2.p7, i64 7, i1 false)
+ %call = call i32 @callee2(ptr byval(%struct.t1) %p1, ptr byval(%struct.t2) %p2, ptr byval(%struct.t3) %p3, ptr byval(%struct.t4) %p4, ptr byval(%struct.t5) %p5, ptr byval(%struct.t6) %p6, ptr byval(%struct.t7) %p7)
ret i32 %call
; CHECK-LABEL: caller2
; CHECK: lbz 3, 160(31)
}
-define internal i32 @callee2(%struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind {
+define internal i32 @callee2(ptr byval(%struct.t1) %v1, ptr byval(%struct.t2) %v2, ptr byval(%struct.t3) %v3, ptr byval(%struct.t4) %v4, ptr byval(%struct.t5) %v5, ptr byval(%struct.t6) %v6, ptr byval(%struct.t7) %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
- %0 = load i8, i8* %a, align 1
+ %0 = load i8, ptr %v1, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
- %1 = load i16, i16* %a1, align 1
+ %1 = load i16, ptr %v2, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
- %2 = load i16, i16* %a3, align 1
+ %2 = load i16, ptr %v3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
- %3 = load i32, i32* %a6, align 1
+ %3 = load i32, ptr %v4, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
- %4 = load i32, i32* %a8, align 1
+ %4 = load i32, ptr %v5, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
- %5 = load i32, i32* %a10, align 1
+ %5 = load i32, ptr %v6, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
- %6 = load i32, i32* %a12, align 1
+ %6 = load i32, ptr %v7, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
define void @_GLOBAL__I_a() nounwind section ".text.startup" {
entry:
- store i32 5, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 0), align 4
- store i32 6, i32* getelementptr inbounds (%class.Two.0.5, %class.Two.0.5* @foo, i32 0, i32 1), align 4
+ store i32 5, ptr @foo, align 4
+ store i32 6, ptr getelementptr inbounds (%class.Two.0.5, ptr @foo, i32 0, i32 1), align 4
ret void
}
%0 = type { i32, i32 }
; Function Attrs: norecurse nounwind writeonly
-define void @initCombList(%0* nocapture, i32 signext) local_unnamed_addr #0 {
+define void @initCombList(ptr nocapture, i32 signext) local_unnamed_addr #0 {
; CHECK-LABEL: initCombList:
; CHECK: addi 4, 4, -8
; CHECK: stwu [[REG:[0-9]+]], 64(3)
br i1 undef, label %6, label %4
; <label>:4: ; preds = %2
- store i32 0, i32* undef, align 4, !tbaa !1
+ store i32 0, ptr undef, align 4, !tbaa !1
%5 = add nuw nsw i64 0, 1
br label %6
; <label>:8: ; preds = %8, %6
%9 = phi i64 [ %21, %8 ], [ %7, %6 ]
- %10 = getelementptr inbounds %0, %0* %0, i64 %9, i32 1
- store i32 0, i32* %10, align 4, !tbaa !1
+ %10 = getelementptr inbounds %0, ptr %0, i64 %9, i32 1
+ store i32 0, ptr %10, align 4, !tbaa !1
%11 = add nuw nsw i64 %9, 1
- %12 = getelementptr inbounds %0, %0* %0, i64 %11, i32 1
- store i32 0, i32* %12, align 4, !tbaa !1
+ %12 = getelementptr inbounds %0, ptr %0, i64 %11, i32 1
+ store i32 0, ptr %12, align 4, !tbaa !1
%13 = add nsw i64 %9, 2
- %14 = getelementptr inbounds %0, %0* %0, i64 %13, i32 1
- store i32 0, i32* %14, align 4, !tbaa !1
+ %14 = getelementptr inbounds %0, ptr %0, i64 %13, i32 1
+ store i32 0, ptr %14, align 4, !tbaa !1
%15 = add nsw i64 %9, 3
- %16 = getelementptr inbounds %0, %0* %0, i64 %15, i32 1
- store i32 0, i32* %16, align 4, !tbaa !1
+ %16 = getelementptr inbounds %0, ptr %0, i64 %15, i32 1
+ store i32 0, ptr %16, align 4, !tbaa !1
%17 = add nsw i64 %9, 4
- %18 = getelementptr inbounds %0, %0* %0, i64 %17, i32 1
- store i32 0, i32* %18, align 4, !tbaa !1
+ %18 = getelementptr inbounds %0, ptr %0, i64 %17, i32 1
+ store i32 0, ptr %18, align 4, !tbaa !1
%19 = add nsw i64 %9, 6
- %20 = getelementptr inbounds %0, %0* %0, i64 %19, i32 1
- store i32 0, i32* %20, align 4, !tbaa !1
+ %20 = getelementptr inbounds %0, ptr %0, i64 %19, i32 1
+ store i32 0, ptr %20, align 4, !tbaa !1
%21 = add nsw i64 %9, 8
%22 = icmp eq i64 %21, %3
br i1 %22, label %23, label %8, !llvm.loop !6
%"class.std::_Rb_tree.19.101.511.536" = type { %"struct.std::_Rb_tree<std::pair<const char *, const char *>, std::pair<const std::pair<const char *, const char *>, int>, std::_Select1st<std::pair<const std::pair<const char *, const char *>, int>>, std::less<std::pair<const char *, const char *>>, std::allocator<std::pair<const std::pair<const char *, const char *>, int>> >::_Rb_tree_impl.18.100.510.535" }
%"struct.std::_Rb_tree<std::pair<const char *, const char *>, std::pair<const std::pair<const char *, const char *>, int>, std::_Select1st<std::pair<const std::pair<const char *, const char *>, int>>, std::less<std::pair<const char *, const char *>>, std::allocator<std::pair<const std::pair<const char *, const char *>, int>> >::_Rb_tree_impl.18.100.510.535" = type { %"struct.std::less.16.98.508.533", %"struct.std::_Rb_tree_node_base.17.99.509.534", i64 }
%"struct.std::less.16.98.508.533" = type { i8 }
-%"struct.std::_Rb_tree_node_base.17.99.509.534" = type { i32, %"struct.std::_Rb_tree_node_base.17.99.509.534"*, %"struct.std::_Rb_tree_node_base.17.99.509.534"*, %"struct.std::_Rb_tree_node_base.17.99.509.534"* }
+%"struct.std::_Rb_tree_node_base.17.99.509.534" = type { i32, ptr, ptr, ptr }
-define void @test1(%class.spell_checker.21.103.513.538* %this) unnamed_addr align 2 {
+define void @test1(ptr %this) unnamed_addr align 2 {
entry:
- %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
- %0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8*
- call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 40, i1 false) nounwind
- store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8
+ %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, ptr %this, i64 0, i32 0, i32 0, i32 0, i32 1
+ call void @llvm.memset.p0.i64(ptr align 4 %_M_header.i.i.i.i.i.i, i8 0, i64 40, i1 false) nounwind
+ store ptr %_M_header.i.i.i.i.i.i, ptr undef, align 8
unreachable
}
; CHECK: @test1
; CHECK: stwu
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
if.end15: ; preds = %while.end
%idxprom.i.i230 = sext i32 %i.1 to i64
- %arrayidx18 = getelementptr inbounds [100 x i32], [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
- store i32 0, i32* %arrayidx18, align 4
+ %arrayidx18 = getelementptr inbounds [100 x i32], ptr @multvec_i, i64 0, i64 %idxprom.i.i230
+ store i32 0, ptr %arrayidx18, align 4
br i1 undef, label %while.body21, label %while.end90
while.body21: ; preds = %if.end15
unreachable
while.end90: ; preds = %if.end15
- store i32 0, i32* %arrayidx18, align 4
+ store i32 0, ptr %arrayidx18, align 4
br label %return
return: ; preds = %while.end90, %while.end, %entry
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @jbd2_journal_commit_transaction(i32 %input1, i32* %input2, i32* %input3, i8** %input4) #0 {
+define void @jbd2_journal_commit_transaction(i32 %input1, ptr %input2, ptr %input3, ptr %input4) #0 {
entry:
br label %while.body392
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
- %0 = load i8*, i8** %input4, align 8
- %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
- %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+ %0 = load ptr, ptr %input4, align 8
+ %add.ptr399 = getelementptr inbounds i8, ptr %0, i64 -72
%ivar = add i32 %input1, 1
%tobool.i1316 = icmp eq i32 %input1, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %while.end418
wait_on_buffer.exit1319: ; preds = %while.body392
- %1 = load volatile i64, i64* %b_state.i.i1314, align 8
+ %1 = load volatile i64, ptr %add.ptr399, align 8
%conv.i.i1322 = and i64 %1, 1
%lnot404 = icmp eq i64 %conv.i.i1322, 0
%.err.4 = select i1 %lnot404, i32 -5, i32 %input1
- %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #0
- store i8* %0, i8** %input4, align 8
- %cmp.i1312 = icmp eq i32* %input2, %input3
+ %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(ptr elementtype(i64) %add.ptr399, i64 262144, ptr %add.ptr399, ptr elementtype(i64) %add.ptr399) #0
+ store ptr %0, ptr %input4, align 8
+ %cmp.i1312 = icmp eq ptr %input2, %input3
br i1 %cmp.i1312, label %while.end418, label %while.body392
while.end418: ; preds = %wait_on_buffer.exit1319, %do.body378
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
- i32* %inp3, i32** %inp4,
- i32** %inp5, i1 %inp6,
+define void @jbd2_journal_commit_transaction(ptr %journal, i64 %inp1, i32 %inp2,
+ ptr %inp3, ptr %inp4,
+ ptr %inp5, i1 %inp6,
i1 %inp7, i1 %inp8) #0 {
entry:
br i1 undef, label %do.body, label %if.then5
br label %while.body392
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
- %0 = load i8*, i8** undef, align 8
- %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
- %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+ %0 = load ptr, ptr undef, align 8
+ %add.ptr399 = getelementptr inbounds i8, ptr %0, i64 -72
%tobool.i1316 = icmp eq i64 %inp1, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
unreachable
wait_on_buffer.exit1319: ; preds = %while.body392
- %1 = load volatile i64, i64* %b_state.i.i1314, align 8
+ %1 = load volatile i64, ptr %add.ptr399, align 8
%conv.i.i1322 = and i64 %1, 1
%lnot404 = icmp eq i64 %conv.i.i1322, 0
%.err.4 = select i1 %lnot404, i32 -5, i32 %inp2
- %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #1
- %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
- %3 = load i32*, i32** %inp4, align 8
- store i32* %3, i32** %inp5, align 8
- call void @__brelse(i32* %3) #1
+ %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(ptr elementtype(i64) %add.ptr399, i64 262144, ptr %add.ptr399, ptr elementtype(i64) %add.ptr399) #1
+ %prev.i.i.i1325 = getelementptr inbounds i8, ptr %0, i64 8
+ %3 = load ptr, ptr %inp4, align 8
+ store ptr %3, ptr %inp5, align 8
+ call void @__brelse(ptr %3) #1
br i1 %inp8, label %while.end418, label %while.body392
; CHECK-LABEL: @jbd2_journal_commit_transaction
br i1 %inp7, label %if.end421, label %if.then420
if.then420: ; preds = %while.end418
- call void @jbd2_journal_abort(i32* %journal, i32 signext %err.4.lcssa) #1
+ call void @jbd2_journal_abort(ptr %journal, i32 signext %err.4.lcssa) #1
br label %if.end421
if.end421: ; preds = %if.then420, %while.end418
unreachable
}
-declare void @jbd2_journal_abort(i32*, i32 signext)
+declare void @jbd2_journal_abort(ptr, i32 signext)
-declare void @__brelse(i32*)
+declare void @__brelse(ptr)
attributes #0 = { nounwind }
attributes #1 = { nounwind }
; PPC64-NOT: stdu 1, -{{[0-9]+}}(1)
; PPC64: blr
-define i8* @smallstack() nounwind {
+define ptr @smallstack() nounwind {
entry:
%0 = alloca i8, i32 4
- ret i8* %0
+ ret ptr %0
}
; PPC32-LABEL: smallstack:
; PPC32: stwu 1, -16(1)
; PPC64-NOT: stdu 1, -{{[0-9]+}}(1)
; PPC64: blr
-define i8* @bigstack() nounwind {
+define ptr @bigstack() nounwind {
entry:
%0 = alloca i8, i32 290
- ret i8* %0
+ ret ptr %0
}
; PPC32-LABEL: bigstack:
; PPC32: stwu 1, -304(1)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le < %s | FileCheck %s
-define i64 @test1(i64* %a, i64* %b) {
+define i64 @test1(ptr %a, ptr %b) {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: stxsdx 34, 0, 5
; CHECK-NEXT: blr
entry:
- %lhs = load i64, i64* %a, align 8
- %rhs = load i64, i64* %b, align 8
+ %lhs = load i64, ptr %a, align 8
+ %rhs = load i64, ptr %b, align 8
%sum = add i64 %lhs, %rhs
%lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
%rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
%add = call <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8> %lhc, <16 x i8> %rhc)
%cb = bitcast <16 x i8> %add to <2 x i64>
%fv = extractelement <2 x i64> %cb, i32 0
- store i64 %fv, i64* %a, align 8
+ store i64 %fv, ptr %a, align 8
ret i64 %sum
}
-define i64 @test2(i64* %a, i64* %b) {
+define i64 @test2(ptr %a, ptr %b) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: stxsdx 34, 0, 5
; CHECK-NEXT: blr
entry:
- %lhs = load i64, i64* %a, align 8
- %rhs = load i64, i64* %b, align 8
+ %lhs = load i64, ptr %a, align 8
+ %rhs = load i64, ptr %b, align 8
%sum = add i64 %lhs, %rhs
%lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
%rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
%add = add <8 x i16> %lhc, %rhc
%cb = bitcast <8 x i16> %add to <2 x i64>
%fv = extractelement <2 x i64> %cb, i32 0
- store i64 %fv, i64* %a, align 8
+ store i64 %fv, ptr %a, align 8
ret i64 %sum
}
; Ensure that vec-ops with multiple uses aren't simplified.
-define signext i16 @vecop_uses(i16* %addr) {
+define signext i16 @vecop_uses(ptr %addr) {
; CHECK-LABEL: vecop_uses:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: extsh 3, 3
; CHECK-NEXT: blr
entry:
- %0 = bitcast i16* %addr to <16 x i16>*
- %1 = load <16 x i16>, <16 x i16>* %0, align 2
- %2 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %1)
- ret i16 %2
+ %0 = load <16 x i16>, ptr %addr, align 2
+ %1 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %0)
+ ret i16 %1
}
-define signext i32 @vecop_uses2([4 x i32]* %a, [4 x i32]* %b, [4 x i32]* %c) {
+define signext i32 @vecop_uses2(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: vecop_uses2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: stxvd2x 0, 0, 5
; CHECK-NEXT: blr
entry:
- %0 = bitcast [4 x i32]* %a to <4 x i32>*
- %1 = load <4 x i32>, <4 x i32>* %0, align 4
- %2 = bitcast [4 x i32]* %b to <4 x i32>*
- %3 = load <4 x i32>, <4 x i32>* %2, align 4
- %4 = mul <4 x i32> %3, %1
- %5 = bitcast [4 x i32]* %c to <4 x i32>*
- store <4 x i32> %4, <4 x i32>* %5, align 4
- %6 = extractelement <4 x i32> %1, i32 3
- ret i32 %6
+ %0 = load <4 x i32>, ptr %a, align 4
+ %1 = load <4 x i32>, ptr %b, align 4
+ %2 = mul <4 x i32> %1, %0
+ store <4 x i32> %2, ptr %c, align 4
+ %3 = extractelement <4 x i32> %0, i32 3
+ ret i32 %3
}
declare <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8>, <16 x i8>)
vector.body:
%index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
- %0 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index
- %1 = bitcast i32* %0 to <4 x i32>*
- %wide.load = load <4 x i32>, <4 x i32>* %1, align 8
- %2 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index
- %3 = bitcast i32* %2 to <4 x i32>*
- %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 8
- %4 = add nsw <4 x i32> %wide.load13, %wide.load
- %5 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index
- %6 = bitcast i32* %5 to <4 x i32>*
- %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 8
- %7 = mul nsw <4 x i32> %4, %wide.load14
- %8 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index
- %9 = bitcast i32* %8 to <4 x i32>*
- store <4 x i32> %7, <4 x i32>* %9, align 8
+ %0 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 8
+ %1 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index
+ %wide.load13 = load <4 x i32>, ptr %1, align 8
+ %2 = add nsw <4 x i32> %wide.load13, %wide.load
+ %3 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index
+ %wide.load14 = load <4 x i32>, ptr %3, align 8
+ %4 = mul nsw <4 x i32> %2, %wide.load14
+ %5 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index
+ store <4 x i32> %4, ptr %5, align 8
%index.next = add nuw nsw i64 %index, 4
- %10 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next
- %11 = bitcast i32* %10 to <4 x i32>*
- %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 8
- %12 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next
- %13 = bitcast i32* %12 to <4 x i32>*
- %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 8
- %14 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
- %15 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next
- %16 = bitcast i32* %15 to <4 x i32>*
- %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 8
- %17 = mul nsw <4 x i32> %14, %wide.load14.1
- %18 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next
- %19 = bitcast i32* %18 to <4 x i32>*
- store <4 x i32> %17, <4 x i32>* %19, align 8
+ %6 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next
+ %wide.load.1 = load <4 x i32>, ptr %6, align 8
+ %7 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next
+ %wide.load13.1 = load <4 x i32>, ptr %7, align 8
+ %8 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
+ %9 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next
+ %wide.load14.1 = load <4 x i32>, ptr %9, align 8
+ %10 = mul nsw <4 x i32> %8, %wide.load14.1
+ %11 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next
+ store <4 x i32> %10, ptr %11, align 8
%index.next.1 = add nuw nsw i64 %index.next, 4
- %20 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.1
- %21 = bitcast i32* %20 to <4 x i32>*
- %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 8
- %22 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.1
- %23 = bitcast i32* %22 to <4 x i32>*
- %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 8
- %24 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
- %25 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.1
- %26 = bitcast i32* %25 to <4 x i32>*
- %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 8
- %27 = mul nsw <4 x i32> %24, %wide.load14.2
- %28 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.1
- %29 = bitcast i32* %28 to <4 x i32>*
- store <4 x i32> %27, <4 x i32>* %29, align 8
+ %12 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.1
+ %wide.load.2 = load <4 x i32>, ptr %12, align 8
+ %13 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.1
+ %wide.load13.2 = load <4 x i32>, ptr %13, align 8
+ %14 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
+ %15 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.1
+ %wide.load14.2 = load <4 x i32>, ptr %15, align 8
+ %16 = mul nsw <4 x i32> %14, %wide.load14.2
+ %17 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.1
+ store <4 x i32> %16, ptr %17, align 8
%index.next.2 = add nuw nsw i64 %index.next.1, 4
- %30 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.2
- %31 = bitcast i32* %30 to <4 x i32>*
- %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 8
- %32 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.2
- %33 = bitcast i32* %32 to <4 x i32>*
- %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 8
- %34 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
- %35 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.2
- %36 = bitcast i32* %35 to <4 x i32>*
- %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 8
- %37 = mul nsw <4 x i32> %34, %wide.load14.3
- %38 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.2
- %39 = bitcast i32* %38 to <4 x i32>*
- store <4 x i32> %37, <4 x i32>* %39, align 8
+ %18 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.2
+ %wide.load.3 = load <4 x i32>, ptr %18, align 8
+ %19 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.2
+ %wide.load13.3 = load <4 x i32>, ptr %19, align 8
+ %20 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
+ %21 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.2
+ %wide.load14.3 = load <4 x i32>, ptr %21, align 8
+ %22 = mul nsw <4 x i32> %20, %wide.load14.3
+ %23 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.2
+ store <4 x i32> %22, ptr %23, align 8
%index.next.3 = add nuw nsw i64 %index.next.2, 4
- %40 = icmp eq i64 %index.next.3, 4096
- br i1 %40, label %for.end, label %vector.body
+ %24 = icmp eq i64 %index.next.3, 4096
+ br i1 %24, label %for.end, label %vector.body
for.end:
ret void
; Function Attrs: nounwind
define void @cfoo() {
entry:
- %0 = load <16 x i8>, <16 x i8>* @vc, align 8
+ %0 = load <16 x i8>, ptr @vc, align 8
%vecinit30 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- store <16 x i8> %vecinit30, <16 x i8>* @vcr, align 8
+ store <16 x i8> %vecinit30, ptr @vcr, align 8
ret void
}
; Function Attrs: nounwind
define void @sfoo() {
entry:
- %0 = load <8 x i16>, <8 x i16>* @vs, align 8
+ %0 = load <8 x i16>, ptr @vs, align 8
%vecinit14 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
- store <8 x i16> %vecinit14, <8 x i16>* @vsr, align 8
+ store <8 x i16> %vecinit14, ptr @vsr, align 8
ret void
}
; Function Attrs: nounwind
define void @ifoo() {
entry:
- %0 = load <4 x i32>, <4 x i32>* @vi, align 8
+ %0 = load <4 x i32>, ptr @vi, align 8
%vecinit6 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %vecinit6, <4 x i32>* @vir, align 8
+ store <4 x i32> %vecinit6, ptr @vir, align 8
ret void
}
entry:
%0 = insertelement <2 x double> undef, double %s, i32 0
%1 = shufflevector <2 x double> %0, <2 x double> undef, <2 x i32> zeroinitializer
- %2 = load <2 x double>, <2 x double>* @a, align 16
+ %2 = load <2 x double>, ptr @a, align 16
%3 = fadd <2 x double> %0, %2
- store <2 x double> %3, <2 x double>* @b, align 16
+ store <2 x double> %3, ptr @b, align 16
ret void
}
; bar: store the constant vector <0, 1> into a 16-byte stack slot and
; pass the slot's address to @foo, bracketed by lifetime markers.
; The (+) side of the hunk drops the bitcast and the element-0 GEP that
; opaque pointers make redundant, and switches to the .p0 lifetime
; intrinsics; runtime behavior is unchanged.
define void @bar() {
entry:
%x = alloca <2 x i64>, align 16
- %0 = bitcast <2 x i64>* %x to i8*
- call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
- %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %x, i64 0, i64 0
- store <2 x i64> <i64 0, i64 1>, <2 x i64>* %x, align 16
- call void @foo(i64* %arrayidx)
- call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
+ call void @llvm.lifetime.start.p0(i64 16, ptr %x)
+ store <2 x i64> <i64 0, i64 1>, ptr %x, align 16
+ call void @foo(ptr %x)
+ call void @llvm.lifetime.end.p0(i64 16, ptr %x)
ret void
}
; CHECK: stxvd2x
; CHECK-NOT: xxswapd
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @foo(i64*)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @foo(ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; bar0: insert scalar %y into lane 0 of the <2 x double> loaded from @x
; and store the result to @z (pointer-spelling-only diff hunk).
define void @bar0(double %y) {
entry:
- %0 = load <2 x double>, <2 x double>* @x, align 16
+ %0 = load <2 x double>, ptr @x, align 16
%vecins = insertelement <2 x double> %0, double %y, i32 0
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; bar1: same as bar0 but inserts %y into lane 1 of @x's vector before
; storing to @z (pointer-spelling-only diff hunk).
define void @bar1(double %y) {
entry:
- %0 = load <2 x double>, <2 x double>* @x, align 16
+ %0 = load <2 x double>, ptr @x, align 16
%vecins = insertelement <2 x double> %0, double %y, i32 1
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; baz0: blend @z and @x with mask <0, 2> -- result lane 0 = @z lane 0,
; result lane 1 = @x lane 0 -- and store back to @z
; (pointer-spelling-only diff hunk).
define void @baz0() {
entry:
- %0 = load <2 x double>, <2 x double>* @z, align 16
- %1 = load <2 x double>, <2 x double>* @x, align 16
+ %0 = load <2 x double>, ptr @z, align 16
+ %1 = load <2 x double>, ptr @x, align 16
%vecins = shufflevector <2 x double> %0, <2 x double> %1, <2 x i32> <i32 0, i32 2>
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; baz1: blend @z and @x with mask <3, 1> -- result lane 0 = @x lane 1,
; result lane 1 = @z lane 1 -- and store back to @z
; (pointer-spelling-only diff hunk).
define void @baz1() {
entry:
- %0 = load <2 x double>, <2 x double>* @z, align 16
- %1 = load <2 x double>, <2 x double>* @x, align 16
+ %0 = load <2 x double>, ptr @z, align 16
+ %1 = load <2 x double>, ptr @x, align 16
%vecins = shufflevector <2 x double> %0, <2 x double> %1, <2 x i32> <i32 3, i32 1>
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; CHECK-P9-NOVECTOR-NEXT: stxvd2x vs0, 0, r3
; CHECK-P9-NOVECTOR-NEXT: blr
entry:
- %0 = load <2 x double>, <2 x double>* @x, align 16
- %1 = load double, double* @y, align 8
+ %0 = load <2 x double>, ptr @x, align 16
+ %1 = load double, ptr @y, align 8
%vecins = insertelement <2 x double> %0, double %1, i32 0
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; CHECK-P9-NOVECTOR-NEXT: stxvd2x vs0, 0, r3
; CHECK-P9-NOVECTOR-NEXT: blr
entry:
- %0 = load <2 x double>, <2 x double>* @x, align 16
- %1 = load double, double* @y, align 8
+ %0 = load <2 x double>, ptr @x, align 16
+ %1 = load double, ptr @y, align 8
%vecins = insertelement <2 x double> %0, double %1, i32 1
- store <2 x double> %vecins, <2 x double>* @z, align 16
+ store <2 x double> %vecins, ptr @z, align 16
ret void
}
; CHECK: blr
; Function Attrs: noinline
; zg: loads a <2 x double> from %.ka0000_391, mirrors it to %JJ, reads
; raw (align-1) data through the byte pointers %.G0011_640.0 and
; %.G0012_642.0, combines them with fmul/fadd/shuffle/fsub, stores the
; result back through %.G0012_642.0, then re-reads %JJ as i32.
; NOTE(review): %Z.L.JA291.9 through %Z.L.JA291.13 are defined in lines
; elided from this hunk; the uses at %Z.L.JA291.14/.15 depend on that
; omitted portion -- confirm against the full diff.
; The (+) side removes the bitcasts made redundant by opaque pointers.
-define void @zg(i8* %.G0011_640.0, i8* %.G0012_642.0, <2 x double>* %JJ, <2 x double>* %.ka0000_391, double %.unpack, double %.unpack66) #0 {
+define void @zg(ptr %.G0011_640.0, ptr %.G0012_642.0, ptr %JJ, ptr %.ka0000_391, double %.unpack, double %.unpack66) #0 {
L.JA291:
- %Z.L.JA291.2 = load <2 x double>, <2 x double>* %.ka0000_391, align 16
- store <2 x double> %Z.L.JA291.2, <2 x double>* %JJ, align 8
- %Z.L.JA291.3 = bitcast i8* %.G0012_642.0 to <2 x double>*
- %Z.L.JA291.4 = load <2 x double>, <2 x double>* %Z.L.JA291.3, align 1
- %.elt136 = bitcast i8* %.G0011_640.0 to double*
- %.unpack137 = load double, double* %.elt136, align 1
- %.elt138 = getelementptr inbounds i8, i8* %.G0011_640.0, i64 8
- %Z.L.JA291.5 = bitcast i8* %.elt138 to double*
- %.unpack139 = load double, double* %Z.L.JA291.5, align 1
+ %Z.L.JA291.2 = load <2 x double>, ptr %.ka0000_391, align 16
+ store <2 x double> %Z.L.JA291.2, ptr %JJ, align 8
+ %Z.L.JA291.4 = load <2 x double>, ptr %.G0012_642.0, align 1
+ %.unpack137 = load double, ptr %.G0011_640.0, align 1
+ %.elt138 = getelementptr inbounds i8, ptr %.G0011_640.0, i64 8
+ %.unpack139 = load double, ptr %.elt138, align 1
%Z.L.JA291.6 = insertelement <2 x double> undef, double %.unpack137, i32 0
%Z.L.JA291.7 = insertelement <2 x double> %Z.L.JA291.6, double %.unpack137, i32 1
%Z.L.JA291.8 = fmul <2 x double> %Z.L.JA291.2, %Z.L.JA291.7
%Z.L.JA291.14 = fadd <2 x double> %Z.L.JA291.8, %Z.L.JA291.12
%Z.L.JA291.15 = shufflevector <2 x double> %Z.L.JA291.13, <2 x double> %Z.L.JA291.14, <2 x i32> <i32 0, i32 3>
%Z.L.JA291.16 = fsub <2 x double> %Z.L.JA291.4, %Z.L.JA291.15
- %Z.L.JA291.17 = bitcast i8* %.G0012_642.0 to <2 x double>*
- store <2 x double> %Z.L.JA291.16, <2 x double>* %Z.L.JA291.17, align 8
- %.. = bitcast <2 x double>* %JJ to i32*
- %.pre = load i32, i32* %.., align 32
+ store <2 x double> %Z.L.JA291.16, ptr %.G0012_642.0, align 8
+ %.pre = load i32, ptr %JJ, align 32
ret void
}
; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr10 \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-P9UP
; test: loads 16 bytes from %Src with the big-endian VSX intrinsic
; llvm.ppc.vsx.lxvd2x.be and stores the <2 x double> result to %Tgt
; with align 1 (unaligned).  The (+) side passes %Src/%Tgt as `ptr`
; directly instead of going through i64* bitcasts; the CHECK lines
; expect lxvd2x / stxv codegen either way.
-define dso_local void @test(i64* %Src, i64* nocapture %Tgt) local_unnamed_addr {
+define dso_local void @test(ptr %Src, ptr nocapture %Tgt) local_unnamed_addr {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9UP-NEXT: stxv 0, 0(4)
; CHECK-P9UP-NEXT: blr
entry:
- %0 = bitcast i64* %Src to i8*
- %1 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %0) #2
- %2 = bitcast i64* %Tgt to <2 x double>*
- store <2 x double> %1, <2 x double>* %2, align 1
+ %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %Src) #2
+ store <2 x double> %0, ptr %Tgt, align 1
ret void
}
-declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8*) #1
+declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr) #1
; CHECK: bclr
; CHECK: # %if.then
; Function Attrs: nounwind
-define void @__fmax_double3_3D_exec(<2 x double>* %input6, i1 %bool1, i1 %bool2) #0 {
+define void @__fmax_double3_3D_exec(ptr %input6, i1 %bool1, i1 %bool2) #0 {
entry:
br i1 %bool1, label %if.then.i, label %fmax_double3.exit
if.then.i: ; preds = %entry
- store <2 x double> zeroinitializer, <2 x double>* %input6, align 32
+ store <2 x double> zeroinitializer, ptr %input6, align 32
br label %fmax_double3.exit
fmax_double3.exit: ; preds = %if.then.i, %entry
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
declare void @f1()
declare void @f2()
br label %dup2
dup1: ; preds = %sw.0, %sw.1
- call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0(i64 8, ptr nonnull undef) #0
unreachable
dup2: ; preds = %if.then, %if.else
- call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0(i64 8, ptr nonnull undef) #0
unreachable
}
;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
;CHECK: [[OPT4LABEL]]:
;CHECK: b .[[LATCHLABEL]]
-define void @loop_test(i32* %tags, i32 %count) {
+define void @loop_test(ptr %tags, i32 %count) {
entry:
br label %for.check
for.check:
%count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
%done.count = icmp ugt i32 %count.loop, 0
- %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
- %tag = load i32, i32* %tag_ptr
+ %tag_ptr = getelementptr inbounds i32, ptr %tags, i32 %count
+ %tag = load i32, ptr %tag_ptr
%done.tag = icmp eq i32 %tag, 0
%done = and i1 %done.count, %done.tag
br i1 %done, label %test1, label %exit, !prof !1
; The tests check the behavior of the tail call decision when the callee is speculatable.
; Callee should be tail called in this function since it is at a tail call position.
-define dso_local double @speculatable_callee_return_use_only (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_return_use_only (ptr nocapture %res, double %a) #0 {
; CHECK-LABEL: speculatable_callee_return_use_only:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: b callee
}
; Callee should not be tail called since it is not at a tail call position.
; The call result is stored through %res after the call, so the call is
; not in tail position; the CHECK lines expect a full call sequence
; (mflr ... blr) rather than a tail-call branch.
-define dso_local void @speculatable_callee_non_return_use_only (double* nocapture %res, double %a) #0 {
+define dso_local void @speculatable_callee_non_return_use_only (ptr nocapture %res, double %a) #0 {
; CHECK-LABEL: speculatable_callee_non_return_use_only:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: blr
entry:
%call = tail call double @callee(double %a) #2
- store double %call, double* %res, align 8
+ store double %call, ptr %res, align 8
ret void
}
; Callee should not be tail called since it is not at a tail call position.
; The call result is both stored through %res and returned, so the call
; is not in tail position; CHECK expects a full call sequence.
-define dso_local double @speculatable_callee_multi_use (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_multi_use (ptr nocapture %res, double %a) #0 {
; CHECK-LABEL: speculatable_callee_multi_use:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: blr
entry:
%call = tail call double @callee(double %a) #2
- store double %call, double* %res, align 8
+ store double %call, ptr %res, align 8
ret double %call
}
; Callee should not be tail called since it is not at a tail call position.
; FIXME: A speculatable callee can be tail called if it is moved into a valid tail call position.
; A store of the constant 5.2 sits between the call and the return, so
; the call is not in tail position even though its result is returned.
-define dso_local double @speculatable_callee_intermediate_instructions (double* nocapture %res, double %a) #0 {
+define dso_local double @speculatable_callee_intermediate_instructions (ptr nocapture %res, double %a) #0 {
; CHECK-LABEL: speculatable_callee_intermediate_instructions:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
entry:
%call = tail call double @callee(double %a) #2
- store double 5.2, double* %res, align 8
+ store double 5.2, ptr %res, align 8
ret double %call
}
%class.basic_string.11.42.73 = type { %"class.__gnu_cxx::__versa_string.10.41.72" }
%"class.__gnu_cxx::__versa_string.10.41.72" = type { %"class.__gnu_cxx::__sso_string_base.9.40.71" }
%"class.__gnu_cxx::__sso_string_base.9.40.71" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69", i64, %union.anon.8.39.70 }
-%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { i8* }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { ptr }
%union.anon.8.39.70 = type { i64, [8 x i8] }
-declare void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+declare void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
; TestBar: forwards its sret(%class.basic_string.11.42.73) argument
; straight to @TestBaz (pointer-spelling-only diff hunk).
-define dso_local void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define dso_local void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
bb:
- call void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+ call void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
ret void
}
; TestFoo: initializes the SSO string inside the sret argument -- stores
; the address of the inline union (field index 2) into the pointer field
; at offset 0, memcpy's 13 bytes (from an undef source; this test cares
; about codegen, not the data), sets the length field (index 1) to 13,
; and writes a 0 byte at offset 5 of the union's byte array -- then
; tail-calls @TestBar with the same sret pointer.  CHECK expects the
; tail call to become a TC_RETURNd8.  The (+) side drops the bitcasts
; and uses the opaque-pointer memcpy intrinsic name.
-define dso_local void @TestFoo(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define dso_local void @TestFoo(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
; CHECK-LABEL: TestFoo:
; CHECK: #TC_RETURNd8 TestBar 0
bb:
- %tmp = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2
- %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70**
- store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8
- %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false)
- %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1
- store i64 13, i64* %tmp3, align 8
- %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
- store i8 0, i8* %tmp4, align 1
- tail call void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+ %tmp = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2
+ store ptr %tmp, ptr %arg, align 8
+ tail call void @llvm.memcpy.p0.p0.i64(ptr %tmp, ptr nonnull undef, i64 13, i1 false)
+ %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 1
+ store i64 13, ptr %tmp3, align 8
+ %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
+ store i8 0, ptr %tmp4, align 1
+ tail call void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
attributes #0 = { argmemonly nounwind }
br i1 undef, label %dummy.exit.i, label %if.then.i.i.i636
if.then.i.i.i636: ; preds = %if.then14.i
- %0 = load i8*, i8** undef, align 8
+ %0 = load ptr, ptr undef, align 8
call void @free() #3
br label %dummy.exit.i
declare signext i32 @fn2(...) local_unnamed_addr #1
; Function Attrs: nounwind
; testCompare1 (ugt variant): loads the first byte of this function's
; own address (deliberately odd test input -- the old side spells it as
; a bitcast of the function pointer) and the first byte of %arg1, masks
; each to bit 0, compares them with unsigned greater-than, and
; tail-calls the varargs @fn2 with the zext'd i1.  The (+) side folds
; the function-pointer cast, the element-0 GEP, and the callee cast
; away under opaque pointers.
-define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) nounwind {
+define dso_local i32 @testCompare1(ptr nocapture readonly %arg1) nounwind {
; BE-LABEL: testCompare1:
; BE: # %bb.0: # %entry
; BE-NEXT: mflr r0
; CHECK-P10-CMP-BE-NEXT: #TC_RETURNd8 fn2@notoc 0
entry:
- %bf.load = load i8, i8* bitcast (i32 (%struct.tree_common*)* @testCompare1 to i8*), align 4
+ %bf.load = load i8, ptr @testCompare1, align 4
%bf.clear = and i8 %bf.load, 1
- %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %arg1, i64 0, i32 0
- %bf.load1 = load i8, i8* %0, align 4
+ %bf.load1 = load i8, ptr %arg1, align 4
%bf.clear2 = and i8 %bf.load1, 1
%cmp = icmp ugt i8 %bf.clear, %bf.clear2
%conv = zext i1 %cmp to i32
- %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 (i32)*)(i32 signext %conv) #2
+ %call = tail call signext i32 @fn2(i32 signext %conv) #2
ret i32 undef
}
declare signext i32 @fn2(...) local_unnamed_addr #1
; Function Attrs: nounwind
; testCompare1 (ult variant): identical shape to the ugt variant above
; in this test corpus -- compare bit 0 of the function's own first byte
; against bit 0 of %arg1's first byte, here with unsigned less-than,
; then tail-call varargs @fn2 with the zext'd result.  The (+) side
; folds the pointer/callee casts away under opaque pointers.
-define dso_local i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) nounwind {
+define dso_local i32 @testCompare1(ptr nocapture readonly %arg1) nounwind {
; BE-LABEL: testCompare1:
; BE: # %bb.0: # %entry
; BE-NEXT: mflr r0
; CHECK-P10-CMP-BE-NEXT: b fn2@notoc
; CHECK-P10-CMP-BE-NEXT: #TC_RETURNd8 fn2@notoc 0
entry:
- %bf.load = load i8, i8* bitcast (i32 (%struct.tree_common*)* @testCompare1 to i8*), align 4
+ %bf.load = load i8, ptr @testCompare1, align 4
%bf.clear = and i8 %bf.load, 1
- %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %arg1, i64 0, i32 0
- %bf.load1 = load i8, i8* %0, align 4
+ %bf.load1 = load i8, ptr %arg1, align 4
%bf.clear2 = and i8 %bf.load1, 1
%cmp = icmp ult i8 %bf.clear, %bf.clear2
%conv = zext i1 %cmp to i32
- %call = tail call signext i32 bitcast (i32 (...)* @fn2 to i32 (i32)*)(i32 signext %conv) #2
+ %call = tail call signext i32 @fn2(i32 signext %conv) #2
ret i32 undef
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, -1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, -1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
; CHECK-TBD-LABEL: @test_igeuc_sext_store
; CHECK-TBD: subf [[REG1:r[0-9]+]], r3, r4
entry:
%cmp = icmp uge i8 %a, 0
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i8 %a, 0
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob
+ store i32 %conv, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, 0
%conv1 = zext i1 %cmp to i32
- store i32 %conv1, i32* @glob
+ store i32 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, 0
%conv1 = sext i1 %cmp to i32
- store i32 %conv1, i32* @glob
+ store i32 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, 0
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, 0
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp sgt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sgt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sgt i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sgt i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sgt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sgt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sgt i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sgt i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sgt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sgt i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sgt i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob
+ store i8 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob
+ store i8 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%sub = zext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = zext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob
+ store i16 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob
+ store i16 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp slt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp slt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp slt i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp slt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp slt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp slt i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 0
%conv2 = sext i1 %cmp to i64
- store i64 %conv2, i64* @glob, align 8
+ store i64 %conv2, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp slt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp slt i16 %a, 0
%sub = sext i1 %cmp to i16
- store i16 %sub, i16* @glob, align 2
+ store i16 %sub, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp eq i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, -1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, -1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i8 %a, 0
%conv1 = zext i1 %cmp to i8
- store i8 %conv1, i8* @glob
+ store i8 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i8 %a, 0
%conv1 = sext i1 %cmp to i8
- store i8 %conv1, i8* @glob
+ store i8 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob
+ store i32 %conv, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, 0
%sub = zext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp uge i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
; CHECK-NEXT: std r4, glob@toc@l(r3)
; CHECK-NEXT: blr
entry:
- store i64 -1, i64* @glob
+ store i64 -1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, 0
%conv1 = zext i1 %cmp to i16
- store i16 %conv1, i16* @glob
+ store i16 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp uge i16 %a, 0
%conv1 = sext i1 %cmp to i16
- store i16 %conv1, i16* @glob
+ store i16 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sgt i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ugt i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ne i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob, align 1
+ store i8 %conv2, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ugt i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ne i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ugt i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob, align 2
+ store i16 %conv2, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sle i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sle i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sle i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 1
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp sle i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob
+ store i8 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i8 %a, 0
%conv2 = zext i1 %cmp to i8
- store i8 %conv2, i8* @glob
+ store i8 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp ule i8 %a, 0
%conv2 = sext i1 %cmp to i8
- store i8 %conv2, i8* @glob
+ store i8 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob
+ store i32 %conv, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob
+ store i32 %conv, ptr @glob
ret void
}
entry:
%cmp = icmp ule i32 %a, 0
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob
+ store i32 %sub, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob
+ store i64 %conv1, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob
+ store i16 %conv3, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, 0
%conv2 = zext i1 %cmp to i16
- store i16 %conv2, i16* @glob
+ store i16 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp ule i16 %a, 0
%conv2 = sext i1 %cmp to i16
- store i16 %conv2, i16* @glob
+ store i16 %conv2, ptr @glob
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp slt i64 %a, 0
%sub = sext i1 %cmp to i64
- store i64 %sub, i64* @glob, align 8
+ store i64 %sub, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = zext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ult i8 %a, %b
%conv3 = sext i1 %cmp to i8
- store i8 %conv3, i8* @glob, align 1
+ store i8 %conv3, ptr @glob, align 1
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @glob, align 4
+ store i32 %conv, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ult i32 %a, %b
%sub = sext i1 %cmp to i32
- store i32 %sub, i32* @glob, align 4
+ store i32 %sub, ptr @glob, align 4
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- store i32 0, i32* @glob, align 4
+ store i32 0, ptr @glob, align 4
ret void
}
; CHECK-NEXT: stw r4, 0(r3)
; CHECK-NEXT: blr
entry:
- store i32 0, i32* @glob, align 4
+ store i32 0, ptr @glob, align 4
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = zext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ult i16 %a, %b
%conv3 = sext i1 %cmp to i16
- store i16 %conv3, i16* @glob, align 2
+ store i16 %conv3, ptr @glob, align 2
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, %b
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = zext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
entry:
%cmp = icmp ne i64 %a, 0
%conv1 = sext i1 %cmp to i64
- store i64 %conv1, i64* @glob, align 8
+ store i64 %conv1, ptr @glob, align 8
ret void
}
; CHECK: bl .foo
; CHECK-NEXT: nop
- call void bitcast (void (...)* @foo to void ()*)()
+ call void @foo()
ret void
}
define i32 @main() {
entry:
%0 = call i32 @foo()
- %1 = call i32 bitcast (i32 (...)* @extern_foo to i32 ()*)()
+ %1 = call i32 @extern_foo()
%2 = call i32 @static_foo()
%3 = add nsw i32 %0, %1
%4 = add nsw i32 %3, %2
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefix=CHECK-64
; Function Attrs: nounwind readnone
-declare i8* @llvm.thread.pointer() #1
+declare ptr @llvm.thread.pointer() #1
; Exercises the llvm.thread.pointer intrinsic: the thread pointer must come back
; in r3 straight from the TLS register (r2 on 32-bit, r13 on 64-bit, per the
; CHECK lines below). The -/+ lines migrate i8* to the opaque ptr type.
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
; CHECK-32-LABEL: @thread_pointer
; CHECK-32: mr 3, 2
; CHECK-32: blr
; CHECK-64-LABEL: @thread_pointer
; CHECK-64: mr 3, 13
; CHECK-64: blr
- %1 = tail call i8* @llvm.thread.pointer()
- ret i8* %1
+ %1 = tail call ptr @llvm.thread.pointer()
+ ret ptr %1
}
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
-%"class.llvm::PrettyStackTraceEntry" = type { i32 (...)**, %"class.llvm::PrettyStackTraceEntry"* }
+%"class.llvm::PrettyStackTraceEntry" = type { ptr, ptr }
-@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x i8*] [i8* null, i8* null, i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD2Ev to i8*), i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD0Ev to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)], align 8
-@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global %"class.llvm::PrettyStackTraceEntry"* null, align 8
+@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x ptr] [ptr null, ptr null, ptr @_ZN4llvm21PrettyStackTraceEntryD2Ev, ptr @_ZN4llvm21PrettyStackTraceEntryD0Ev, ptr @__cxa_pure_virtual], align 8
+@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global ptr null, align 8
@.str = private unnamed_addr constant [87 x i8] c"PrettyStackTraceHead == this && \22Pretty stack trace entry destruction is out of order\22\00", align 1
@.str1 = private unnamed_addr constant [64 x i8] c"/home/wschmidt/llvm/llvm-test2/lib/Support/PrettyStackTrace.cpp\00", align 1
@__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev = private unnamed_addr constant [62 x i8] c"virtual llvm::PrettyStackTraceEntry::~PrettyStackTraceEntry()\00", align 1
-declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr
+declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(ptr %this) unnamed_addr
declare void @__cxa_pure_virtual()
-declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*)
-declare void @_ZdlPv(i8*)
+declare void @__assert_fail(ptr, ptr, i32 zeroext, ptr)
+declare void @_ZdlPv(ptr)
; Deleting destructor (D0) of llvm::PrettyStackTraceEntry, reduced from
; PrettyStackTrace.cpp. It re-installs the vtable pointer (slot 2 of
; @_ZTVN4llvm21PrettyStackTraceEntryE), asserts that this entry is the current
; head of the thread-local stack-trace list, pops it by copying its NextEntry
; field into @_ZL20PrettyStackTraceHead, then frees the object via operator
; delete (@_ZdlPv). With opaque pointers the vtable store no longer needs a
; bitcast and the head update no longer needs pointer-to-i64 casts, so the
; +-side IR is shorter and the value numbering (%0/%1) shifts accordingly.
-define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr align 2 {
+define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(ptr %this) unnamed_addr align 2 {
entry:
; Store the vtable pointer into the object's first word.
- %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
- store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
- %1 = load %"class.llvm::PrettyStackTraceEntry"*, %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
- %cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
+ store ptr getelementptr inbounds ([5 x ptr], ptr @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2), ptr %this, align 8
+ %0 = load ptr, ptr @_ZL20PrettyStackTraceHead, align 8
+ %cmp.i = icmp eq ptr %0, %this
br i1 %cmp.i, label %_ZN4llvm21PrettyStackTraceEntryD2Ev.exit, label %cond.false.i
; Out-of-order destruction: fail the assert (message text in @.str) and trap.
cond.false.i: ; preds = %entry
- tail call void @__assert_fail(i8* getelementptr inbounds ([87 x i8], [87 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([64 x i8], [64 x i8]* @.str1, i64 0, i64 0), i32 zeroext 119, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev, i64 0, i64 0))
+ tail call void @__assert_fail(ptr @.str, ptr @.str1, i32 zeroext 119, ptr @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev)
unreachable
; Pop: head = this->NextEntry (copied as an i64-sized word), then delete this.
_ZN4llvm21PrettyStackTraceEntryD2Ev.exit: ; preds = %entry
- %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
- %2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
- %3 = load i64, i64* %2, align 8
- store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
- %4 = bitcast %"class.llvm::PrettyStackTraceEntry"* %this to i8*
- tail call void @_ZdlPv(i8* %4)
+ %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", ptr %this, i64 0, i32 1
+ %1 = load i64, ptr %NextEntry.i.i, align 8
+ store i64 %1, ptr @_ZL20PrettyStackTraceHead, align 8
+ tail call void @_ZdlPv(ptr %this)
ret void
}
; Carries debug metadata (!dbg !12/!16); stores 0 into a dead local and
; returns the current value of global @i. Only the pointer types in the
; store/load change under the opaque-pointer migration.
define i32 @foo() !dbg !12 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- %0 = load i32, i32* @i, align 4, !dbg !16
+ store i32 0, ptr %retval, align 4
+ %0 = load i32, ptr @i, align 4, !dbg !16
ret i32 %0, !dbg !16
}
; Returns the value of global @a; the retval slot is stored but never read.
; The diff only rewrites i32* to ptr in the store/load pair.
define signext i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* @a, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
; Same shape as @main above but reads global @a2 instead of @a.
define signext i32 @main2() nounwind {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* @a2, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr @a2, align 4
ret i32 %0
}
; CHECK-NEXT: lbzx 3, 3, var_char@tls
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @var_char, align 1, !tbaa !4
+ %0 = load i8, ptr @var_char, align 1, !tbaa !4
ret i8 %0
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %a to i8
- store i8 %conv, i8* @var_char, align 1, !tbaa !4
+ store i8 %conv, ptr @var_char, align 1, !tbaa !4
ret void
}
; CHECK-NEXT: stbx 5, 4, var_char@tls
; CHECK-NEXT: blr
entry:
- %0 = load i8, i8* @var_char, align 1, !tbaa !4
+ %0 = load i8, ptr @var_char, align 1, !tbaa !4
%add = add i8 %0, %a
- store i8 %add, i8* @var_char, align 1, !tbaa !4
+ store i8 %add, ptr @var_char, align 1, !tbaa !4
ret i8 %add
}
; CHECK-NEXT: lhzx 3, 3, var_short@tls
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* @var_short, align 2, !tbaa !7
+ %0 = load i16, ptr @var_short, align 2, !tbaa !7
ret i16 %0
}
; CHECK-NEXT: blr
entry:
%conv = trunc i32 %a to i16
- store i16 %conv, i16* @var_short, align 2, !tbaa !7
+ store i16 %conv, ptr @var_short, align 2, !tbaa !7
ret void
}
; CHECK-NEXT: sthx 5, 4, var_short@tls
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* @var_short, align 2, !tbaa !7
+ %0 = load i16, ptr @var_short, align 2, !tbaa !7
%add = add i16 %0, %a
- store i16 %add, i16* @var_short, align 2, !tbaa !7
+ store i16 %add, ptr @var_short, align 2, !tbaa !7
ret i16 %add
}
; CHECK-NEXT: lwzx 3, 3, var_int@tls
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @var_int, align 4, !tbaa !9
+ %0 = load i32, ptr @var_int, align 4, !tbaa !9
ret i32 %0
}
; CHECK-NEXT: stwx 3, 4, var_int@tls
; CHECK-NEXT: blr
entry:
- store i32 %a, i32* @var_int, align 4, !tbaa !9
+ store i32 %a, ptr @var_int, align 4, !tbaa !9
ret void
}
; CHECK-NEXT: stwx 5, 4, var_int@tls
; CHECK-NEXT: blr
entry:
- %0 = load i32, i32* @var_int, align 4, !tbaa !9
+ %0 = load i32, ptr @var_int, align 4, !tbaa !9
%add = add nsw i32 %0, %a
- store i32 %add, i32* @var_int, align 4, !tbaa !9
+ store i32 %add, ptr @var_int, align 4, !tbaa !9
ret i32 %add
}
; CHECK-NEXT: ldx 3, 3, var_long_long@tls
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* @var_long_long, align 8, !tbaa !11
+ %0 = load i64, ptr @var_long_long, align 8, !tbaa !11
ret i64 %0
}
; CHECK-NEXT: blr
entry:
%conv = sext i32 %a to i64
- store i64 %conv, i64* @var_long_long, align 8, !tbaa !11
+ store i64 %conv, ptr @var_long_long, align 8, !tbaa !11
ret void
}
; CHECK-NEXT: stdx 3, 4, var_long_long@tls
; CHECK-NEXT: blr
entry:
- %0 = load i64, i64* @var_long_long, align 8, !tbaa !11
+ %0 = load i64, ptr @var_long_long, align 8, !tbaa !11
%add = add nsw i64 %0, %a
- store i64 %add, i64* @var_long_long, align 8, !tbaa !11
+ store i64 %add, ptr @var_long_long, align 8, !tbaa !11
ret i64 %add
}
; Test back-to-back stores of TLS variables to ensure call sequences no
; longer overlap.
-@__once_callable = external thread_local global i8**
-@__once_call = external thread_local global void ()*
+@__once_callable = external thread_local global ptr
+@__once_call = external thread_local global ptr
-define i64 @call_once(i64 %flag, i8* %ptr) {
+define i64 @call_once(i64 %flag, ptr %ptr) {
entry:
- %var = alloca i8*, align 8
- store i8* %ptr, i8** %var, align 8
- store i8** %var, i8*** @__once_callable, align 8
- store void ()* @__once_call_impl, void ()** @__once_call, align 8
+ %var = alloca ptr, align 8
+ store ptr %ptr, ptr %var, align 8
+ store ptr %var, ptr @__once_callable, align 8
+ store ptr @__once_call_impl, ptr @__once_call, align 8
ret i64 %flag
}
;OPT1: addis [[REG1:[1-9][0-9]*]], 13, a@tprel@ha
;OPT1-NEXT: li [[REG3:[0-9]+]], 42
;OPT1: stw [[REG3]], a@tprel@l([[REG1]])
- store i32 42, i32* @a, align 4
+ store i32 42, ptr @a, align 4
ret i32 0
}
define dso_local signext i32 @main2() nounwind {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* @a2, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr @a2, align 4
ret i32 %0
}
; RUN: llc -verify-machineinstrs -mtriple="powerpc64le-unknown-linux-gnu" -relocation-model=pic < %s | FileCheck %s
-@a = thread_local global i32* null, align 8
+@a = thread_local global ptr null, align 8
-define void @test_foo(i32* nocapture %x01, i32* nocapture %x02, i32* nocapture %x03, i32* nocapture %x04, i32* nocapture %x05, i32* nocapture %x06, i32* nocapture %x07, i32* nocapture %x08) #0 {
+define void @test_foo(ptr nocapture %x01, ptr nocapture %x02, ptr nocapture %x03, ptr nocapture %x04, ptr nocapture %x05, ptr nocapture %x06, ptr nocapture %x07, ptr nocapture %x08) #0 {
entry:
; CHECK-LABEL: test_foo:
; CHECK-DAG: stw 3, 0([[BACKUP_10]])
; CHECK: blr
- %0 = load i32*, i32** @a, align 8
- %cmp = icmp eq i32* %0, null
+ %0 = load ptr, ptr @a, align 8
+ %cmp = icmp eq ptr %0, null
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- store i32 0, i32* %x01, align 4
- store i32 0, i32* %x02, align 4
- store i32 0, i32* %x03, align 4
- store i32 0, i32* %x04, align 4
- store i32 0, i32* %x05, align 4
- store i32 0, i32* %x06, align 4
- store i32 0, i32* %x07, align 4
- store i32 0, i32* %x08, align 4
+ store i32 0, ptr %x01, align 4
+ store i32 0, ptr %x02, align 4
+ store i32 0, ptr %x03, align 4
+ store i32 0, ptr %x04, align 4
+ store i32 0, ptr %x05, align 4
+ store i32 0, ptr %x06, align 4
+ store i32 0, ptr %x07, align 4
+ store i32 0, ptr %x08, align 4
br label %return
return: ; preds = %entry, %if.end
; CHECK: mflr 0
; CHECK: __tls_get_addr
-%struct1.2.41 = type { %struct2.0.39, %struct3.1.40, %struct1.2.41* }
+%struct1.2.41 = type { %struct2.0.39, %struct3.1.40, ptr }
%struct2.0.39 = type { i64, i32, i32, i32, i32 }
%struct3.1.40 = type { [160 x i8] }
-@tls_var = external thread_local global %struct1.2.41*, align 8
+@tls_var = external thread_local global ptr, align 8
define i32 @foo_test() {
- %1 = load %struct1.2.41*, %struct1.2.41** @tls_var, align 8
+ %1 = load ptr, ptr @tls_var, align 8
- %2 = getelementptr inbounds %struct1.2.41, %struct1.2.41* %1, i64 0, i32 0, i32 3
- %3 = load i32, i32* %2, align 8
+ %2 = getelementptr inbounds %struct1.2.41, ptr %1, i64 0, i32 0, i32 3
+ %3 = load i32, ptr %2, align 8
%4 = add nsw i32 %3, -1
%5 = icmp eq i32 %4, 0
br i1 %5, label %bb7, label %foo.exit
bb7: ; preds = %3
- tail call void undef(%struct1.2.41* undef, %struct1.2.41* nonnull undef)
+ tail call void undef(ptr undef, ptr nonnull undef)
br label %foo.exit
foo.exit: ; preds = %8, %3, %2, %0
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s | FileCheck %s --check-prefix CHECK
@i1 = external constant i32 #0
-@i2 = constant i32* @i1 #0
+@i2 = constant ptr @i1 #0
define i32 @read() {
- %1 = load i32, i32* @i1, align 4
+ %1 = load i32, ptr @i1, align 4
ret i32 %1
}
-define i32** @retptr() {
- ret i32** @i2
+define ptr @retptr() {
+ ret ptr @i2
}
; CHECK: .read:
define dso_local void @write_int(i32 signext %in) {
entry:
- store i32 %in, i32* @i, align 4
+ store i32 %in, ptr @i, align 4
ret void
}
; CHECK32: name: write_int
define dso_local i64 @read_ll() {
entry:
- %0 = load i64, i64* @ll, align 8
+ %0 = load i64, ptr @ll, align 8
ret i64 %0
}
; CHECK32: name: read_ll
define dso_local float @read_float() {
entry:
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
ret float %0
}
; CHECK32: name: read_float
define dso_local void @write_double(double %in) {
entry:
- store double %in, double* @d, align 8
+ store double %in, ptr @d, align 8
ret void
}
; CHECK32: name: write_double
; TEST64-NEXT: stfd 1, 0(3)
-define dso_local nonnull i32* @addr() {
+define dso_local nonnull ptr @addr() {
entry:
- ret i32* @i
+ ret ptr @i
}
; CHECK32: name: addr
; CHECK32: %[[SCRATCH:[0-9]+]]:gprc = ADDItoc @i, $r2
; CHECK-P8-NEXT: lfs 1, .LCPI2_0@toc@l(4)
; CHECK-P8-NEXT: xsaddsp 1, 0, 1
; CHECK-P8-NEXT: blr
- %1 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* @FArr, i64 0, i64 3), align 4
+ %1 = load float, ptr getelementptr inbounds ([10 x float], ptr @FArr, i64 0, i64 3), align 4
%2 = fadd float %1, 0x400B333340000000
ret float %2
}
; CHECK-P8-NEXT: lfd 1, .LCPI4_0@toc@l(4)
; CHECK-P8-NEXT: xsadddp 1, 0, 1
; CHECK-P8-NEXT: blr
- %1 = load double, double* getelementptr inbounds ([200 x double], [200 x double]* @d, i64 0, i64 3), align 8
+ %1 = load double, ptr getelementptr inbounds ([200 x double], ptr @d, i64 0, i64 3), align 8
%2 = fadd double %1, 6.880000e+00
ret double %2
}
; CHECK-P8-NEXT: lfdx 0, 3, 4
; CHECK-P8-NEXT: xsadddp 1, 0, 1
; CHECK-P8-NEXT: blr
- %1 = load double, double* getelementptr inbounds ([20000 x double], [20000 x double]* @arr, i64 0, i64 4096), align 8
+ %1 = load double, ptr getelementptr inbounds ([20000 x double], ptr @arr, i64 0, i64 4096), align 8
%2 = fadd double %1, 6.880000e+00
ret double %2
}
; CHECK-P8-NEXT: xxswapd 34, 0
; CHECK-P8-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* getelementptr inbounds ([10 x <4 x i32>], [10 x <4 x i32>]* @vec_arr, i64 0, i64 2), align 16
+ %0 = load <4 x i32>, ptr getelementptr inbounds ([10 x <4 x i32>], ptr @vec_arr, i64 0, i64 2), align 16
ret <4 x i32> %0
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
-define dso_local void @test(void (i32)* nocapture %fp, i32 signext %Arg, i32 signext %Len) local_unnamed_addr #0 {
+define dso_local void @test(ptr nocapture %fp, i32 signext %Arg, i32 signext %Len) local_unnamed_addr #0 {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
module asm "\09.objc_class_name_NSBitmapImageRep=0"
module asm "\09.globl .objc_class_name_NSBitmapImageRep"
%struct.CGImage = type opaque
- %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" = type { %struct.NSBitmapImageRep*, void (%struct.__block_1*, %struct.CGImage*)* }
+ %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" = type { ptr, ptr }
%struct.NSBitmapImageRep = type { %struct.NSImageRep }
%struct.NSImageRep = type { }
%struct.NSZone = type opaque
- %struct.__block_1 = type { %struct.__invoke_impl, %struct.NSZone*, %struct.NSBitmapImageRep** }
+ %struct.__block_1 = type { %struct.__invoke_impl, ptr, ptr }
%struct.__builtin_trampoline = type { [40 x i8] }
- %struct.__invoke_impl = type { i8*, i32, i32, i8* }
+ %struct.__invoke_impl = type { ptr, i32, i32, ptr }
%struct._objc__method_prototype_list = type opaque
- %struct._objc_class = type { %struct._objc_class*, %struct._objc_class*, i8*, i32, i32, i32, %struct._objc_ivar_list*, %struct._objc_method_list*, %struct.objc_cache*, %struct._objc_protocol**, i8*, %struct._objc_class_ext* }
+ %struct._objc_class = type { ptr, ptr, ptr, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr }
%struct._objc_class_ext = type opaque
%struct._objc_ivar_list = type opaque
- %struct._objc_method = type { %struct.objc_selector*, i8*, i8* }
+ %struct._objc_method = type { ptr, ptr, ptr }
%struct._objc_method_list = type opaque
- %struct._objc_module = type { i32, i32, i8*, %struct._objc_symtab* }
- %struct._objc_protocol = type { %struct._objc_protocol_extension*, i8*, %struct._objc_protocol**, %struct._objc__method_prototype_list*, %struct._objc__method_prototype_list* }
+ %struct._objc_module = type { i32, i32, ptr, ptr }
+ %struct._objc_protocol = type { ptr, ptr, ptr, ptr, ptr }
%struct._objc_protocol_extension = type opaque
- %struct._objc_super = type { %struct.objc_object*, %struct._objc_class* }
- %struct._objc_symtab = type { i32, %struct.objc_selector**, i16, i16, [1 x i8*] }
- %struct.anon = type { %struct._objc__method_prototype_list*, i32, [1 x %struct._objc_method] }
+ %struct._objc_super = type { ptr, ptr }
+ %struct._objc_symtab = type { i32, ptr, i16, i16, [1 x ptr] }
+ %struct.anon = type { ptr, i32, [1 x %struct._objc_method] }
%struct.objc_cache = type opaque
%struct.objc_object = type opaque
%struct.objc_selector = type opaque
%struct.objc_super = type opaque
-@_NSConcreteStackBlock = external global i8* ; <i8**> [#uses=1]
-@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global %struct.objc_selector* bitcast ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep", %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 1, i32 0, %struct._objc_ivar_list* null, %struct._objc_method_list* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to %struct._objc_method_list*), %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=3]
-@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_SYMBOLS" = internal global { i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] } { i32 0, %struct.objc_selector** null, i16 1, i16 0, [1 x %struct._objc_class*] [ %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <{ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[14 x i8]*> [#uses=2]
-@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12@0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[20 x i8]*> [#uses=1]
-@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { i8*, i32, [1 x %struct._objc_method] } { i8* null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast (%struct.objc_object* (%struct.NSBitmapImageRep*, %struct.objc_selector*, %struct.NSZone*)* @"-[NSBitmapImageRep copyWithZone:]" to i8*) } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip" ; <{ i8*, i32, [1 x %struct._objc_method] }*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[17 x i8]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[11 x i8]*> [#uses=2]
-@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 2, i32 48, %struct._objc_ivar_list* null, %struct._objc_method_list* null, %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__meta_class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[34 x i8]*> [#uses=2]
-@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular" ; <[2 x i32]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4 ; <[1 x i8]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), %struct._objc_symtab* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
-@llvm.used = appending global [14 x i8*] [ i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1" to i8*), i8* bitcast (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" to i8*), i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0" to i8*), i8* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* getelementptr ([14 x i8], [14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([20 x i8], [20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to i8*), i8* getelementptr ([17 x i8], [17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([11 x i8], [11 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* bitcast (%struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep" to i8*), i8* getelementptr ([34 x i8], [34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1", i32 0, i32 0), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* getelementptr ([1 x i8], [1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*) ], section "llvm.metadata" ; <[14 x i8*]*> [#uses=0]
+@_NSConcreteStackBlock = external global ptr ; <ptr> [#uses=1]
+@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_1", section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <ptr> [#uses=2]
+@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { ptr @"\01L_OBJC_METACLASS_NSBitmapImageRep", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 1, i32 0, ptr null, ptr @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep", ptr null, ptr null, ptr null, ptr null }, section "__OBJC,__class,regular,no_dead_strip" ; <ptr> [#uses=3]
+@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_0", section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <ptr> [#uses=2]
+@"\01L_OBJC_SYMBOLS" = internal global { i32, ptr, i16, i16, [1 x ptr] } { i32 0, ptr null, i16 1, i16 0, [1 x ptr] [ ptr @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12@0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=1]
+@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { ptr, i32, [1 x %struct._objc_method] } { ptr null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { ptr @"\01L_OBJC_METH_VAR_NAME_0", ptr @"\01L_OBJC_METH_VAR_TYPE_0", ptr @"-[NSBitmapImageRep copyWithZone:]" } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip" ; <ptr> [#uses=2]
+@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=1]
+@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=2]
+@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 2, i32 48, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null }, section "__OBJC,__meta_class,regular,no_dead_strip" ; <ptr> [#uses=2]
+@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=2]
+@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular" ; <ptr> [#uses=1]
+@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4 ; <ptr> [#uses=1]
+@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, ptr @"\01L_OBJC_CLASS_NAME_2", ptr @"\01L_OBJC_SYMBOLS" }, section "__OBJC,__module_info,regular,no_dead_strip" ; <ptr> [#uses=1]
+@llvm.used = appending global [14 x ptr] [ ptr @"\01L_OBJC_SELECTOR_REFERENCES_1", ptr @"\01L_OBJC_CLASS_NSBitmapImageRep", ptr @"\01L_OBJC_SELECTOR_REFERENCES_0", ptr @"\01L_OBJC_SYMBOLS", ptr @"\01L_OBJC_METH_VAR_NAME_0", ptr @"\01L_OBJC_METH_VAR_TYPE_0", ptr @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep", ptr @"\01L_OBJC_CLASS_NAME_0", ptr @"\01L_OBJC_CLASS_NAME_1", ptr @"\01L_OBJC_METACLASS_NSBitmapImageRep", ptr @"\01L_OBJC_METH_VAR_NAME_1", ptr @"\01L_OBJC_IMAGE_INFO", ptr @"\01L_OBJC_CLASS_NAME_2", ptr @"\01L_OBJC_MODULES" ], section "llvm.metadata" ; <ptr> [#uses=0]
-define internal %struct.objc_object* @"-[NSBitmapImageRep copyWithZone:]"(%struct.NSBitmapImageRep* %self, %struct.objc_selector* %_cmd, %struct.NSZone* %zone) nounwind {
+define internal ptr @"-[NSBitmapImageRep copyWithZone:]"(ptr %self, ptr %_cmd, ptr %zone) nounwind {
entry:
- %self_addr = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %_cmd_addr = alloca %struct.objc_selector* ; <%struct.objc_selector**> [#uses=1]
- %zone_addr = alloca %struct.NSZone* ; <%struct.NSZone**> [#uses=2]
- %retval = alloca %struct.objc_object* ; <%struct.objc_object**> [#uses=1]
- %__block_holder_tmp_1.0 = alloca %struct.__block_1 ; <%struct.__block_1*> [#uses=7]
- %new = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %self.1 = alloca %struct.objc_object* ; <%struct.objc_object**> [#uses=2]
- %0 = alloca i8* ; <i8**> [#uses=2]
- %TRAMP.9 = alloca %struct.__builtin_trampoline, align 4 ; <%struct.__builtin_trampoline*> [#uses=1]
- %1 = alloca void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=2]
- %2 = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %FRAME.7 = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=5]
+ %self_addr = alloca ptr ; <ptr> [#uses=2]
+ %_cmd_addr = alloca ptr ; <ptr> [#uses=1]
+ %zone_addr = alloca ptr ; <ptr> [#uses=2]
+ %retval = alloca ptr ; <ptr> [#uses=1]
+ %__block_holder_tmp_1.0 = alloca %struct.__block_1 ; <ptr> [#uses=7]
+ %new = alloca ptr ; <ptr> [#uses=2]
+ %self.1 = alloca ptr ; <ptr> [#uses=2]
+ %0 = alloca ptr ; <ptr> [#uses=2]
+ %TRAMP.9 = alloca %struct.__builtin_trampoline, align 4 ; <ptr> [#uses=1]
+ %1 = alloca ptr ; <ptr> [#uses=2]
+ %2 = alloca ptr ; <ptr> [#uses=2]
+ %FRAME.7 = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" ; <ptr> [#uses=5]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.NSBitmapImageRep* %self, %struct.NSBitmapImageRep** %self_addr
- store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
- store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
- %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %4 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %self_addr, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
- %TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8* ; <i8*> [#uses=1]
- %FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8* ; <i8*> [#uses=1]
- call void @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
- %tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
- store i8* %tramp, i8** %0, align 4
- %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %6 = load i8*, i8** %0, align 4 ; <i8*> [#uses=1]
- %7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
- store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
- %8 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %9 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
- store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %9, align 4
- %10 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %11 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 67108864, i32* %11, align 4
- %12 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %13 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
- store i32 24, i32* %13, align 4
- %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %15 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %14, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
- %16 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %17 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
- %18 = load void (%struct.__block_1*, %struct.CGImage*)*, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- %19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8* ; <i8*> [#uses=1]
- store i8* %19, i8** %17, align 4
- %20 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %21 = load %struct.NSZone*, %struct.NSZone** %zone_addr, align 4 ; <%struct.NSZone*> [#uses=1]
- store %struct.NSZone* %21, %struct.NSZone** %20, align 4
- %22 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
- store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
- %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %24 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %23, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
- %25 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %2, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- %26 = bitcast %struct.NSBitmapImageRep* %25 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- store %struct.objc_object* %26, %struct.objc_object** %self.1, align 4
- %27 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4 ; <%struct.objc_selector*> [#uses=1]
- %__block_holder_tmp_1.03 = bitcast %struct.__block_1* %__block_holder_tmp_1.0 to void (%struct.CGImage*)* ; <void (%struct.CGImage*)*> [#uses=1]
- %28 = load %struct.objc_object*, %struct.objc_object** %self.1, align 4 ; <%struct.objc_object*> [#uses=1]
- %29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...) inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind ; <%struct.objc_object*> [#uses=0]
+ store ptr %self, ptr %self_addr
+ store ptr %_cmd, ptr %_cmd_addr
+ store ptr %zone, ptr %zone_addr
+ %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 0 ; <ptr> [#uses=1]
+ %4 = load ptr, ptr %self_addr, align 4 ; <ptr> [#uses=1]
+ store ptr %4, ptr %3, align 4
+ call void @llvm.init.trampoline(ptr %TRAMP.9, ptr @__helper_1.1632, ptr %FRAME.7) ; <ptr> [#uses=1]
+ %tramp = call ptr @llvm.adjust.trampoline(ptr %TRAMP.9)
+ store ptr %tramp, ptr %0, align 4
+ %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 1 ; <ptr> [#uses=1]
+ %6 = load ptr, ptr %0, align 4 ; <ptr> [#uses=1]
+ store ptr %6, ptr %5, align 4
+ store ptr null, ptr %new, align 4
+ %7 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0 ; <ptr> [#uses=1]
+ %8 = getelementptr %struct.__invoke_impl, ptr %7, i32 0, i32 0 ; <ptr> [#uses=1]
+ store ptr @_NSConcreteStackBlock, ptr %8, align 4
+ %9 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0 ; <ptr> [#uses=1]
+ %10 = getelementptr %struct.__invoke_impl, ptr %9, i32 0, i32 1 ; <ptr> [#uses=1]
+ store i32 67108864, ptr %10, align 4
+ %11 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0 ; <ptr> [#uses=1]
+ %12 = getelementptr %struct.__invoke_impl, ptr %11, i32 0, i32 2 ; <ptr> [#uses=1]
+ store i32 24, ptr %12, align 4
+ %13 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 1 ; <ptr> [#uses=1]
+ %14 = load ptr, ptr %13, align 4 ; <ptr> [#uses=1]
+ store ptr %14, ptr %1, align 4
+ %15 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 0 ; <ptr> [#uses=1]
+ %16 = getelementptr %struct.__invoke_impl, ptr %15, i32 0, i32 3 ; <ptr> [#uses=1]
+ %17 = load ptr, ptr %1, align 4 ; <ptr> [#uses=1]
+ store ptr %17, ptr %16, align 4
+ %18 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 1 ; <ptr> [#uses=1]
+ %19 = load ptr, ptr %zone_addr, align 4 ; <ptr> [#uses=1]
+ store ptr %19, ptr %18, align 4
+ %20 = getelementptr %struct.__block_1, ptr %__block_holder_tmp_1.0, i32 0, i32 2 ; <ptr> [#uses=1]
+ store ptr %new, ptr %20, align 4
+ %21 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %FRAME.7, i32 0, i32 0 ; <ptr> [#uses=1]
+ %22 = load ptr, ptr %21, align 4 ; <ptr> [#uses=1]
+ store ptr %22, ptr %2, align 4
+ %23 = load ptr, ptr %2, align 4 ; <ptr> [#uses=1]
+ store ptr %23, ptr %self.1, align 4
+ %24 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4 ; <ptr> [#uses=1]
+ %25 = load ptr, ptr %self.1, align 4 ; <ptr> [#uses=1]
+ %26 = call ptr (ptr, ptr, ...) inttoptr (i64 4294901504 to ptr)(ptr %25, ptr %24, ptr %__block_holder_tmp_1.0) nounwind ; <ptr> [#uses=0]
br label %return
return: ; preds = %entry
- %retval5 = load %struct.objc_object*, %struct.objc_object** %retval ; <%struct.objc_object*> [#uses=1]
- ret %struct.objc_object* %retval5
+ %retval5 = load ptr, ptr %retval ; <ptr> [#uses=1]
+ ret ptr %retval5
}
-declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
-declare i8* @llvm.adjust.trampoline(i8*) nounwind
+declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+declare ptr @llvm.adjust.trampoline(ptr) nounwind
; ObjC block helper for -[NSBitmapImageRep copyWithZone:]: reads the captured
; zone and result slot out of the block literal (%_self) and the receiver out
; of the nest frame (%CHAIN.8), fills a %struct._objc_super, calls
; @objc_msgSendSuper(super, SEL, zone), and stores the returned object through
; the captured %new pointer. The `+` side of this migration hunk drops the
; now-unneeded bitcasts and renumbers the unnamed SSA values; behavior is
; identical. NOTE(review): %cgImage is stored to its shadow alloca but never
; read in this helper — presumably dead after earlier optimization.
-define internal void @__helper_1.1632(%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* nest %CHAIN.8, %struct.__block_1* %_self, %struct.CGImage* %cgImage) nounwind {
+define internal void @__helper_1.1632(ptr nest %CHAIN.8, ptr %_self, ptr %cgImage) nounwind {
entry:
- %CHAIN.8_addr = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"**> [#uses=2]
- %_self_addr = alloca %struct.__block_1* ; <%struct.__block_1**> [#uses=3]
- %cgImage_addr = alloca %struct.CGImage* ; <%struct.CGImage**> [#uses=1]
- %zone = alloca %struct.NSZone* ; <%struct.NSZone**> [#uses=2]
- %objc_super = alloca %struct._objc_super ; <%struct._objc_super*> [#uses=3]
- %new = alloca %struct.NSBitmapImageRep** ; <%struct.NSBitmapImageRep***> [#uses=2]
- %objc_super.5 = alloca %struct.objc_super* ; <%struct.objc_super**> [#uses=2]
- %0 = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
+ %CHAIN.8_addr = alloca ptr ; <ptr> [#uses=2]
+ %_self_addr = alloca ptr ; <ptr> [#uses=3]
+ %cgImage_addr = alloca ptr ; <ptr> [#uses=1]
+ %zone = alloca ptr ; <ptr> [#uses=2]
+ %objc_super = alloca %struct._objc_super ; <ptr> [#uses=3]
+ %new = alloca ptr ; <ptr> [#uses=2]
+ %objc_super.5 = alloca ptr ; <ptr> [#uses=2]
+ %0 = alloca ptr ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %CHAIN.8, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr
- store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
- store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
- %1 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %2 = getelementptr %struct.__block_1, %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
- %3 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %2, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
- store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
- %4 = load %struct.__block_1*, %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %5 = getelementptr %struct.__block_1, %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %6 = load %struct.NSZone*, %struct.NSZone** %5, align 4 ; <%struct.NSZone*> [#uses=1]
- store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
- %7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4 ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
- %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %9 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %8, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
- %10 = load %struct.NSBitmapImageRep*, %struct.NSBitmapImageRep** %0, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- %11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- %12 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
- store %struct.objc_object* %11, %struct.objc_object** %12, align 4
- %13 = load %struct._objc_class*, %struct._objc_class** getelementptr (%struct._objc_class, %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <%struct._objc_class*> [#uses=1]
- %14 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
- store %struct._objc_class* %13, %struct._objc_class** %14, align 4
- %objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super* ; <%struct.objc_super*> [#uses=1]
- store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
- %15 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4 ; <%struct.objc_selector*> [#uses=1]
- %16 = load %struct.objc_super*, %struct.objc_super** %objc_super.5, align 4 ; <%struct.objc_super*> [#uses=1]
- %17 = load %struct.NSZone*, %struct.NSZone** %zone, align 4 ; <%struct.NSZone*> [#uses=1]
- %18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...) @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind ; <%struct.objc_object*> [#uses=1]
- %19 = bitcast %struct.objc_object* %18 to %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep*> [#uses=1]
- %20 = load %struct.NSBitmapImageRep**, %struct.NSBitmapImageRep*** %new, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
- store %struct.NSBitmapImageRep* %19, %struct.NSBitmapImageRep** %20, align 4
+ store ptr %CHAIN.8, ptr %CHAIN.8_addr
+ store ptr %_self, ptr %_self_addr
+ store ptr %cgImage, ptr %cgImage_addr
+ %1 = load ptr, ptr %_self_addr, align 4 ; <ptr> [#uses=1]
+ %2 = getelementptr %struct.__block_1, ptr %1, i32 0, i32 2 ; <ptr> [#uses=1]
+ %3 = load ptr, ptr %2, align 4 ; <ptr> [#uses=1]
+ store ptr %3, ptr %new, align 4
+ %4 = load ptr, ptr %_self_addr, align 4 ; <ptr> [#uses=1]
+ %5 = getelementptr %struct.__block_1, ptr %4, i32 0, i32 1 ; <ptr> [#uses=1]
+ %6 = load ptr, ptr %5, align 4 ; <ptr> [#uses=1]
+ store ptr %6, ptr %zone, align 4
+ %7 = load ptr, ptr %CHAIN.8_addr, align 4 ; <ptr> [#uses=1]
+ %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", ptr %7, i32 0, i32 0 ; <ptr> [#uses=1]
+ %9 = load ptr, ptr %8, align 4 ; <ptr> [#uses=1]
+ store ptr %9, ptr %0, align 4
+ %10 = load ptr, ptr %0, align 4 ; <ptr> [#uses=1]
+ %11 = getelementptr %struct._objc_super, ptr %objc_super, i32 0, i32 0 ; <ptr> [#uses=1]
+ store ptr %10, ptr %11, align 4
+ %12 = load ptr, ptr getelementptr (%struct._objc_class, ptr @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <ptr> [#uses=1]
+ %13 = getelementptr %struct._objc_super, ptr %objc_super, i32 0, i32 1 ; <ptr> [#uses=1]
+ store ptr %12, ptr %13, align 4
+ store ptr %objc_super, ptr %objc_super.5, align 4
+ %14 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4 ; <ptr> [#uses=1]
+ %15 = load ptr, ptr %objc_super.5, align 4 ; <ptr> [#uses=1]
+ %16 = load ptr, ptr %zone, align 4 ; <ptr> [#uses=1]
+ %17 = call ptr (ptr, ptr, ...) @objc_msgSendSuper(ptr %15, ptr %14, ptr %16) nounwind ; <ptr> [#uses=1]
+ %18 = load ptr, ptr %new, align 4 ; <ptr> [#uses=1]
+ store ptr %17, ptr %18, align 4
br label %return
return: ; preds = %entry
ret void
}
-declare %struct.objc_object* @objc_msgSendSuper(%struct.objc_super*, %struct.objc_selector*, ...)
+declare ptr @objc_msgSendSuper(ptr, ptr, ...)
; CHECK-NEXT: # %bb.1: # %exit
; CHECK-NEXT: .LBB0_2: # %cond.false
entry:
- %bf.load.i = load i64, i64* null, align 8
+ %bf.load.i = load i64, ptr null, align 8
%bf.lshr.i = lshr i64 %bf.load.i, 32
%0 = trunc i64 %bf.lshr.i to i32
%bf.cast.i = and i32 %0, 65535
; Ensure we don't crash by trying to convert directly from a subword load
; to a ppc_fp128 as we do for conversions to f32/f64.
; i16 load widened to ppc_fp128 via uitofp; CHECK pins lhz + xscvuxddp
; (i.e. no direct subword-load-to-fp128 conversion, which used to crash).
-define ppc_fp128 @test(i16* nocapture readonly %Ptr) {
+define ppc_fp128 @test(ptr nocapture readonly %Ptr) {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz 3, 0(3)
; CHECK-NEXT: xscvuxddp 1, 0
; CHECK-NEXT: blr
entry:
- %0 = load i16, i16* %Ptr, align 2
+ %0 = load i16, ptr %Ptr, align 2
%conv = uitofp i16 %0 to ppc_fp128
ret ppc_fp128 %conv
}
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
+declare <4 x i32> @llvm.ppc.altivec.lvx(ptr) #1
; Mixes an @llvm.ppc.altivec.lvx intrinsic load at %h+16 with an ordinary
; align-8 load of %h; the `+` side folds the i8* bitcast into the opaque-ptr
; call. CHECK only requires the function to return (blr).
-define <4 x i32> @test1(<4 x i32>* %h) #0 {
+define <4 x i32> @test1(ptr %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
- %hv = bitcast <4 x i32>* %h1 to i8*
- %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
+ %h1 = getelementptr <4 x i32>, ptr %h, i64 1
+ %vl = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %h1)
- %v0 = load <4 x i32>, <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, ptr %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
; CHECK: blr
}
-declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
+declare void @llvm.ppc.altivec.stvx(<4 x i32>, ptr) #0
-define <4 x i32> @test2(<4 x i32>* %h, <4 x i32> %d) #0 {
+define <4 x i32> @test2(ptr %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
- %hv = bitcast <4 x i32>* %h1 to i8*
- call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
+ %h1 = getelementptr <4 x i32>, ptr %h, i64 1
+ call void @llvm.ppc.altivec.stvx(<4 x i32> %d, ptr %h1)
- %v0 = load <4 x i32>, <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, ptr %h, align 8
ret <4 x i32> %v0
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define void @foo(float* noalias nocapture %a, float* noalias nocapture %b) #0 {
+define void @foo(ptr noalias nocapture %a, ptr noalias nocapture %b) #0 {
vector.ph:
br label %vector.body
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float, float* %b, i64 %index
- %1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %1, align 4
+ %0 = getelementptr inbounds float, ptr %b, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
%.sum11 = or i64 %index, 4
- %2 = getelementptr float, float* %b, i64 %.sum11
- %3 = bitcast float* %2 to <4 x float>*
- %wide.load8 = load <4 x float>, <4 x float>* %3, align 4
- %4 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
- %5 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
- %6 = getelementptr inbounds float, float* %a, i64 %index
- %7 = bitcast float* %6 to <4 x float>*
- store <4 x float> %4, <4 x float>* %7, align 4
+ %1 = getelementptr float, ptr %b, i64 %.sum11
+ %wide.load8 = load <4 x float>, ptr %1, align 4
+ %2 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+ %3 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+ %4 = getelementptr inbounds float, ptr %a, i64 %index
+ store <4 x float> %2, ptr %4, align 4
%.sum12 = or i64 %index, 4
- %8 = getelementptr float, float* %a, i64 %.sum12
- %9 = bitcast float* %8 to <4 x float>*
- store <4 x float> %5, <4 x float>* %9, align 4
+ %5 = getelementptr float, ptr %a, i64 %.sum12
+ store <4 x float> %3, ptr %5, align 4
%index.next = add i64 %index, 8
- %10 = icmp eq i64 %index.next, 16000
- br i1 %10, label %for.end, label %vector.body
+ %6 = icmp eq i64 %index.next, 16000
+ br i1 %6, label %for.end, label %vector.body
; CHECK: @foo
; CHECK-DAG: li [[C0:[0-9]+]], 0
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define void @foo(float* noalias nocapture %x, float* noalias nocapture readonly %y) #0 {
+define void @foo(ptr noalias nocapture %x, ptr noalias nocapture readonly %y) #0 {
entry:
br label %vector.body
; CHECK: lvsl
; CHECK: blr
%index = phi i64 [ 0, %entry ], [ %index.next.15, %vector.body ]
- %0 = getelementptr inbounds float, float* %y, i64 %index
- %1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>, <4 x float>* %1, align 4
- %2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
- %3 = getelementptr inbounds float, float* %x, i64 %index
- %4 = bitcast float* %3 to <4 x float>*
- store <4 x float> %2, <4 x float>* %4, align 4
+ %0 = getelementptr inbounds float, ptr %y, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
+ %2 = getelementptr inbounds float, ptr %x, i64 %index
+ store <4 x float> %1, ptr %2, align 4
%index.next = add i64 %index, 4
- %5 = getelementptr inbounds float, float* %y, i64 %index.next
- %6 = bitcast float* %5 to <4 x float>*
- %wide.load.1 = load <4 x float>, <4 x float>* %6, align 4
- %7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
- %8 = getelementptr inbounds float, float* %x, i64 %index.next
- %9 = bitcast float* %8 to <4 x float>*
- store <4 x float> %7, <4 x float>* %9, align 4
+ %3 = getelementptr inbounds float, ptr %y, i64 %index.next
+ %wide.load.1 = load <4 x float>, ptr %3, align 4
+ %4 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
+ %5 = getelementptr inbounds float, ptr %x, i64 %index.next
+ store <4 x float> %4, ptr %5, align 4
%index.next.1 = add i64 %index.next, 4
- %10 = getelementptr inbounds float, float* %y, i64 %index.next.1
- %11 = bitcast float* %10 to <4 x float>*
- %wide.load.2 = load <4 x float>, <4 x float>* %11, align 4
- %12 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
- %13 = getelementptr inbounds float, float* %x, i64 %index.next.1
- %14 = bitcast float* %13 to <4 x float>*
- store <4 x float> %12, <4 x float>* %14, align 4
+ %6 = getelementptr inbounds float, ptr %y, i64 %index.next.1
+ %wide.load.2 = load <4 x float>, ptr %6, align 4
+ %7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
+ %8 = getelementptr inbounds float, ptr %x, i64 %index.next.1
+ store <4 x float> %7, ptr %8, align 4
%index.next.2 = add i64 %index.next.1, 4
- %15 = getelementptr inbounds float, float* %y, i64 %index.next.2
- %16 = bitcast float* %15 to <4 x float>*
- %wide.load.3 = load <4 x float>, <4 x float>* %16, align 4
- %17 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
- %18 = getelementptr inbounds float, float* %x, i64 %index.next.2
- %19 = bitcast float* %18 to <4 x float>*
- store <4 x float> %17, <4 x float>* %19, align 4
+ %9 = getelementptr inbounds float, ptr %y, i64 %index.next.2
+ %wide.load.3 = load <4 x float>, ptr %9, align 4
+ %10 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
+ %11 = getelementptr inbounds float, ptr %x, i64 %index.next.2
+ store <4 x float> %10, ptr %11, align 4
%index.next.3 = add i64 %index.next.2, 4
- %20 = getelementptr inbounds float, float* %y, i64 %index.next.3
- %21 = bitcast float* %20 to <4 x float>*
- %wide.load.4 = load <4 x float>, <4 x float>* %21, align 4
- %22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
- %23 = getelementptr inbounds float, float* %x, i64 %index.next.3
- %24 = bitcast float* %23 to <4 x float>*
- store <4 x float> %22, <4 x float>* %24, align 4
+ %12 = getelementptr inbounds float, ptr %y, i64 %index.next.3
+ %wide.load.4 = load <4 x float>, ptr %12, align 4
+ %13 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
+ %14 = getelementptr inbounds float, ptr %x, i64 %index.next.3
+ store <4 x float> %13, ptr %14, align 4
%index.next.4 = add i64 %index.next.3, 4
- %25 = getelementptr inbounds float, float* %y, i64 %index.next.4
- %26 = bitcast float* %25 to <4 x float>*
- %wide.load.5 = load <4 x float>, <4 x float>* %26, align 4
- %27 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
- %28 = getelementptr inbounds float, float* %x, i64 %index.next.4
- %29 = bitcast float* %28 to <4 x float>*
- store <4 x float> %27, <4 x float>* %29, align 4
+ %15 = getelementptr inbounds float, ptr %y, i64 %index.next.4
+ %wide.load.5 = load <4 x float>, ptr %15, align 4
+ %16 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
+ %17 = getelementptr inbounds float, ptr %x, i64 %index.next.4
+ store <4 x float> %16, ptr %17, align 4
%index.next.5 = add i64 %index.next.4, 4
- %30 = getelementptr inbounds float, float* %y, i64 %index.next.5
- %31 = bitcast float* %30 to <4 x float>*
- %wide.load.6 = load <4 x float>, <4 x float>* %31, align 4
- %32 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
- %33 = getelementptr inbounds float, float* %x, i64 %index.next.5
- %34 = bitcast float* %33 to <4 x float>*
- store <4 x float> %32, <4 x float>* %34, align 4
+ %18 = getelementptr inbounds float, ptr %y, i64 %index.next.5
+ %wide.load.6 = load <4 x float>, ptr %18, align 4
+ %19 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
+ %20 = getelementptr inbounds float, ptr %x, i64 %index.next.5
+ store <4 x float> %19, ptr %20, align 4
%index.next.6 = add i64 %index.next.5, 4
- %35 = getelementptr inbounds float, float* %y, i64 %index.next.6
- %36 = bitcast float* %35 to <4 x float>*
- %wide.load.7 = load <4 x float>, <4 x float>* %36, align 4
- %37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
- %38 = getelementptr inbounds float, float* %x, i64 %index.next.6
- %39 = bitcast float* %38 to <4 x float>*
- store <4 x float> %37, <4 x float>* %39, align 4
+ %21 = getelementptr inbounds float, ptr %y, i64 %index.next.6
+ %wide.load.7 = load <4 x float>, ptr %21, align 4
+ %22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
+ %23 = getelementptr inbounds float, ptr %x, i64 %index.next.6
+ store <4 x float> %22, ptr %23, align 4
%index.next.7 = add i64 %index.next.6, 4
- %40 = getelementptr inbounds float, float* %y, i64 %index.next.7
- %41 = bitcast float* %40 to <4 x float>*
- %wide.load.8 = load <4 x float>, <4 x float>* %41, align 4
- %42 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
- %43 = getelementptr inbounds float, float* %x, i64 %index.next.7
- %44 = bitcast float* %43 to <4 x float>*
- store <4 x float> %42, <4 x float>* %44, align 4
+ %24 = getelementptr inbounds float, ptr %y, i64 %index.next.7
+ %wide.load.8 = load <4 x float>, ptr %24, align 4
+ %25 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
+ %26 = getelementptr inbounds float, ptr %x, i64 %index.next.7
+ store <4 x float> %25, ptr %26, align 4
%index.next.8 = add i64 %index.next.7, 4
- %45 = getelementptr inbounds float, float* %y, i64 %index.next.8
- %46 = bitcast float* %45 to <4 x float>*
- %wide.load.9 = load <4 x float>, <4 x float>* %46, align 4
- %47 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
- %48 = getelementptr inbounds float, float* %x, i64 %index.next.8
- %49 = bitcast float* %48 to <4 x float>*
- store <4 x float> %47, <4 x float>* %49, align 4
+ %27 = getelementptr inbounds float, ptr %y, i64 %index.next.8
+ %wide.load.9 = load <4 x float>, ptr %27, align 4
+ %28 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
+ %29 = getelementptr inbounds float, ptr %x, i64 %index.next.8
+ store <4 x float> %28, ptr %29, align 4
%index.next.9 = add i64 %index.next.8, 4
- %50 = getelementptr inbounds float, float* %y, i64 %index.next.9
- %51 = bitcast float* %50 to <4 x float>*
- %wide.load.10 = load <4 x float>, <4 x float>* %51, align 4
- %52 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
- %53 = getelementptr inbounds float, float* %x, i64 %index.next.9
- %54 = bitcast float* %53 to <4 x float>*
- store <4 x float> %52, <4 x float>* %54, align 4
+ %30 = getelementptr inbounds float, ptr %y, i64 %index.next.9
+ %wide.load.10 = load <4 x float>, ptr %30, align 4
+ %31 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
+ %32 = getelementptr inbounds float, ptr %x, i64 %index.next.9
+ store <4 x float> %31, ptr %32, align 4
%index.next.10 = add i64 %index.next.9, 4
- %55 = getelementptr inbounds float, float* %y, i64 %index.next.10
- %56 = bitcast float* %55 to <4 x float>*
- %wide.load.11 = load <4 x float>, <4 x float>* %56, align 4
- %57 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
- %58 = getelementptr inbounds float, float* %x, i64 %index.next.10
- %59 = bitcast float* %58 to <4 x float>*
- store <4 x float> %57, <4 x float>* %59, align 4
+ %33 = getelementptr inbounds float, ptr %y, i64 %index.next.10
+ %wide.load.11 = load <4 x float>, ptr %33, align 4
+ %34 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
+ %35 = getelementptr inbounds float, ptr %x, i64 %index.next.10
+ store <4 x float> %34, ptr %35, align 4
%index.next.11 = add i64 %index.next.10, 4
- %60 = getelementptr inbounds float, float* %y, i64 %index.next.11
- %61 = bitcast float* %60 to <4 x float>*
- %wide.load.12 = load <4 x float>, <4 x float>* %61, align 4
- %62 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
- %63 = getelementptr inbounds float, float* %x, i64 %index.next.11
- %64 = bitcast float* %63 to <4 x float>*
- store <4 x float> %62, <4 x float>* %64, align 4
+ %36 = getelementptr inbounds float, ptr %y, i64 %index.next.11
+ %wide.load.12 = load <4 x float>, ptr %36, align 4
+ %37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
+ %38 = getelementptr inbounds float, ptr %x, i64 %index.next.11
+ store <4 x float> %37, ptr %38, align 4
%index.next.12 = add i64 %index.next.11, 4
- %65 = getelementptr inbounds float, float* %y, i64 %index.next.12
- %66 = bitcast float* %65 to <4 x float>*
- %wide.load.13 = load <4 x float>, <4 x float>* %66, align 4
- %67 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
- %68 = getelementptr inbounds float, float* %x, i64 %index.next.12
- %69 = bitcast float* %68 to <4 x float>*
- store <4 x float> %67, <4 x float>* %69, align 4
+ %39 = getelementptr inbounds float, ptr %y, i64 %index.next.12
+ %wide.load.13 = load <4 x float>, ptr %39, align 4
+ %40 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
+ %41 = getelementptr inbounds float, ptr %x, i64 %index.next.12
+ store <4 x float> %40, ptr %41, align 4
%index.next.13 = add i64 %index.next.12, 4
- %70 = getelementptr inbounds float, float* %y, i64 %index.next.13
- %71 = bitcast float* %70 to <4 x float>*
- %wide.load.14 = load <4 x float>, <4 x float>* %71, align 4
- %72 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
- %73 = getelementptr inbounds float, float* %x, i64 %index.next.13
- %74 = bitcast float* %73 to <4 x float>*
- store <4 x float> %72, <4 x float>* %74, align 4
+ %42 = getelementptr inbounds float, ptr %y, i64 %index.next.13
+ %wide.load.14 = load <4 x float>, ptr %42, align 4
+ %43 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
+ %44 = getelementptr inbounds float, ptr %x, i64 %index.next.13
+ store <4 x float> %43, ptr %44, align 4
%index.next.14 = add i64 %index.next.13, 4
- %75 = getelementptr inbounds float, float* %y, i64 %index.next.14
- %76 = bitcast float* %75 to <4 x float>*
- %wide.load.15 = load <4 x float>, <4 x float>* %76, align 4
- %77 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
- %78 = getelementptr inbounds float, float* %x, i64 %index.next.14
- %79 = bitcast float* %78 to <4 x float>*
- store <4 x float> %77, <4 x float>* %79, align 4
+ %45 = getelementptr inbounds float, ptr %y, i64 %index.next.14
+ %wide.load.15 = load <4 x float>, ptr %45, align 4
+ %46 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
+ %47 = getelementptr inbounds float, ptr %x, i64 %index.next.14
+ store <4 x float> %46, ptr %47, align 4
%index.next.15 = add i64 %index.next.14, 4
- %80 = icmp eq i64 %index.next.15, 2048
- br i1 %80, label %for.end, label %vector.body
+ %48 = icmp eq i64 %index.next.15, 2048
+ br i1 %48, label %for.end, label %vector.body
for.end: ; preds = %vector.body
ret void
; Function Attrs: nounwind readonly
declare <4 x float> @llvm_cos_v4f32(<4 x float>) #1
-define <2 x double> @bar(double* %x) {
+define <2 x double> @bar(ptr %x) {
entry:
- %p = bitcast double* %x to <2 x double>*
- %r = load <2 x double>, <2 x double>* %p, align 8
+ %r = load <2 x double>, ptr %x, align 8
; CHECK-LABEL: @bar
; CHECK-NOT: lvsl
; RUN: llc -mtriple powerpc64-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -verify-machineinstrs -vec-extabi < %s | FileCheck %s
; <16 x i8> load at align 1; CHECK expects the vperm-based unaligned expansion.
-define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
+define <16 x i8> @test_l_v16i8(ptr %p) #0 {
; CHECK-LABEL: test_l_v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
- %r = load <16 x i8>, <16 x i8>* %p, align 1
+ %r = load <16 x i8>, ptr %p, align 1
ret <16 x i8> %r
}
; <32 x i8> load at align 1 (two vectors); CHECK expects a vperm expansion.
-define <32 x i8> @test_l_v32i8(<32 x i8>* %p) #0 {
+define <32 x i8> @test_l_v32i8(ptr %p) #0 {
; CHECK-LABEL: test_l_v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
- %r = load <32 x i8>, <32 x i8>* %p, align 1
+ %r = load <32 x i8>, ptr %p, align 1
ret <32 x i8> %r
}
; <8 x i16> load at align 2; CHECK expects the vperm-based unaligned expansion.
-define <8 x i16> @test_l_v8i16(<8 x i16>* %p) #0 {
+define <8 x i16> @test_l_v8i16(ptr %p) #0 {
; CHECK-LABEL: test_l_v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
- %r = load <8 x i16>, <8 x i16>* %p, align 2
+ %r = load <8 x i16>, ptr %p, align 2
ret <8 x i16> %r
}
; <16 x i16> load at align 2 (two vectors); CHECK expects a vperm expansion.
-define <16 x i16> @test_l_v16i16(<16 x i16>* %p) #0 {
+define <16 x i16> @test_l_v16i16(ptr %p) #0 {
; CHECK-LABEL: test_l_v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
- %r = load <16 x i16>, <16 x i16>* %p, align 2
+ %r = load <16 x i16>, ptr %p, align 2
ret <16 x i16> %r
}
; <4 x i32> load at align 4; CHECK expects the vperm-based unaligned expansion.
-define <4 x i32> @test_l_v4i32(<4 x i32>* %p) #0 {
+define <4 x i32> @test_l_v4i32(ptr %p) #0 {
; CHECK-LABEL: test_l_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
- %r = load <4 x i32>, <4 x i32>* %p, align 4
+ %r = load <4 x i32>, ptr %p, align 4
ret <4 x i32> %r
}
; <8 x i32> load at align 4 (two vectors); CHECK expects a vperm expansion.
-define <8 x i32> @test_l_v8i32(<8 x i32>* %p) #0 {
+define <8 x i32> @test_l_v8i32(ptr %p) #0 {
; CHECK-LABEL: test_l_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
- %r = load <8 x i32>, <8 x i32>* %p, align 4
+ %r = load <8 x i32>, ptr %p, align 4
ret <8 x i32> %r
}
; <2 x i64> load at align 8; CHECK expects a single direct lxvd2x.
-define <2 x i64> @test_l_v2i64(<2 x i64>* %p) #0 {
+define <2 x i64> @test_l_v2i64(ptr %p) #0 {
; CHECK-LABEL: test_l_v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <2 x i64>, <2 x i64>* %p, align 8
+ %r = load <2 x i64>, ptr %p, align 8
ret <2 x i64> %r
}
; <4 x i64> load at align 8 (two vectors); CHECK expects lxvd2x loads.
-define <4 x i64> @test_l_v4i64(<4 x i64>* %p) #0 {
+define <4 x i64> @test_l_v4i64(ptr %p) #0 {
; CHECK-LABEL: test_l_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <4 x i64>, <4 x i64>* %p, align 8
+ %r = load <4 x i64>, ptr %p, align 8
ret <4 x i64> %r
}
; <4 x float> load at align 4; CHECK expects the vperm-based expansion.
-define <4 x float> @test_l_v4float(<4 x float>* %p) #0 {
+define <4 x float> @test_l_v4float(ptr %p) #0 {
; CHECK-LABEL: test_l_v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
- %r = load <4 x float>, <4 x float>* %p, align 4
+ %r = load <4 x float>, ptr %p, align 4
ret <4 x float> %r
}
; <8 x float> load at align 4 (two vectors); CHECK expects a vperm expansion.
-define <8 x float> @test_l_v8float(<8 x float>* %p) #0 {
+define <8 x float> @test_l_v8float(ptr %p) #0 {
; CHECK-LABEL: test_l_v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
- %r = load <8 x float>, <8 x float>* %p, align 4
+ %r = load <8 x float>, ptr %p, align 4
ret <8 x float> %r
}
; <2 x double> load at align 8; CHECK expects a single direct lxvd2x.
-define <2 x double> @test_l_v2double(<2 x double>* %p) #0 {
+define <2 x double> @test_l_v2double(ptr %p) #0 {
; CHECK-LABEL: test_l_v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <2 x double>, <2 x double>* %p, align 8
+ %r = load <2 x double>, ptr %p, align 8
ret <2 x double> %r
}
; <4 x double> load at align 8 (two vectors); CHECK expects lxvd2x loads.
-define <4 x double> @test_l_v4double(<4 x double>* %p) #0 {
+define <4 x double> @test_l_v4double(ptr %p) #0 {
; CHECK-LABEL: test_l_v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <4 x double>, <4 x double>* %p, align 8
+ %r = load <4 x double>, ptr %p, align 8
ret <4 x double> %r
}
; Same align-1 <16 x i8> load but under attr set #2 (defined elsewhere);
; CHECK expects a direct lxvw4x instead of the vperm sequence.
-define <16 x i8> @test_l_p8v16i8(<16 x i8>* %p) #2 {
+define <16 x i8> @test_l_p8v16i8(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <16 x i8>, <16 x i8>* %p, align 1
+ %r = load <16 x i8>, ptr %p, align 1
ret <16 x i8> %r
}
; align-1 <32 x i8> load under attr set #2; CHECK expects direct lxvw4x loads.
-define <32 x i8> @test_l_p8v32i8(<32 x i8>* %p) #2 {
+define <32 x i8> @test_l_p8v32i8(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <32 x i8>, <32 x i8>* %p, align 1
+ %r = load <32 x i8>, ptr %p, align 1
ret <32 x i8> %r
}
; align-2 <8 x i16> load under attr set #2; CHECK expects a direct lxvw4x.
-define <8 x i16> @test_l_p8v8i16(<8 x i16>* %p) #2 {
+define <8 x i16> @test_l_p8v8i16(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <8 x i16>, <8 x i16>* %p, align 2
+ %r = load <8 x i16>, ptr %p, align 2
ret <8 x i16> %r
}
; align-2 <16 x i16> load under attr set #2; CHECK expects direct lxvw4x loads.
-define <16 x i16> @test_l_p8v16i16(<16 x i16>* %p) #2 {
+define <16 x i16> @test_l_p8v16i16(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <16 x i16>, <16 x i16>* %p, align 2
+ %r = load <16 x i16>, ptr %p, align 2
ret <16 x i16> %r
}
; align-4 <4 x i32> load under attr set #2; CHECK expects a direct lxvw4x.
-define <4 x i32> @test_l_p8v4i32(<4 x i32>* %p) #2 {
+define <4 x i32> @test_l_p8v4i32(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <4 x i32>, <4 x i32>* %p, align 4
+ %r = load <4 x i32>, ptr %p, align 4
ret <4 x i32> %r
}
; align-4 <8 x i32> load under attr set #2; CHECK expects direct lxvw4x loads.
-define <8 x i32> @test_l_p8v8i32(<8 x i32>* %p) #2 {
+define <8 x i32> @test_l_p8v8i32(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <8 x i32>, <8 x i32>* %p, align 4
+ %r = load <8 x i32>, ptr %p, align 4
ret <8 x i32> %r
}
; align-8 <2 x i64> load under attr set #2; CHECK expects a direct lxvd2x.
-define <2 x i64> @test_l_p8v2i64(<2 x i64>* %p) #2 {
+define <2 x i64> @test_l_p8v2i64(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <2 x i64>, <2 x i64>* %p, align 8
+ %r = load <2 x i64>, ptr %p, align 8
ret <2 x i64> %r
}
; align-8 <4 x i64> load under attr set #2; CHECK expects direct lxvd2x loads.
-define <4 x i64> @test_l_p8v4i64(<4 x i64>* %p) #2 {
+define <4 x i64> @test_l_p8v4i64(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <4 x i64>, <4 x i64>* %p, align 8
+ %r = load <4 x i64>, ptr %p, align 8
ret <4 x i64> %r
}
; align-4 <4 x float> load under attr set #2; CHECK expects a direct lxvw4x.
-define <4 x float> @test_l_p8v4float(<4 x float>* %p) #2 {
+define <4 x float> @test_l_p8v4float(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <4 x float>, <4 x float>* %p, align 4
+ %r = load <4 x float>, ptr %p, align 4
ret <4 x float> %r
}
; align-4 <8 x float> load under attr set #2; CHECK expects direct lxvw4x loads.
-define <8 x float> @test_l_p8v8float(<8 x float>* %p) #2 {
+define <8 x float> @test_l_p8v8float(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <8 x float>, <8 x float>* %p, align 4
+ %r = load <8 x float>, ptr %p, align 4
ret <8 x float> %r
}
; align-8 <2 x double> load under attr set #2; CHECK expects a direct lxvd2x.
-define <2 x double> @test_l_p8v2double(<2 x double>* %p) #2 {
+define <2 x double> @test_l_p8v2double(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- %r = load <2 x double>, <2 x double>* %p, align 8
+ %r = load <2 x double>, ptr %p, align 8
ret <2 x double> %r
}
; align-8 <4 x double> load under attr set #2; CHECK expects direct lxvd2x loads.
-define <4 x double> @test_l_p8v4double(<4 x double>* %p) #2 {
+define <4 x double> @test_l_p8v4double(ptr %p) #2 {
; CHECK-LABEL: test_l_p8v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- %r = load <4 x double>, <4 x double>* %p, align 8
+ %r = load <4 x double>, ptr %p, align 8
ret <4 x double> %r
}
; <16 x i8> store at align 1; CHECK expects a direct stxvw4x.
-define void @test_s_v16i8(<16 x i8>* %p, <16 x i8> %v) #0 {
+define void @test_s_v16i8(ptr %p, <16 x i8> %v) #0 {
; CHECK-LABEL: test_s_v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <16 x i8> %v, <16 x i8>* %p, align 1
+ store <16 x i8> %v, ptr %p, align 1
ret void
}
; <32 x i8> store at align 1 (two vectors); CHECK expects stxvw4x stores.
-define void @test_s_v32i8(<32 x i8>* %p, <32 x i8> %v) #0 {
+define void @test_s_v32i8(ptr %p, <32 x i8> %v) #0 {
; CHECK-LABEL: test_s_v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <32 x i8> %v, <32 x i8>* %p, align 1
+ store <32 x i8> %v, ptr %p, align 1
ret void
}
; <8 x i16> store at align 2; CHECK expects a direct stxvw4x.
-define void @test_s_v8i16(<8 x i16>* %p, <8 x i16> %v) #0 {
+define void @test_s_v8i16(ptr %p, <8 x i16> %v) #0 {
; CHECK-LABEL: test_s_v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <8 x i16> %v, <8 x i16>* %p, align 2
+ store <8 x i16> %v, ptr %p, align 2
ret void
}
; <16 x i16> store at align 2 (two vectors); CHECK expects stxvw4x stores.
-define void @test_s_v16i16(<16 x i16>* %p, <16 x i16> %v) #0 {
+define void @test_s_v16i16(ptr %p, <16 x i16> %v) #0 {
; CHECK-LABEL: test_s_v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <16 x i16> %v, <16 x i16>* %p, align 2
+ store <16 x i16> %v, ptr %p, align 2
ret void
}
; <4 x i32> store at align 4; CHECK expects a direct stxvw4x.
-define void @test_s_v4i32(<4 x i32>* %p, <4 x i32> %v) #0 {
+define void @test_s_v4i32(ptr %p, <4 x i32> %v) #0 {
; CHECK-LABEL: test_s_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <4 x i32> %v, <4 x i32>* %p, align 4
+ store <4 x i32> %v, ptr %p, align 4
ret void
}
; <8 x i32> store at align 4 (two vectors); CHECK expects stxvw4x stores.
-define void @test_s_v8i32(<8 x i32>* %p, <8 x i32> %v) #0 {
+define void @test_s_v8i32(ptr %p, <8 x i32> %v) #0 {
; CHECK-LABEL: test_s_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <8 x i32> %v, <8 x i32>* %p, align 4
+ store <8 x i32> %v, ptr %p, align 4
ret void
}
-define void @test_s_v2i64(<2 x i64>* %p, <2 x i64> %v) #0 {
+define void @test_s_v2i64(ptr %p, <2 x i64> %v) #0 {
; CHECK-LABEL: test_s_v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <2 x i64> %v, <2 x i64>* %p, align 8
+ store <2 x i64> %v, ptr %p, align 8
ret void
}
-define void @test_s_v4i64(<4 x i64>* %p, <4 x i64> %v) #0 {
+define void @test_s_v4i64(ptr %p, <4 x i64> %v) #0 {
; CHECK-LABEL: test_s_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <4 x i64> %v, <4 x i64>* %p, align 8
+ store <4 x i64> %v, ptr %p, align 8
ret void
}
-define void @test_s_v4float(<4 x float>* %p, <4 x float> %v) #0 {
+define void @test_s_v4float(ptr %p, <4 x float> %v) #0 {
; CHECK-LABEL: test_s_v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <4 x float> %v, <4 x float>* %p, align 4
+ store <4 x float> %v, ptr %p, align 4
ret void
}
-define void @test_s_v8float(<8 x float>* %p, <8 x float> %v) #0 {
+define void @test_s_v8float(ptr %p, <8 x float> %v) #0 {
; CHECK-LABEL: test_s_v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <8 x float> %v, <8 x float>* %p, align 4
+ store <8 x float> %v, ptr %p, align 4
ret void
}
-define void @test_s_v2double(<2 x double>* %p, <2 x double> %v) #0 {
+define void @test_s_v2double(ptr %p, <2 x double> %v) #0 {
; CHECK-LABEL: test_s_v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
- store <2 x double> %v, <2 x double>* %p, align 8
+ store <2 x double> %v, ptr %p, align 8
ret void
}
-define void @test_s_v4double(<4 x double>* %p, <4 x double> %v) #0 {
+define void @test_s_v4double(ptr %p, <4 x double> %v) #0 {
; CHECK-LABEL: test_s_v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
- store <4 x double> %v, <4 x double>* %p, align 8
+ store <4 x double> %v, ptr %p, align 8
ret void
}
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
+define <16 x i8> @test_l_v16i8(ptr %p) #0 {
entry:
- %r = load <16 x i8>, <16 x i8>* %p, align 1
+ %r = load <16 x i8>, ptr %p, align 1
ret <16 x i8> %r
; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load (s248) from %ir.p + 4294967281, align 1)>
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define void @copy_to_conceal(<8 x i16>* %inp) #0 {
+define void @copy_to_conceal(ptr %inp) #0 {
; CHECK-LABEL: copy_to_conceal:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxor 2, 2, 2
; CHECK-VSX-NEXT: stxvw4x 0, 0, 3
; CHECK-VSX-NEXT: blr
entry:
- store <8 x i16> zeroinitializer, <8 x i16>* %inp, align 2
+ store <8 x i16> zeroinitializer, ptr %inp, align 2
br label %if.end210
if.end210: ; preds = %entry
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -ppc-convert-rr-to-ri=false -ppc-asm-full-reg-names < %s | FileCheck %s
; ISEL matches address mode xaddr.
-define i8 @test_xaddr(i8* %p) {
+define i8 @test_xaddr(ptr %p) {
; CHECK-LABEL: test_xaddr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r4, 0
; CHECK-NEXT: lbzx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %p.addr = alloca i8*, align 8
- store i8* %p, i8** %p.addr, align 8
- %0 = load i8*, i8** %p.addr, align 8
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 40000
- %1 = load i8, i8* %add.ptr, align 1
+ %p.addr = alloca ptr, align 8
+ store ptr %p, ptr %p.addr, align 8
+ %0 = load ptr, ptr %p.addr, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %0, i64 40000
+ %1 = load i8, ptr %add.ptr, align 1
ret i8 %1
}
; ISEL matches address mode xaddrX4.
-define i64 @test_xaddrX4(i8* %p) {
+define i64 @test_xaddrX4(ptr %p) {
; CHECK-LABEL: test_xaddrX4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r4, 3
; CHECK-NEXT: ldx r3, r3, r4
; CHECK-NEXT: blr
entry:
- %p.addr = alloca i8*, align 8
- store i8* %p, i8** %p.addr, align 8
- %0 = load i8*, i8** %p.addr, align 8
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 3
- %1 = bitcast i8* %add.ptr to i64*
- %2 = load i64, i64* %1, align 8
- ret i64 %2
+ %p.addr = alloca ptr, align 8
+ store ptr %p, ptr %p.addr, align 8
+ %0 = load ptr, ptr %p.addr, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %0, i64 3
+ %1 = load i64, ptr %add.ptr, align 8
+ ret i64 %1
}
; ISEL matches address mode xaddrX16.
-define <2 x double> @test_xaddrX16(double* %arr) {
+define <2 x double> @test_xaddrX16(ptr %arr) {
; CHECK-LABEL: test_xaddrX16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r4, 40
; CHECK-NEXT: lxvx vs34, r3, r4
; CHECK-NEXT: blr
entry:
- %arrayidx1 = getelementptr inbounds double, double* %arr, i64 5
- %0 = bitcast double* %arrayidx1 to <2 x double>*
- %1 = load <2 x double>, <2 x double>* %0, align 16
- ret <2 x double> %1
+ %arrayidx1 = getelementptr inbounds double, ptr %arr, i64 5
+ %0 = load <2 x double>, ptr %arrayidx1, align 16
+ ret <2 x double> %0
}
; ISEL matches address mode xoaddr.
-define void @test_xoaddr(i32* %arr, i32* %arrTo) {
+define void @test_xoaddr(ptr %arr, ptr %arrTo) {
; CHECK-LABEL: test_xoaddr:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r5, 8
; CHECK-NEXT: stxvx vs0, r4, r3
; CHECK-NEXT: blr
entry:
- %arrayidx = getelementptr inbounds i32, i32* %arrTo, i64 1
- %0 = bitcast i32* %arrayidx to <4 x i32>*
- %arrayidx1 = getelementptr inbounds i32, i32* %arr, i64 2
- %1 = bitcast i32* %arrayidx1 to <4 x i32>*
- %2 = load <4 x i32>, <4 x i32>* %1, align 8
- store <4 x i32> %2, <4 x i32>* %0, align 8
+ %arrayidx = getelementptr inbounds i32, ptr %arrTo, i64 1
+ %arrayidx1 = getelementptr inbounds i32, ptr %arr, i64 2
+ %0 = load <4 x i32>, ptr %arrayidx1, align 8
+ store <4 x i32> %0, ptr %arrayidx, align 8
ret void
}
; ISEL matches address mode xaddrX4 and generates LI which can be moved outside of
; loop.
-define i64 @test_xaddrX4_loop(i8* %p) {
+define i64 @test_xaddrX4_loop(ptr %p) {
; CHECK-LABEL: test_xaddrX4_loop:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi r4, r3, -8
%i.015 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%res.014 = phi i64 [ 0, %entry ], [ %add, %for.body ]
%mul = shl i64 %i.015, 3
- %add.ptr = getelementptr inbounds i8, i8* %p, i64 %mul
- %0 = bitcast i8* %add.ptr to i64*
- %1 = load i64, i64* %0, align 8
- %add.ptr3 = getelementptr inbounds i8, i8* %add.ptr, i64 3
- %2 = bitcast i8* %add.ptr3 to i64*
- %3 = load i64, i64* %2, align 8
- %mul4 = mul i64 %3, %1
+ %add.ptr = getelementptr inbounds i8, ptr %p, i64 %mul
+ %0 = load i64, ptr %add.ptr, align 8
+ %add.ptr3 = getelementptr inbounds i8, ptr %add.ptr, i64 3
+ %1 = load i64, ptr %add.ptr3, align 8
+ %mul4 = mul i64 %1, %0
%add = add i64 %mul4, %res.014
%inc = add nuw nsw i64 %i.015, 1
%exitcond = icmp eq i64 %inc, 8
%2 = type { %3 }
%3 = type { %4 }
%4 = type { %5 }
-%5 = type { i8*, i32, i32 }
+%5 = type { ptr, i32, i32 }
%6 = type { [160 x i8] }
%7 = type { %8, i32, %8 }
-%8 = type { i8*, i64 }
+%8 = type { ptr, i64 }
$abc = comdat any
; This test checks that X-Form load, lxvx, is being produced here instead of
; the DQ-Form, lxv. We should not be producing lxv here as the frame index
; value is unaligned (not a multiple of 16).
-define void @abc(%0* %arg, [5 x i64] %arg1, [5 x i64] %arg2, [5 x i64] %arg3, [5 x i64] %arg4) local_unnamed_addr #0 comdat {
+define void @abc(ptr %arg, [5 x i64] %arg1, [5 x i64] %arg2, [5 x i64] %arg3, [5 x i64] %arg4) local_unnamed_addr #0 comdat {
; CHECK-P9-LE-LABEL: abc:
; CHECK-P9-LE: # %bb.0: # %bb
; CHECK-P9-LE-NEXT: addi r6, r1, 120
%i16 = extractvalue [5 x i64] %arg3, 3
%i17 = extractvalue [5 x i64] %arg4, 0
%i18 = extractvalue [5 x i64] %arg4, 4
- store i64 %i5, i64* undef, align 8
- store i64 %i6, i64* null, align 8
- %i19 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 0, i32 2
- %i20 = bitcast %8* %i19 to i64*
- store i64 %i7, i64* %i20, align 8
- store i64 %i8, i64* undef, align 8
- store i64 %i9, i64* null, align 8
- store i64 %i10, i64* undef, align 8
- %i21 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 1, i32 2
- %i22 = bitcast %8* %i21 to i64*
- store i64 %i11, i64* %i22, align 8
- %i23 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 1, i32 2, i32 1
- store i64 %i12, i64* %i23, align 8
- %i24 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 2
- %i25 = bitcast %7* %i24 to i64*
- store i64 %i13, i64* %i25, align 8
- %i26 = getelementptr inbounds [4 x %7], [4 x %7]* %i, i64 0, i64 2, i32 0, i32 1
- store i64 %i14, i64* %i26, align 8
- store i64 %i15, i64* undef, align 8
- store i64 %i16, i64* null, align 8
- store i64 %i17, i64* undef, align 8
- store i64 undef, i64* null, align 8
- store i64 %i18, i64* undef, align 8
- %i27 = getelementptr inbounds %0, %0* %arg, i64 0, i32 0, i32 0, i32 0
- %i28 = getelementptr inbounds %3, %3* %i27, i64 1, i32 0
- store %4* %i28, %4** undef, align 8
+ store i64 %i5, ptr undef, align 8
+ store i64 %i6, ptr null, align 8
+ %i19 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 0, i32 2
+ store i64 %i7, ptr %i19, align 8
+ store i64 %i8, ptr undef, align 8
+ store i64 %i9, ptr null, align 8
+ store i64 %i10, ptr undef, align 8
+ %i21 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 1, i32 2
+ store i64 %i11, ptr %i21, align 8
+ %i23 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 1, i32 2, i32 1
+ store i64 %i12, ptr %i23, align 8
+ %i24 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 2
+ store i64 %i13, ptr %i24, align 8
+ %i26 = getelementptr inbounds [4 x %7], ptr %i, i64 0, i64 2, i32 0, i32 1
+ store i64 %i14, ptr %i26, align 8
+ store i64 %i15, ptr undef, align 8
+ store i64 %i16, ptr null, align 8
+ store i64 %i17, ptr undef, align 8
+ store i64 undef, ptr null, align 8
+ store i64 %i18, ptr undef, align 8
+ %i28 = getelementptr inbounds %3, ptr %arg, i64 1, i32 0
+ store ptr %i28, ptr undef, align 8
ret void
}
; UNALIGN: lfs f1, 12(r1)
; UNALIGN: blr
entry:
- %0 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 1), align 1
+ %0 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 1), align 1
%conv = zext i8 %0 to i32
%shl = shl nuw i32 %conv, 24
- %1 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 2), align 2
+ %1 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 2), align 2
%conv1 = zext i8 %1 to i32
%shl2 = shl nuw nsw i32 %conv1, 16
%add = or i32 %shl2, %shl
- %2 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 3), align 1
+ %2 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 3), align 1
%conv3 = zext i8 %2 to i32
%shl4 = shl nuw nsw i32 %conv3, 8
%add5 = or i32 %add, %shl4
- %3 = load i8, i8* getelementptr inbounds (%struct.anon, %struct.anon* @s, i32 0, i32 1, i32 4), align 4
+ %3 = load i8, ptr getelementptr inbounds (%struct.anon, ptr @s, i32 0, i32 1, i32 4), align 4
%conv6 = zext i8 %3 to i32
%add7 = or i32 %add5, %conv6
%4 = bitcast i32 %add7 to float
%conv8 = fpext float %4 to double
- %call = tail call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %conv8)
+ %call = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, double %conv8)
ret i32 0
}
; Function Attrs: nofree nounwind
-declare i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr
+declare i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
-define void @foo1(i16* %p, i16* %r) nounwind {
+define void @foo1(ptr %p, ptr %r) nounwind {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz 3, 0(3)
; CHECK-VSX-NEXT: sth 3, 0(4)
; CHECK-VSX-NEXT: blr
entry:
- %v = load i16, i16* %p, align 1
- store i16 %v, i16* %r, align 1
+ %v = load i16, ptr %p, align 1
+ store i16 %v, ptr %r, align 1
ret void
}
-define void @foo2(i32* %p, i32* %r) nounwind {
+define void @foo2(ptr %p, ptr %r) nounwind {
; CHECK-LABEL: foo2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-VSX-NEXT: stw 3, 0(4)
; CHECK-VSX-NEXT: blr
entry:
- %v = load i32, i32* %p, align 1
- store i32 %v, i32* %r, align 1
+ %v = load i32, ptr %p, align 1
+ store i32 %v, ptr %r, align 1
ret void
}
-define void @foo3(i64* %p, i64* %r) nounwind {
+define void @foo3(ptr %p, ptr %r) nounwind {
; CHECK-LABEL: foo3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld 3, 0(3)
; CHECK-VSX-NEXT: std 3, 0(4)
; CHECK-VSX-NEXT: blr
entry:
- %v = load i64, i64* %p, align 1
- store i64 %v, i64* %r, align 1
+ %v = load i64, ptr %p, align 1
+ store i64 %v, ptr %r, align 1
ret void
}
-define void @foo4(float* %p, float* %r) nounwind {
+define void @foo4(ptr %p, ptr %r) nounwind {
; CHECK-LABEL: foo4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz 3, 0(3)
; CHECK-VSX-NEXT: stw 3, 0(4)
; CHECK-VSX-NEXT: blr
entry:
- %v = load float, float* %p, align 1
- store float %v, float* %r, align 1
+ %v = load float, ptr %p, align 1
+ store float %v, ptr %r, align 1
ret void
}
-define void @foo5(double* %p, double* %r) nounwind {
+define void @foo5(ptr %p, ptr %r) nounwind {
; CHECK-LABEL: foo5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld 3, 0(3)
; CHECK-VSX-NEXT: std 3, 0(4)
; CHECK-VSX-NEXT: blr
entry:
- %v = load double, double* %p, align 1
- store double %v, double* %r, align 1
+ %v = load double, ptr %p, align 1
+ store double %v, ptr %r, align 1
ret void
}
-define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
+define void @foo6(ptr %p, ptr %r) nounwind {
; These loads and stores are legalized into aligned loads and stores
; using aligned stack slots.
; CHECK-LABEL: foo6:
; stack slots, but lvsl/vperm is better still. (On P8 lxvw4x is preferable.)
; Using unaligned stxvw4x is preferable on both machines.
entry:
- %v = load <4 x float>, <4 x float>* %p, align 1
- store <4 x float> %v, <4 x float>* %r, align 1
+ %v = load <4 x float>, ptr %p, align 1
+ store <4 x float> %v, ptr %r, align 1
ret void
}
; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
; Function is marked as nounwind but it still throws with __cxa_throw and
; calls __cxa_call_unexpected.
; Need to make sure that we do not only have a debug frame.
; Function Attrs: noreturn nounwind
-define void @_Z4funcv() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z4funcv() local_unnamed_addr #0 personality ptr @__gxx_personality_v0 {
entry:
- %exception = tail call i8* @__cxa_allocate_exception(i64 4)
- %0 = bitcast i8* %exception to i32*
- store i32 100, i32* %0, align 16
- invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+ %exception = tail call ptr @__cxa_allocate_exception(i64 4)
+ store i32 100, ptr %exception, align 16
+ invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
to label %unreachable unwind label %lpad
lpad: ; preds = %entry
- %1 = landingpad { i8*, i32 }
- filter [0 x i8*] zeroinitializer
- %2 = extractvalue { i8*, i32 } %1, 0
- tail call void @__cxa_call_unexpected(i8* %2)
+ %0 = landingpad { ptr, i32 }
+ filter [0 x ptr] zeroinitializer
+ %1 = extractvalue { ptr, i32 } %0, 0
+ tail call void @__cxa_call_unexpected(ptr %1)
unreachable
unreachable: ; preds = %entry
; CHECK: .cfi_endproc
}
-declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i64) local_unnamed_addr
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_call_unexpected(i8*) local_unnamed_addr
+declare void @__cxa_call_unexpected(ptr) local_unnamed_addr
attributes #0 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
%v8i16 = type <8 x i16>
%v16i8 = type <16 x i8>
-define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_pos_even(ptr %P, ptr %S) {
+ %p = load %v4i32, ptr %P
%r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
- store %v4i32 %r, %v4i32* %S
+ store %v4i32 %r, ptr %S
ret void
}
; CHECK: vspltisw [[REG1:[0-9]+]], 9
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_neg_even(ptr %P, ptr %S) {
+ %p = load %v4i32, ptr %P
%r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
- store %v4i32 %r, %v4i32* %S
+ store %v4i32 %r, ptr %S
ret void
}
; CHECK: vspltisw [[REG1:[0-9]+]], -14
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_pos_even(ptr %P, ptr %S) {
+ %p = load %v8i16, ptr %P
%r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
- store %v8i16 %r, %v8i16* %S
+ store %v8i16 %r, ptr %S
ret void
}
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_neg_even(ptr %P, ptr %S) {
+ %p = load %v8i16, ptr %P
%r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
- store %v8i16 %r, %v8i16* %S
+ store %v8i16 %r, ptr %S
ret void
}
; CHECK: vspltish [[REG1:[0-9]+]], -16
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_pos_even(ptr %P, ptr %S) {
+ %p = load %v16i8, ptr %P
%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
- store %v16i8 %r, %v16i8* %S
+ store %v16i8 %r, ptr %S
ret void
}
; CHECK: vspltisb [[REG1:[0-9]+]], 8
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_neg_even(ptr %P, ptr %S) {
+ %p = load %v16i8, ptr %P
%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
- store %v16i8 %r, %v16i8* %S
+ store %v16i8 %r, ptr %S
ret void
}
; CHECK: vspltisb [[REG1:[0-9]+]], -9
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]
-define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_pos_odd(ptr %P, ptr %S) {
+ %p = load %v4i32, ptr %P
%r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
- store %v4i32 %r, %v4i32* %S
+ store %v4i32 %r, ptr %S
ret void
}
; CHECK: vspltisw [[REG1:[0-9]+]], 11
; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]
-define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
- %p = load %v4i32, %v4i32* %P
+define void @test_v4i32_neg_odd(ptr %P, ptr %S) {
+ %p = load %v4i32, ptr %P
%r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
- store %v4i32 %r, %v4i32* %S
+ store %v4i32 %r, ptr %S
ret void
}
; CHECK: vspltisw [[REG1:[0-9]+]], -11
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]
-define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_pos_odd(ptr %P, ptr %S) {
+ %p = load %v8i16, ptr %P
%r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
- store %v8i16 %r, %v8i16* %S
+ store %v8i16 %r, ptr %S
ret void
}
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]
-define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
- %p = load %v8i16, %v8i16* %P
+define void @test_v8i16_neg_odd(ptr %P, ptr %S) {
+ %p = load %v8i16, ptr %P
%r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
- store %v8i16 %r, %v8i16* %S
+ store %v8i16 %r, ptr %S
ret void
}
; CHECK: vspltish [[REG1:[0-9]+]], -15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]
-define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_pos_odd(ptr %P, ptr %S) {
+ %p = load %v16i8, ptr %P
%r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
- store %v16i8 %r, %v16i8* %S
+ store %v16i8 %r, ptr %S
ret void
}
; CHECK: vspltisb [[REG1:[0-9]+]], 1
; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]
-define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
- %p = load %v16i8, %v16i8* %P
+define void @test_v16i8_neg_odd(ptr %P, ptr %S) {
+ %p = load %v16i8, ptr %P
%r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
- store %v16i8 %r, %v16i8* %S
+ store %v16i8 %r, ptr %S
ret void
}
define void @foo(float inreg %s.coerce) nounwind {
entry:
%s = alloca %struct.Sf1, align 4
- %coerce.dive = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
- store float %s.coerce, float* %coerce.dive, align 1
- %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
- %0 = load float, float* %coerce.dive1, align 1
+ store float %s.coerce, ptr %s, align 1
+ %0 = load float, ptr %s, align 1
call void (i32, ...) @testvaSf1(i32 1, float inreg %0)
ret void
}
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck -check-prefix=P64 %s
; PR8327
-define i8* @test1(i8** %foo) nounwind {
+define ptr @test1(ptr %foo) nounwind {
; P32-LABEL: test1:
; P32: # %bb.0:
; P32-NEXT: lbz r4, 0(r3)
; P64-NEXT: std r5, 0(r3)
; P64-NEXT: ld r3, 0(r4)
; P64-NEXT: blr
- %A = va_arg i8** %foo, i8*
- ret i8* %A
+ %A = va_arg ptr %foo, ptr
+ ret ptr %A
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep vcmpbfp | count 1
-define void @test(<4 x float>* %x, <4 x float>* %y, i32* %P) {
+define void @test(ptr %x, ptr %y, ptr %P) {
entry:
- %tmp = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp2 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, ptr %x ; <<4 x float>> [#uses=1]
+ %tmp2 = load <4 x float>, ptr %y ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 ) ; <i32> [#uses=1]
- %tmp4 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp6 = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp4 = load <4 x float>, ptr %x ; <<4 x float>> [#uses=1]
+ %tmp6 = load <4 x float>, ptr %y ; <<4 x float>> [#uses=1]
%tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]
%tmp7 = bitcast <4 x i32> %tmp.upgrd.2 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp7, <4 x float>* %x
- store i32 %tmp.upgrd.1, i32* %P
+ store <4 x float> %tmp7, ptr %x
+ store i32 %tmp.upgrd.1, ptr %P
ret void
}
; Function Attrs: nounwind
define void @test1(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, <4 x float> inreg %vs.coerce) #0 {
entry:
- store <4 x float> %vs.coerce, <4 x float>* @ve, align 16
+ store <4 x float> %vs.coerce, ptr @ve, align 16
ret void
; CHECK-LABEL: @test1
}
; Function Attrs: nounwind
-define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
+define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, ptr byval(%struct.s2) nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
- %0 = load i64, i64* %m, align 8
- store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
- %1 = load <4 x float>, <4 x float>* %v, align 16
- store <4 x float> %1, <4 x float>* @ve, align 16
+ %0 = load i64, ptr %vs, align 8
+ store i64 %0, ptr @n, align 8
+ %v = getelementptr inbounds %struct.s2, ptr %vs, i64 0, i32 1
+ %1 = load <4 x float>, ptr %v, align 16
+ store <4 x float> %1, ptr @ve, align 16
ret void
; CHECK-LABEL: @test2
}
; Function Attrs: nounwind
-define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval(%struct.s2) nocapture readonly %vs) #0 {
+define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, ptr byval(%struct.s2) nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
- %0 = load i64, i64* %m, align 8
- store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
- %1 = load <4 x float>, <4 x float>* %v, align 16
- store <4 x float> %1, <4 x float>* @ve, align 16
+ %0 = load i64, ptr %vs, align 8
+ store i64 %0, ptr @n, align 8
+ %v = getelementptr inbounds %struct.s2, ptr %vs, i64 0, i32 1
+ %1 = load <4 x float>, ptr %v, align 16
+ store <4 x float> %1, ptr @ve, align 16
ret void
; CHECK-LABEL: @test3
; RUN: -verify-machineinstrs -vec-extabi | \
; RUN: FileCheck %s --check-prefixes=AIX,AIX32
-define dso_local void @test(i32* %Arr, i32 signext %Len) {
+define dso_local void @test(ptr %Arr, i32 signext %Len) {
; CHECK-LABEL: test:
; CHECK: lxv [[REG:vs[0-9]+]], 0(r{{[0-9]+}})
; CHECK-NOT: [[REG]]
%induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
%0 = add i32 %index, 0
%1 = sext i32 %0 to i64
- %2 = getelementptr inbounds i32, i32* %Arr, i64 %1
- %3 = getelementptr inbounds i32, i32* %2, i32 0
- %4 = bitcast i32* %3 to <4 x i32>*
- %wide.load = load <4 x i32>, <4 x i32>* %4, align 4
- %5 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %wide.load)
- %6 = sext i32 %0 to i64
- %7 = getelementptr inbounds i32, i32* %Arr, i64 %6
- %8 = getelementptr inbounds i32, i32* %7, i32 0
- %9 = bitcast i32* %8 to <4 x i32>*
- store <4 x i32> %5, <4 x i32>* %9, align 4
+ %2 = getelementptr inbounds i32, ptr %Arr, i64 %1
+ %wide.load = load <4 x i32>, ptr %2, align 4
+ %3 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %wide.load)
+ %4 = sext i32 %0 to i64
+ %5 = getelementptr inbounds i32, ptr %Arr, i64 %4
+ store <4 x i32> %3, ptr %5, align 4
%index.next = add i32 %index, 4
- %10 = icmp eq i32 %index.next, %n.vec
- br i1 %10, label %middle.block, label %vector.body
+ %6 = icmp eq i32 %index.next, %n.vec
+ br i1 %6, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%cmp.n = icmp eq i32 %Len, %n.vec
for.body: ; preds = %for.inc, %scalar.ph
%i.02 = phi i32 [ %bc.resume.val, %scalar.ph ], [ %inc, %for.inc ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32, i32* %Arr, i64 %idxprom
- %11 = load i32, i32* %arrayidx, align 4
- %12 = call i32 @llvm.bswap.i32(i32 %11)
+ %arrayidx = getelementptr inbounds i32, ptr %Arr, i64 %idxprom
+ %7 = load i32, ptr %arrayidx, align 4
+ %8 = call i32 @llvm.bswap.i32(i32 %7)
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32, i32* %Arr, i64 %idxprom1
- store i32 %12, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %Arr, i64 %idxprom1
+ store i32 %8, ptr %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
; RUN: -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: < %s | FileCheck %s -check-prefix=CHECK-BE
-define dso_local void @testutof(<8 x i16> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testutof(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testutof:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = uitofp i16 %vecext to float
- store float %conv, float* %ptr, align 4
+ store float %conv, ptr %ptr, align 4
ret void
}
-define dso_local void @testutod(<8 x i16> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testutod(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testutod:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = uitofp i16 %vecext to double
- store double %conv, double* %ptr, align 8
+ store double %conv, ptr %ptr, align 8
ret void
}
-define dso_local void @teststof(<8 x i16> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @teststof(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: teststof:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sitofp i16 %vecext to float
- store float %conv, float* %ptr, align 4
+ store float %conv, ptr %ptr, align 4
ret void
}
-define dso_local void @teststod(<8 x i16> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @teststod(<8 x i16> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: teststod:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 14
entry:
%vecext = extractelement <8 x i16> %a, i32 0
%conv = sitofp i16 %vecext to double
- store double %conv, double* %ptr, align 8
+ store double %conv, ptr %ptr, align 8
ret void
}
-define dso_local void @testsubtod(<16 x i8> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsubtod(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testsubtod:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = uitofp i8 %vecext to double
- store double %conv, double* %ptr, align 8
+ store double %conv, ptr %ptr, align 8
ret void
}
-define dso_local void @testsbtod(<16 x i8> %a, double* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsbtod(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testsbtod:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractuh v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sitofp i8 %vecext to double
- store double %conv, double* %ptr, align 8
+ store double %conv, ptr %ptr, align 8
ret void
}
-define dso_local void @testsubtof(<16 x i8> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsubtof(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testsubtof:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = uitofp i8 %vecext to float
- store float %conv, float* %ptr, align 8
+ store float %conv, ptr %ptr, align 8
ret void
}
-define dso_local void @testsbtof(<16 x i8> %a, float* nocapture %ptr) local_unnamed_addr #0 {
+define dso_local void @testsbtof(<16 x i8> %a, ptr nocapture %ptr) local_unnamed_addr #0 {
; CHECK-LABEL: testsbtof:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vextractub v2, v2, 15
entry:
%vecext = extractelement <16 x i8> %a, i32 0
%conv = sitofp i8 %vecext to float
- store float %conv, float* %ptr, align 8
+ store float %conv, ptr %ptr, align 8
ret void
}
; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN: FileCheck %s --check-prefix=CHECK-BE
-define void @test8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @test8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: test8:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI0_0@toc@ha
; CHECK-BE-NEXT: stxv vs3, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+ %0 = load <8 x i16>, ptr %SrcPtr, align 16
%1 = uitofp <8 x i16> %0 to <8 x double>
- store <8 x double> %1, <8 x double>* %Sink, align 16
+ store <8 x double> %1, ptr %Sink, align 16
ret void
}
-define void @test4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @test4(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: test4:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI1_0@toc@ha
; CHECK-BE-NEXT: stxv vs1, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+ %0 = load <4 x i16>, ptr %SrcPtr, align 16
%1 = uitofp <4 x i16> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %Sink, align 16
+ store <4 x double> %1, ptr %Sink, align 16
ret void
}
-define void @test2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @test2(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: test2:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI2_0@toc@ha
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+ %0 = load <2 x i16>, ptr %SrcPtr, align 16
%1 = uitofp <2 x i16> %0 to <2 x double>
- store <2 x double> %1, <2 x double>* %Sink, align 16
+ store <2 x double> %1, ptr %Sink, align 16
ret void
}
-define void @stest8(<8 x double>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @stest8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: stest8:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI3_0@toc@ha
; CHECK-BE-NEXT: stxv vs3, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+ %0 = load <8 x i16>, ptr %SrcPtr, align 16
%1 = sitofp <8 x i16> %0 to <8 x double>
- store <8 x double> %1, <8 x double>* %Sink, align 16
+ store <8 x double> %1, ptr %Sink, align 16
ret void
}
-define void @stest4(<4 x double>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @stest4(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: stest4:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI4_0@toc@ha
; CHECK-BE-NEXT: stxv vs1, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+ %0 = load <4 x i16>, ptr %SrcPtr, align 16
%1 = sitofp <4 x i16> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %Sink, align 16
+ store <4 x double> %1, ptr %Sink, align 16
ret void
}
-define void @stest2(<2 x double>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @stest2(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-P8-LABEL: stest2:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI5_0@toc@ha
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+ %0 = load <2 x i16>, ptr %SrcPtr, align 16
%1 = sitofp <2 x i16> %0 to <2 x double>
- store <2 x double> %1, <2 x double>* %Sink, align 16
+ store <2 x double> %1, ptr %Sink, align 16
ret void
}
; RUN: -mattr=+vsx -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN: FileCheck %s --check-prefix=CHECK-BE
-define void @test8i8(<8 x i8>* nocapture %Sink, <8 x i16>* nocapture readonly %SrcPtr) {
+define void @test8i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: std r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <8 x i16>, <8 x i16>* %SrcPtr, align 16
+ %0 = load <8 x i16>, ptr %SrcPtr, align 16
%1 = trunc <8 x i16> %0 to <8 x i8>
- store <8 x i8> %1, <8 x i8>* %Sink, align 16
+ store <8 x i8> %1, ptr %Sink, align 16
ret void
}
-define void @test4i8(<4 x i8>* nocapture %Sink, <4 x i16>* nocapture readonly %SrcPtr) {
+define void @test4i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: stw r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x i16>, <4 x i16>* %SrcPtr, align 16
+ %0 = load <4 x i16>, ptr %SrcPtr, align 16
%1 = trunc <4 x i16> %0 to <4 x i8>
- store <4 x i8> %1, <4 x i8>* %Sink, align 16
+ store <4 x i8> %1, ptr %Sink, align 16
ret void
}
-define void @test4i8w(<4 x i8>* nocapture %Sink, <4 x i32>* nocapture readonly %SrcPtr) {
+define void @test4i8w(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test4i8w:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LCPI2_0@toc@ha
; CHECK-BE-NEXT: stw r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* %SrcPtr, align 16
+ %0 = load <4 x i32>, ptr %SrcPtr, align 16
%1 = trunc <4 x i32> %0 to <4 x i8>
- store <4 x i8> %1, <4 x i8>* %Sink, align 16
+ store <4 x i8> %1, ptr %Sink, align 16
ret void
}
-define void @test2i8(<2 x i8>* nocapture %Sink, <2 x i16>* nocapture readonly %SrcPtr) {
+define void @test2i8(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: sth r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <2 x i16>, <2 x i16>* %SrcPtr, align 16
+ %0 = load <2 x i16>, ptr %SrcPtr, align 16
%1 = trunc <2 x i16> %0 to <2 x i8>
- store <2 x i8> %1, <2 x i8>* %Sink, align 16
+ store <2 x i8> %1, ptr %Sink, align 16
ret void
}
-define void @test4i16(<4 x i16>* nocapture %Sink, <4 x i32>* nocapture readonly %SrcPtr) {
+define void @test4i16(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: std r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <4 x i32>, <4 x i32>* %SrcPtr, align 16
+ %0 = load <4 x i32>, ptr %SrcPtr, align 16
%1 = trunc <4 x i32> %0 to <4 x i16>
- store <4 x i16> %1, <4 x i16>* %Sink, align 16
+ store <4 x i16> %1, ptr %Sink, align 16
ret void
}
-define void @test2i16(<2 x i16>* nocapture %Sink, <2 x i32>* nocapture readonly %SrcPtr) {
+define void @test2i16(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: stw r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <2 x i32>, <2 x i32>* %SrcPtr, align 16
+ %0 = load <2 x i32>, ptr %SrcPtr, align 16
%1 = trunc <2 x i32> %0 to <2 x i16>
- store <2 x i16> %1, <2 x i16>* %Sink, align 16
+ store <2 x i16> %1, ptr %Sink, align 16
ret void
}
-define void @test2i16d(<2 x i16>* nocapture %Sink, <2 x i64>* nocapture readonly %SrcPtr) {
+define void @test2i16d(ptr nocapture %Sink, ptr nocapture readonly %SrcPtr) {
; CHECK-LABEL: test2i16d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LCPI6_0@toc@ha
; CHECK-BE-NEXT: stw r4, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %0 = load <2 x i64>, <2 x i64>* %SrcPtr, align 16
+ %0 = load <2 x i64>, ptr %SrcPtr, align 16
%1 = trunc <2 x i64> %0 to <2 x i16>
- store <2 x i16> %1, <2 x i16>* %Sink, align 16
+ store <2 x i16> %1, ptr %Sink, align 16
ret void
}
; CHECK: .byte 30
; CHECK: .byte 29
; CHECK: .byte 3
-@baz = common global <16 x i8> zeroinitializer ; <<16 x i8>*> [#uses=1]
+@baz = common global <16 x i8> zeroinitializer ; <ptr> [#uses=1]
define void @foo(<16 x i8> %x) nounwind ssp {
entry:
- %x_addr = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %temp = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
+ %x_addr = alloca <16 x i8> ; <ptr> [#uses=2]
+ %temp = alloca <16 x i8> ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <16 x i8> %x, <16 x i8>* %x_addr
- store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>, <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
+ store <16 x i8> %x, ptr %x_addr
+ store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, ptr %temp, align 16
+ %0 = load <16 x i8>, ptr %x_addr, align 16 ; <<16 x i8>> [#uses=1]
+ %1 = load <16 x i8>, ptr %temp, align 16 ; <<16 x i8>> [#uses=1]
%tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp, <16 x i8>* @baz, align 16
+ store <16 x i8> %tmp, ptr @baz, align 16
br label %return
return: ; preds = %entry
; A predicate compare used immediately by a branch should not generate an mfcr.
-define void @test(<4 x float>* %A, <4 x float>* %B) {
- %tmp = load <4 x float>, <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>, <4 x float>* %B ; <<4 x float>> [#uses=1]
+define void @test(ptr %A, ptr %B) {
+ %tmp = load <4 x float>, ptr %A ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, ptr %B ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp3 ) ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.2, label %cond_true, label %UnifiedReturnBlock
cond_true: ; preds = %0
- store <4 x float> zeroinitializer, <4 x float>* %B
+ store <4 x float> zeroinitializer, ptr %B
ret void
UnifiedReturnBlock: ; preds = %0
; CHECK-NEXT: stvx 2, 4, 3
; CHECK-NEXT: blr
entry:
- %tmp0 = load <16 x i8>, <16 x i8>* @a, align 16
+ %tmp0 = load <16 x i8>, ptr @a, align 16
%tmp180.i = extractelement <16 x i8> %tmp0, i32 0 ; <i8> [#uses=1]
%tmp181.i = insertelement <16 x i8> <i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp180.i, i32 2 ; <<16 x i8>> [#uses=1]
%tmp182.i = extractelement <16 x i8> %tmp0, i32 1 ; <i8> [#uses=1]
%tmp196.i = insertelement <16 x i8> %tmp195.i, i8 0, i32 12 ; <<16 x i8>> [#uses=1]
%tmp197.i = insertelement <16 x i8> %tmp196.i, i8 0, i32 13 ; <<16 x i8>> [#uses=1]
%tmp201 = shufflevector <16 x i8> %tmp197.i, <16 x i8> %tmp0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 28, i32 29>; ModuleID = 'try.c'
- store <16 x i8> %tmp201, <16 x i8>* @c, align 16
+ store <16 x i8> %tmp201, ptr @c, align 16
br label %return
return: ; preds = %bb2
; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck %s --check-prefixes=CHECK,BE
; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,LE
-define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
+define void @test1(ptr %P1, ptr %P2, ptr %P3) nounwind {
; BE-LABEL: test1:
; BE: # %bb.0:
; BE-NEXT: lxvw4x 0, 0, 3
; LE-NEXT: xxswapd 0, 0
; LE-NEXT: stxvd2x 0, 0, 5
; LE-NEXT: blr
- %tmp = load <4 x i32>, <4 x i32>* %P1 ; <<4 x i32>> [#uses=1]
+ %tmp = load <4 x i32>, ptr %P1 ; <<4 x i32>> [#uses=1]
%tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp4, <4 x i32>* %P1
- %tmp7 = load <4 x i32>, <4 x i32>* %P2 ; <<4 x i32>> [#uses=1]
+ store <4 x i32> %tmp4, ptr %P1
+ %tmp7 = load <4 x i32>, ptr %P2 ; <<4 x i32>> [#uses=1]
%tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %P2
- %tmp.upgrd.1 = load <4 x float>, <4 x float>* %P3 ; <<4 x float>> [#uses=1]
+ store <4 x i32> %tmp9, ptr %P2
+ %tmp.upgrd.1 = load <4 x float>, ptr %P3 ; <<4 x float>> [#uses=1]
%tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
%tmp13 = bitcast <4 x i32> %tmp12 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp13, <4 x float>* %P3
+ store <4 x float> %tmp13, ptr %P3
ret void
}
@cte_int = global <4 x i32> <i32 6, i32 6, i32 6, i32 6>, align 16
-define void @v4f32_to_v4i32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
+define void @v4f32_to_v4i32(<4 x float> %x, ptr nocapture %y) nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @cte_float, align 16
+ %0 = load <4 x float>, ptr @cte_float, align 16
%mul = fmul <4 x float> %0, %x
%1 = fptosi <4 x float> %mul to <4 x i32>
- store <4 x i32> %1, <4 x i32>* %y, align 16
+ store <4 x i32> %1, ptr %y, align 16
ret void
}
;CHECK-LABEL: v4f32_to_v4i32:
;CHECK: vctsxs {{[0-9]+}}, {{[0-9]+}}, 0
-define void @v4f32_to_v4u32(<4 x float> %x, <4 x i32>* nocapture %y) nounwind {
+define void @v4f32_to_v4u32(<4 x float> %x, ptr nocapture %y) nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @cte_float, align 16
+ %0 = load <4 x float>, ptr @cte_float, align 16
%mul = fmul <4 x float> %0, %x
%1 = fptoui <4 x float> %mul to <4 x i32>
- store <4 x i32> %1, <4 x i32>* %y, align 16
+ store <4 x i32> %1, ptr %y, align 16
ret void
}
;CHECK-LABEL: v4f32_to_v4u32:
;CHECK: vctuxs {{[0-9]+}}, {{[0-9]+}}, 0
-define void @v4i32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
+define void @v4i32_to_v4f32(<4 x i32> %x, ptr nocapture %y) nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
+ %0 = load <4 x i32>, ptr @cte_int, align 16
%mul = mul <4 x i32> %0, %x
%1 = sitofp <4 x i32> %mul to <4 x float>
- store <4 x float> %1, <4 x float>* %y, align 16
+ store <4 x float> %1, ptr %y, align 16
ret void
}
;CHECK-LABEL: v4i32_to_v4f32:
;CHECK: vcfsx {{[0-9]+}}, {{[0-9]+}}, 0
-define void @v4u32_to_v4f32(<4 x i32> %x, <4 x float>* nocapture %y) nounwind {
+define void @v4u32_to_v4f32(<4 x i32> %x, ptr nocapture %y) nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @cte_int, align 16
+ %0 = load <4 x i32>, ptr @cte_int, align 16
%mul = mul <4 x i32> %0, %x
%1 = uitofp <4 x i32> %mul to <4 x float>
- store <4 x float> %1, <4 x float>* %y, align 16
+ store <4 x float> %1, ptr %y, align 16
ret void
}
;CHECK-LABEL: v4u32_to_v4f32:
ret i64 %1
}
-define <8 x i16> @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs1
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptoui <8 x float> %a to <8 x i16>
ret <8 x i16> %1
}
-define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: stxv vs0, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptoui <16 x float> %a to <16 x i16>
- store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+ store <16 x i16> %1, ptr %agg.result, align 32
ret void
}
ret i64 %1
}
-define <8 x i16> @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs1
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptosi <8 x float> %a to <8 x i16>
ret <8 x i16> %1
}
-define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r4
; CHECK-BE-NEXT: stxv vs0, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptosi <16 x float> %a to <16 x i16>
- store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+ store <16 x i16> %1, ptr %agg.result, align 32
ret void
}
ret <2 x i64> %1
}
-define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxmrglw vs0, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = fptoui <4 x float> %a to <4 x i64>
- store <4 x i64> %0, <4 x i64>* %agg.result, align 32
+ store <4 x i64> %0, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs2, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptoui <8 x float> %a to <8 x i64>
- store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+ store <8 x i64> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r7, 48
; CHECK-BE-NEXT: stxv vs2, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptoui <16 x float> %a to <16 x i64>
- store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+ store <16 x i64> %1, ptr %agg.result, align 128
ret void
}
ret <2 x i64> %1
}
-define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x i64>) %agg.result, <4 x float> %a) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxmrglw vs0, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = fptoui <4 x float> %a to <4 x i64>
- store <4 x i64> %0, <4 x i64>* %agg.result, align 32
+ store <4 x i64> %0, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs2, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptoui <8 x float> %a to <8 x i64>
- store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+ store <8 x i64> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r7, 48
; CHECK-BE-NEXT: stxv vs2, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptoui <16 x float> %a to <16 x i64>
- store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+ store <16 x i64> %1, ptr %agg.result, align 128
ret void
}
ret i32 %1
}
-define i64 @test8elt(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define i64 @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptoui <8 x float> %a to <8 x i8>
%2 = bitcast <8 x i8> %1 to i64
ret i64 %2
}
-define <16 x i8> @test16elt(<16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define <16 x i8> @test16elt(ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs2
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptoui <16 x float> %a to <16 x i8>
ret <16 x i8> %1
}
ret i32 %1
}
-define i64 @test8elt_signed(<8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define i64 @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptosi <8 x float> %a to <8 x i8>
%2 = bitcast <8 x i8> %1 to i64
ret i64 %2
}
-define <16 x i8> @test16elt_signed(<16 x float>* nocapture readonly) local_unnamed_addr #3 {
+define <16 x i8> @test16elt_signed(ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: lxvd2x vs0, 0, r3
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs2
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptosi <16 x float> %a to <16 x i8>
ret <16 x i8> %1
}
ret i32 %1
}
-define i64 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptoui <4 x double> %a to <4 x i16>
%2 = bitcast <4 x i16> %1 to i64
ret i64 %2
}
-define <8 x i16> @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs2
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptoui <8 x double> %a to <8 x i16>
ret <8 x i16> %1
}
-define void @test16elt(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptoui <16 x double> %a to <16 x i16>
- store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+ store <16 x i16> %1, ptr %agg.result, align 32
ret void
}
ret i32 %1
}
-define i64 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptosi <4 x double> %a to <4 x i16>
%2 = bitcast <4 x i16> %1 to i64
ret i64 %2
}
-define <8 x i16> @test8elt_signed(<8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <8 x i16> @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs2
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptosi <8 x double> %a to <8 x i16>
ret <8 x i16> %1
}
-define void @test16elt_signed(<16 x i16>* noalias nocapture sret(<16 x i16>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i16>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptosi <16 x double> %a to <16 x i16>
- store <16 x i16> %1, <16 x i16>* %agg.result, align 32
+ store <16 x i16> %1, ptr %agg.result, align 32
ret void
}
ret i64 %1
}
-define <4 x i32> @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x i32> @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vmrgew v2, v3, v2
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptoui <4 x double> %a to <4 x i32>
ret <4 x i32> %1
}
-define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v3, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptoui <8 x double> %a to <8 x i32>
- store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+ store <8 x i32> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v5, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptoui <16 x double> %a to <16 x i32>
- store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+ store <16 x i32> %1, ptr %agg.result, align 64
ret void
}
ret i64 %1
}
-define <4 x i32> @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x i32> @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vmrgew v2, v3, v2
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptosi <4 x double> %a to <4 x i32>
ret <4 x i32> %1
}
-define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v3, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptosi <8 x double> %a to <8 x i32>
- store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+ store <8 x i32> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v5, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptosi <16 x double> %a to <16 x i32>
- store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+ store <16 x i32> %1, ptr %agg.result, align 64
ret void
}
ret i16 %1
}
-define i32 @test4elt(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i32 @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vextuwlx r3, r3, v2
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptoui <4 x double> %a to <4 x i8>
%2 = bitcast <4 x i8> %1 to i32
ret i32 %2
}
-define i64 @test8elt(<8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test8elt(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptoui <8 x double> %a to <8 x i8>
%2 = bitcast <8 x i8> %1 to i64
ret i64 %2
}
-define <16 x i8> @test16elt(<16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <16 x i8> @test16elt(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs4
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptoui <16 x double> %a to <16 x i8>
ret <16 x i8> %1
}
ret i16 %1
}
-define i32 @test4elt_signed(<4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i32 @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vextuwlx r3, r3, v2
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptosi <4 x double> %a to <4 x i8>
%2 = bitcast <4 x i8> %1 to i32
ret i32 %2
}
-define i64 @test8elt_signed(<8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define i64 @test8elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: mffprd r3, f0
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptosi <8 x double> %a to <8 x i8>
%2 = bitcast <8 x i8> %1 to i64
ret i64 %2
}
-define <16 x i8> @test16elt_signed(<16 x double>* nocapture readonly) local_unnamed_addr #2 {
+define <16 x i8> @test16elt_signed(ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: xxmrghd v2, vs0, vs4
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptosi <16 x double> %a to <16 x i8>
ret <16 x i8> %1
}
ret <4 x i32> %0
}
-define void @test8elt(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptoui <8 x float> %a to <8 x i32>
- store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+ store <8 x i32> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptoui <16 x float> %a to <16 x i32>
- store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+ store <16 x i32> %1, ptr %agg.result, align 64
ret void
}
ret <4 x i32> %0
}
-define void @test8elt_signed(<8 x i32>* noalias nocapture sret(<8 x i32>) %agg.result, <8 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x float>, <8 x float>* %0, align 32
+ %a = load <8 x float>, ptr %0, align 32
%1 = fptosi <8 x float> %a to <8 x i32>
- store <8 x i32> %1, <8 x i32>* %agg.result, align 32
+ store <8 x i32> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x i32>* noalias nocapture sret(<16 x i32>) %agg.result, <16 x float>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i32>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x float>, <16 x float>* %0, align 64
+ %a = load <16 x float>, ptr %0, align 64
%1 = fptosi <16 x float> %a to <16 x i32>
- store <16 x i32> %1, <16 x i32>* %agg.result, align 64
+ store <16 x i32> %1, ptr %agg.result, align 64
ret void
}
ret <2 x i64> %0
}
-define void @test4elt(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptoui <4 x double> %a to <4 x i64>
- store <4 x i64> %1, <4 x i64>* %agg.result, align 32
+ store <4 x i64> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptoui <8 x double> %a to <8 x i64>
- store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+ store <8 x i64> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs7, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptoui <16 x double> %a to <16 x i64>
- store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+ store <16 x i64> %1, ptr %agg.result, align 128
ret void
}
ret <2 x i64> %0
}
-define void @test4elt_signed(<4 x i64>* noalias nocapture sret(<4 x i64>) %agg.result, <4 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x double>, <4 x double>* %0, align 32
+ %a = load <4 x double>, ptr %0, align 32
%1 = fptosi <4 x double> %a to <4 x i64>
- store <4 x i64> %1, <4 x i64>* %agg.result, align 32
+ store <4 x i64> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x i64>* noalias nocapture sret(<8 x i64>) %agg.result, <8 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x double>, <8 x double>* %0, align 64
+ %a = load <8 x double>, ptr %0, align 64
%1 = fptosi <8 x double> %a to <8 x i64>
- store <8 x i64> %1, <8 x i64>* %agg.result, align 64
+ store <8 x i64> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x i64>* noalias nocapture sret(<16 x i64>) %agg.result, <16 x double>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x i64>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs7, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x double>, <16 x double>* %0, align 128
+ %a = load <16 x double>, ptr %0, align 128
%1 = fptosi <16 x double> %a to <16 x i64>
- store <16 x i64> %1, <16 x i64>* %agg.result, align 128
+ store <16 x i64> %1, ptr %agg.result, align 128
ret void
}
ret <4 x float> %1
}
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxlxor v3, v3, v3
; CHECK-BE-NEXT: blr
entry:
%0 = uitofp <8 x i16> %a to <8 x float>
- store <8 x float> %0, <8 x float>* %agg.result, align 32
+ store <8 x float> %0, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI3_0@toc@ha
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i16>, <16 x i16>* %0, align 32
+ %a = load <16 x i16>, ptr %0, align 32
%1 = uitofp <16 x i16> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret <4 x float> %1
}
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: vmrglh v4, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = sitofp <8 x i16> %a to <8 x float>
- store <8 x float> %0, <8 x float>* %agg.result, align 32
+ store <8 x float> %0, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs2, 32(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i16>, <16 x i16>* %0, align 32
+ %a = load <16 x i16>, ptr %0, align 32
%1 = sitofp <16 x i16> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret <2 x double> %1
}
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI1_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <4 x i16>
%1 = uitofp <4 x i16> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI2_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = uitofp <8 x i16> %a to <8 x double>
- store <8 x double> %0, <8 x double>* %agg.result, align 64
+ store <8 x double> %0, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI3_0@toc@ha
; CHECK-BE-NEXT: stxv vs6, 96(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i16>, <16 x i16>* %0, align 32
+ %a = load <16 x i16>, ptr %0, align 32
%1 = uitofp <16 x i16> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
ret <2 x double> %1
}
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI5_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <4 x i16>
%1 = sitofp <4 x i16> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, <8 x i16> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI6_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = sitofp <8 x i16> %a to <8 x double>
- store <8 x double> %0, <8 x double>* %agg.result, align 64
+ store <8 x double> %0, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i16>* nocapture readonly) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI7_0@toc@ha
; CHECK-BE-NEXT: stxv vs7, 112(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i16>, <16 x i16>* %0, align 32
+ %a = load <16 x i16>, ptr %0, align 32
%1 = sitofp <16 x i16> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
ret <2 x double> %1
}
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxmrglw v3, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = uitofp <4 x i32> %a to <4 x double>
- store <4 x double> %0, <4 x double>* %agg.result, align 32
+ store <4 x double> %0, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i32>, <8 x i32>* %0, align 32
+ %a = load <8 x i32>, ptr %0, align 32
%1 = uitofp <8 x i32> %a to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs4, 112(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i32>, <16 x i32>* %0, align 64
+ %a = load <16 x i32>, ptr %0, align 64
%1 = uitofp <16 x i32> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
ret <2 x double> %1
}
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, <4 x i32> %a) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: xxmrglw v3, v2, v2
; CHECK-BE-NEXT: blr
entry:
%0 = sitofp <4 x i32> %a to <4 x double>
- store <4 x double> %0, <4 x double>* %agg.result, align 32
+ store <4 x double> %0, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i32>, <8 x i32>* %0, align 32
+ %a = load <8 x i32>, ptr %0, align 32
%1 = sitofp <8 x i32> %a to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs4, 112(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i32>, <16 x i32>* %0, align 64
+ %a = load <16 x i32>, ptr %0, align 64
%1 = sitofp <16 x i32> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
ret i64 %1
}
-define <4 x float> @test4elt(<4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x float> @test4elt(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vpkudum v2, v2, v3
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x i64>, <4 x i64>* %0, align 32
+ %a = load <4 x i64>, ptr %0, align 32
%1 = uitofp <4 x i64> %a to <4 x float>
ret <4 x float> %1
}
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v2, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i64>, <8 x i64>* %0, align 64
+ %a = load <8 x i64>, ptr %0, align 64
%1 = uitofp <8 x i64> %a to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v2, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i64>, <16 x i64>* %0, align 128
+ %a = load <16 x i64>, ptr %0, align 128
%1 = uitofp <16 x i64> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret i64 %1
}
-define <4 x float> @test4elt_signed(<4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define <4 x float> @test4elt_signed(ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r4, 16
; CHECK-BE-NEXT: vpkudum v2, v2, v3
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x i64>, <4 x i64>* %0, align 32
+ %a = load <4 x i64>, ptr %0, align 32
%1 = sitofp <4 x i64> %a to <4 x float>
ret <4 x float> %1
}
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v2, 16(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i64>, <8 x i64>* %0, align 64
+ %a = load <8 x i64>, ptr %0, align 64
%1 = sitofp <8 x i64> %a to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 32
; CHECK-BE-NEXT: stxv v2, 48(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i64>, <16 x i64>* %0, align 128
+ %a = load <16 x i64>, ptr %0, align 128
%1 = sitofp <16 x i64> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret <4 x float> %1
}
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI2_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <8 x i8>
%1 = uitofp <8 x i8> %0 to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI3_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = uitofp <16 x i8> %a to <16 x float>
- store <16 x float> %0, <16 x float>* %agg.result, align 64
+ store <16 x float> %0, ptr %agg.result, align 64
ret void
}
ret <4 x float> %1
}
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, i64 %a.coerce) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI6_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <8 x i8>
%1 = sitofp <8 x i8> %0 to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, <16 x i8> %a) local_unnamed_addr #3 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI7_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = sitofp <16 x i8> %a to <16 x float>
- store <16 x float> %0, <16 x float>* %agg.result, align 64
+ store <16 x float> %0, ptr %agg.result, align 64
ret void
}
ret <2 x double> %1
}
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI1_0@toc@ha
entry:
%0 = bitcast i32 %a.coerce to <4 x i8>
%1 = uitofp <4 x i8> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI2_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <8 x i8>
%1 = uitofp <8 x i8> %0 to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI3_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = uitofp <16 x i8> %a to <16 x double>
- store <16 x double> %0, <16 x double>* %agg.result, align 128
+ store <16 x double> %0, ptr %agg.result, align 128
ret void
}
ret <2 x double> %1
}
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, i32 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI5_0@toc@ha
entry:
%0 = bitcast i32 %a.coerce to <4 x i8>
%1 = sitofp <4 x i8> %0 to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, i64 %a.coerce) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r5, r2, .LCPI6_0@toc@ha
entry:
%0 = bitcast i64 %a.coerce to <8 x i8>
%1 = sitofp <8 x i8> %0 to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, <16 x i8> %a) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: addis r4, r2, .LCPI7_0@toc@ha
; CHECK-BE-NEXT: blr
entry:
%0 = sitofp <16 x i8> %a to <16 x double>
- store <16 x double> %0, <16 x double>* %agg.result, align 128
+ store <16 x double> %0, ptr %agg.result, align 128
ret void
}
ret <4 x float> %0
}
-define void @test8elt(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i32>, <8 x i32>* %0, align 32
+ %a = load <8 x i32>, ptr %0, align 32
%1 = uitofp <8 x i32> %a to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i32>, <16 x i32>* %0, align 64
+ %a = load <16 x i32>, ptr %0, align 64
%1 = uitofp <16 x i32> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret <4 x float> %0
}
-define void @test8elt_signed(<8 x float>* noalias nocapture sret(<8 x float>) %agg.result, <8 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs1, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i32>, <8 x i32>* %0, align 32
+ %a = load <8 x i32>, ptr %0, align 32
%1 = sitofp <8 x i32> %a to <8 x float>
- store <8 x float> %1, <8 x float>* %agg.result, align 32
+ store <8 x float> %1, ptr %agg.result, align 32
ret void
}
-define void @test16elt_signed(<16 x float>* noalias nocapture sret(<16 x float>) %agg.result, <16 x i32>* nocapture readonly) local_unnamed_addr #2 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x float>) %agg.result, ptr nocapture readonly) local_unnamed_addr #2 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs3, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i32>, <16 x i32>* %0, align 64
+ %a = load <16 x i32>, ptr %0, align 64
%1 = sitofp <16 x i32> %a to <16 x float>
- store <16 x float> %1, <16 x float>* %agg.result, align 64
+ store <16 x float> %1, ptr %agg.result, align 64
ret void
}
ret <2 x double> %0
}
-define void @test4elt(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt(ptr noalias nocapture sret(<4 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x i64>, <4 x i64>* %0, align 32
+ %a = load <4 x i64>, ptr %0, align 32
%1 = uitofp <4 x i64> %a to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i64>, <8 x i64>* %0, align 64
+ %a = load <8 x i64>, ptr %0, align 64
%1 = uitofp <8 x i64> %a to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test16elt:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i64>, <16 x i64>* %0, align 128
+ %a = load <16 x i64>, ptr %0, align 128
%1 = uitofp <16 x i64> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
ret <2 x double> %0
}
-define void @test4elt_signed(<4 x double>* noalias nocapture sret(<4 x double>) %agg.result, <4 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test4elt_signed(ptr noalias nocapture sret(<4 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test4elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <4 x i64>, <4 x i64>* %0, align 32
+ %a = load <4 x i64>, ptr %0, align 32
%1 = sitofp <4 x i64> %a to <4 x double>
- store <4 x double> %1, <4 x double>* %agg.result, align 32
+ store <4 x double> %1, ptr %agg.result, align 32
ret void
}
-define void @test8elt_signed(<8 x double>* noalias nocapture sret(<8 x double>) %agg.result, <8 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test8elt_signed(ptr noalias nocapture sret(<8 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test8elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <8 x i64>, <8 x i64>* %0, align 64
+ %a = load <8 x i64>, ptr %0, align 64
%1 = sitofp <8 x i64> %a to <8 x double>
- store <8 x double> %1, <8 x double>* %agg.result, align 64
+ store <8 x double> %1, ptr %agg.result, align 64
ret void
}
-define void @test16elt_signed(<16 x double>* noalias nocapture sret(<16 x double>) %agg.result, <16 x i64>* nocapture readonly) local_unnamed_addr #1 {
+define void @test16elt_signed(ptr noalias nocapture sret(<16 x double>) %agg.result, ptr nocapture readonly) local_unnamed_addr #1 {
; CHECK-P8-LABEL: test16elt_signed:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: li r5, 16
; CHECK-BE-NEXT: stxv vs0, 0(r3)
; CHECK-BE-NEXT: blr
entry:
- %a = load <16 x i64>, <16 x i64>* %0, align 128
+ %a = load <16 x i64>, ptr %0, align 128
%1 = sitofp <16 x i64> %a to <16 x double>
- store <16 x double> %1, <16 x double>* %agg.result, align 128
+ store <16 x double> %1, ptr %agg.result, align 128
ret void
}
; RUN: -mattr=+altivec -mattr=-vsx | FileCheck %s \
; RUN: -check-prefix=CHECK-NOVSX
-define void @test_float(<4 x float>* %A) {
+define void @test_float(ptr %A) {
; CHECK-LABEL: test_float
; CHECK-NOVSX-LABEL: test_float
- %tmp2 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, ptr %A
%tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
- store <4 x float> %tmp3, <4 x float>* %A
+ store <4 x float> %tmp3, ptr %A
ret void
; CHECK: xvnegsp
}
-define void @test_double(<2 x double>* %A) {
+define void @test_double(ptr %A) {
; CHECK-LABEL: test_double
; CHECK-NOVSX-LABEL: test_double
- %tmp2 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, ptr %A
%tmp3 = fsub <2 x double> < double -0.000000e+00, double -0.000000e+00 >, %tmp2
- store <2 x double> %tmp3, <2 x double>* %A
+ store <2 x double> %tmp3, ptr %A
ret void
; CHECK: xvnegdp
ret <4 x float> %vecins
}
-define <4 x float> @testFloat2(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat2(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-LABEL: testFloat2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 0(r5)
; AIX-P8-32-NEXT: lxvw4x v2, 0, r5
; AIX-P8-32-NEXT: blr
entry:
- %0 = bitcast i8* %b to float*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
- %1 = bitcast i8* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+ %0 = load float, ptr %b, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
ret <4 x float> %vecins2
}
-define <4 x float> @testFloat3(<4 x float> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <4 x float> @testFloat3(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-LABEL: testFloat3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plwz r3, 65536(r5), 0
; AIX-P8-32-NEXT: lxvw4x v2, 0, r5
; AIX-P8-32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
- %0 = bitcast i8* %add.ptr to float*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
- %1 = bitcast i8* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 %idx1
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 %idx2
+ %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+ %0 = load float, ptr %add.ptr, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 %idx1
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 %idx2
ret <4 x float> %vecins2
}
ret <4 x float> %vecins1
}
-define <4 x float> @testFloatImm2(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm2(<4 x float> %a, ptr %b) {
; CHECK-LABEL: testFloatImm2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r3, 0(r5)
; AIX-P8-32-NEXT: vperm v2, v2, v3, v4
; AIX-P8-32-NEXT: blr
entry:
- %0 = bitcast i32* %b to float*
- %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 1
- %1 = bitcast i32* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 0
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+ %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 1
+ %0 = load float, ptr %b, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 0
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
ret <4 x float> %vecins2
}
-define <4 x float> @testFloatImm3(<4 x float> %a, i32* %b) {
+define <4 x float> @testFloatImm3(<4 x float> %a, ptr %b) {
; CHECK-LABEL: testFloatImm3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plwz r3, 262144(r5), 0
; AIX-P8-32-NEXT: vperm v2, v2, v4, v3
; AIX-P8-32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
- %0 = bitcast i32* %add.ptr to float*
- %add.ptr1 = getelementptr inbounds i32, i32* %b, i64 68719476736
- %1 = bitcast i32* %add.ptr1 to float*
- %2 = load float, float* %0, align 4
- %vecins = insertelement <4 x float> %a, float %2, i32 0
- %3 = load float, float* %1, align 4
- %vecins2 = insertelement <4 x float> %vecins, float %3, i32 2
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i32, ptr %b, i64 68719476736
+ %0 = load float, ptr %add.ptr, align 4
+ %vecins = insertelement <4 x float> %a, float %0, i32 0
+ %1 = load float, ptr %add.ptr1, align 4
+ %vecins2 = insertelement <4 x float> %vecins, float %1, i32 2
ret <4 x float> %vecins2
}
ret <2 x double> %vecins
}
-define <2 x double> @testDouble2(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble2(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-LABEL: testDouble2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld r3, 0(r5)
; AIX-P8-32-NEXT: lxvd2x v2, 0, r4
; AIX-P8-32-NEXT: blr
entry:
- %0 = bitcast i8* %b to double*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 1
- %1 = bitcast i8* %add.ptr1 to double*
- %2 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
- %3 = load double, double* %1, align 8
- %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
+ %0 = load double, ptr %b, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+ %1 = load double, ptr %add.ptr1, align 8
+ %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
ret <2 x double> %vecins2
}
-define <2 x double> @testDouble3(<2 x double> %a, i8* %b, i32 zeroext %idx1, i32 zeroext %idx2) {
+define <2 x double> @testDouble3(<2 x double> %a, ptr %b, i32 zeroext %idx1, i32 zeroext %idx2) {
; CHECK-LABEL: testDouble3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pld r3, 65536(r5), 0
; AIX-P8-32-NEXT: lxvd2x v2, 0, r4
; AIX-P8-32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %b, i64 65536
- %0 = bitcast i8* %add.ptr to double*
- %add.ptr1 = getelementptr inbounds i8, i8* %b, i64 68719476736
- %1 = bitcast i8* %add.ptr1 to double*
- %2 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %2, i32 %idx1
- %3 = load double, double* %1, align 8
- %vecins2 = insertelement <2 x double> %vecins, double %3, i32 %idx2
+ %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
+ %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 68719476736
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 %idx1
+ %1 = load double, ptr %add.ptr1, align 8
+ %vecins2 = insertelement <2 x double> %vecins, double %1, i32 %idx2
ret <2 x double> %vecins2
}
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm2(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm2(<2 x double> %a, ptr %b) {
; CHECK-LABEL: testDoubleImm2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r5)
; AIX-P8-NEXT: xxpermdi v2, vs0, v2, 1
; AIX-P8-NEXT: blr
entry:
- %0 = bitcast i32* %b to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %0 = load double, ptr %b, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm3(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm3(<2 x double> %a, ptr %b) {
; CHECK-LABEL: testDoubleImm3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 4(r5)
; AIX-P8-NEXT: xxpermdi v2, vs0, v2, 1
; AIX-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 1
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 1
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm4(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm4(<2 x double> %a, ptr %b) {
; CHECK-LABEL: testDoubleImm4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: plfd f0, 262144(r5), 0
; AIX-P8-NEXT: xxpermdi v2, vs0, v2, 1
; AIX-P8-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 65536
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 65536
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
-define <2 x double> @testDoubleImm5(<2 x double> %a, i32* %b) {
+define <2 x double> @testDoubleImm5(<2 x double> %a, ptr %b) {
; CHECK-LABEL: testDoubleImm5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li r3, 1
; AIX-P8-32-NEXT: xxpermdi v2, vs0, v2, 1
; AIX-P8-32-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i64 68719476736
- %0 = bitcast i32* %add.ptr to double*
- %1 = load double, double* %0, align 8
- %vecins = insertelement <2 x double> %a, double %1, i32 0
+ %add.ptr = getelementptr inbounds i32, ptr %b, i64 68719476736
+ %0 = load double, ptr %add.ptr, align 8
+ %vecins = insertelement <2 x double> %a, double %0, i32 0
ret <2 x double> %vecins
}
; instruction. If run on a little endian machine, this should produce the
; vmrgow instruction. Note also that on little endian the input registers
; are swapped also.
-define void @check_merge_even_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @check_merge_even_xy(ptr %A, ptr %B) {
entry:
; CHECK-LE-LABEL: @check_merge_even_xy
; CHECK-BE-LABEL: @check_merge_even_xy
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2,
<16 x i32> <i32 0, i32 1, i32 2, i32 3,
i32 16, i32 17, i32 18, i32 19,
i32 24, i32 25, i32 26, i32 27>
; CHECK-LE: vmrgow 2, 3, 2
; CHECK-BE: vmrgew 2, 2, 3
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
; CHECK-LE: blr
; CHECK-BE: blr
; ordering. If run on a big endian machine, this should produce the vmrgew
; instruction. If run on a little endian machine, this should produce the
; vmrgow instruction.
-define void @check_merge_even_xx(<16 x i8>* %A) {
+define void @check_merge_even_xx(ptr %A) {
entry:
; CHECK-LE-LABEL: @check_merge_even_xx
; CHECK-BE-LABEL: @check_merge_even_xx
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp,
<16 x i32> <i32 0, i32 1, i32 2, i32 3,
i32 0, i32 1, i32 2, i32 3,
i32 8, i32 9, i32 10, i32 11>
; CHECK-LE: vmrgow 2, 2, 2
; CHECK-BE: vmrgew 2, 2, 2
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
; CHECK-LE: blr
; CHECK-BE: blr
; instruction. If run on a little endian machine, this should produce the
; vmrgew instruction. Note also that on little endian the input registers
; are swapped also.
-define void @check_merge_odd_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @check_merge_odd_xy(ptr %A, ptr %B) {
entry:
; CHECK-LE-LABEL: @check_merge_odd_xy
; CHECK-BE-LABEL: @check_merge_odd_xy
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2,
<16 x i32> <i32 4, i32 5, i32 6, i32 7,
i32 20, i32 21, i32 22, i32 23,
i32 28, i32 29, i32 30, i32 31>
; CHECK-LE: vmrgew 2, 3, 2
; CHECK-BE: vmrgow 2, 2, 3
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
; CHECK-LE: blr
; CHECK-BE: blr
; ordering. If run on a big endian machine, this should produce the vmrgow
; instruction. If run on a little endian machine, this should produce the
; vmrgew instruction.
-define void @check_merge_odd_xx(<16 x i8>* %A) {
+define void @check_merge_odd_xx(ptr %A) {
entry:
; CHECK-LE-LABEL: @check_merge_odd_xx
; CHECK-BE-LABEL: @check_merge_odd_xx
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp,
<16 x i32> <i32 4, i32 5, i32 6, i32 7,
i32 4, i32 5, i32 6, i32 7,
i32 12, i32 13, i32 14, i32 15>
; CHECK-LE: vmrgew 2, 2, 2
; CHECK-BE: vmrgow 2, 2, 2
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
; CHECK-LE: blr
; CHECK-BE: blr
target triple = "powerpc-unknown-linux-gnu"
%struct.S2203 = type { %struct.u16qi }
%struct.u16qi = type { <16 x i8> }
-@s = weak global %struct.S2203 zeroinitializer ; <%struct.S2203*> [#uses=1]
+@s = weak global %struct.S2203 zeroinitializer ; <ptr> [#uses=1]
define void @foo(i32 %x, ...) {
entry:
; CHECK: foo:
; CHECK-LE: foo:
- %x_addr = alloca i32 ; <i32*> [#uses=1]
- %ap = alloca i8* ; <i8**> [#uses=3]
- %ap.0 = alloca i8* ; <i8**> [#uses=3]
+ %x_addr = alloca i32 ; <ptr> [#uses=1]
+ %ap = alloca ptr ; <ptr> [#uses=3]
+ %ap.0 = alloca ptr ; <ptr> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %x, i32* %x_addr
- %ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %ap1 )
- %tmp = load i8*, i8** %ap, align 4 ; <i8*> [#uses=1]
- store i8* %tmp, i8** %ap.0, align 4
- %tmp2 = load i8*, i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp3 = getelementptr i8, i8* %tmp2, i64 16 ; <i8*> [#uses=1]
- store i8* %tmp3, i8** %ap, align 4
- %tmp4 = load i8*, i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp45 = bitcast i8* %tmp4 to %struct.S2203* ; <%struct.S2203*> [#uses=1]
- %tmp6 = getelementptr %struct.S2203, %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp7 = getelementptr %struct.S2203, %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp8 = getelementptr %struct.u16qi, %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp9 = getelementptr %struct.u16qi, %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp10 = load <16 x i8>, <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
+ store i32 %x, ptr %x_addr
+ call void @llvm.va_start( ptr %ap )
+ %tmp = load ptr, ptr %ap, align 4 ; <ptr> [#uses=1]
+ store ptr %tmp, ptr %ap.0, align 4
+ %tmp2 = load ptr, ptr %ap.0, align 4 ; <ptr> [#uses=1]
+ %tmp3 = getelementptr i8, ptr %tmp2, i64 16 ; <ptr> [#uses=1]
+ store ptr %tmp3, ptr %ap, align 4
+ %tmp4 = load ptr, ptr %ap.0, align 4 ; <ptr> [#uses=1]
+ %tmp6 = getelementptr %struct.S2203, ptr @s, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp7 = getelementptr %struct.S2203, ptr %tmp4, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp8 = getelementptr %struct.u16qi, ptr %tmp6, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp9 = getelementptr %struct.u16qi, ptr %tmp7, i32 0, i32 0 ; <ptr> [#uses=1]
+ %tmp10 = load <16 x i8>, ptr %tmp9, align 4 ; <<16 x i8>> [#uses=1]
; CHECK: lvsl
; CHECK: vperm
; CHECK-LE: lvsr
; CHECK-LE: vperm
- store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
+ store <16 x i8> %tmp10, ptr %tmp8, align 4
br label %return
return: ; preds = %entry
ret void
}
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-VSX
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -mcpu=pwr8 -mattr=-power8-altivec | FileCheck %s -check-prefix=CHECK-LE-VSX
-define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
- %tmp = load <4 x i32>, <4 x i32>* %X ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x i32>, <4 x i32>* %Y ; <<4 x i32>> [#uses=1]
+define <4 x i32> @test_v4i32(ptr %X, ptr %Y) {
+ %tmp = load <4 x i32>, ptr %X ; <<4 x i32>> [#uses=1]
+ %tmp2 = load <4 x i32>, ptr %Y ; <<4 x i32>> [#uses=1]
%tmp3 = mul <4 x i32> %tmp, %tmp2 ; <<4 x i32>> [#uses=1]
ret <4 x i32> %tmp3
}
; CHECK-LE-VSX: vmsumuhm
; CHECK-LE-VSX-NOT: mullw
-define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
- %tmp = load <8 x i16>, <8 x i16>* %X ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>, <8 x i16>* %Y ; <<8 x i16>> [#uses=1]
+define <8 x i16> @test_v8i16(ptr %X, ptr %Y) {
+ %tmp = load <8 x i16>, ptr %X ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, ptr %Y ; <<8 x i16>> [#uses=1]
%tmp3 = mul <8 x i16> %tmp, %tmp2 ; <<8 x i16>> [#uses=1]
ret <8 x i16> %tmp3
}
; CHECK-LE-VSX: vmladduhm
; CHECK-LE-VSX-NOT: mullw
-define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
- %tmp = load <16 x i8>, <16 x i8>* %X ; <<16 x i8>> [#uses=1]
- %tmp2 = load <16 x i8>, <16 x i8>* %Y ; <<16 x i8>> [#uses=1]
+define <16 x i8> @test_v16i8(ptr %X, ptr %Y) {
+ %tmp = load <16 x i8>, ptr %X ; <<16 x i8>> [#uses=1]
+ %tmp2 = load <16 x i8>, ptr %Y ; <<16 x i8>> [#uses=1]
%tmp3 = mul <16 x i8> %tmp, %tmp2 ; <<16 x i8>> [#uses=1]
ret <16 x i8> %tmp3
}
; CHECK-LE-VSX: vperm {{[0-9]+}}, [[REG2]], [[REG1]]
; CHECK-LE-VSX-NOT: mullw
-define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
- %tmp = load <4 x float>, <4 x float>* %X
- %tmp2 = load <4 x float>, <4 x float>* %Y
+define <4 x float> @test_float(ptr %X, ptr %Y) {
+ %tmp = load <4 x float>, ptr %X
+ %tmp2 = load <4 x float>, ptr %Y
%tmp3 = fmul <4 x float> %tmp, %tmp2
ret <4 x float> %tmp3
}
; TODO: Fix this case when disabling perfect shuffle
-define <4 x float> @test_uu72(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+define <4 x float> @test_uu72(ptr %P1, ptr %P2) {
+ %V1 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, ptr %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 undef, i32 undef, i32 7, i32 2 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
-define <4 x float> @test_30u5(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+define <4 x float> @test_30u5(ptr %P1, ptr %P2) {
+ %V1 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, ptr %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 0, i32 undef, i32 5 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
-define <4 x float> @test_3u73(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+define <4 x float> @test_3u73(ptr %P1, ptr %P2) {
+ %V1 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, ptr %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 undef, i32 7, i32 3 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
-define <4 x float> @test_3774(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+define <4 x float> @test_3774(ptr %P1, ptr %P2) {
+ %V1 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, ptr %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 7, i32 7, i32 4 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
-define <4 x float> @test_4450(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>, <4 x float>* %P2 ; <<4 x float>> [#uses=1]
+define <4 x float> @test_4450(ptr %P1, ptr %P2) {
+ %V1 = load <4 x float>, ptr %P1 ; <<4 x float>> [#uses=1]
+ %V2 = load <4 x float>, ptr %P2 ; <<4 x float>> [#uses=1]
%V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 4, i32 4, i32 5, i32 0 > ; <<4 x float>> [#uses=1]
ret <4 x float> %V3
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5
; PR3628
-define void @update(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+define void @update(<4 x i32> %val, ptr %dst) nounwind {
entry:
%shl = shl <4 x i32> %val, < i32 4, i32 3, i32 2, i32 1 >
%shr = ashr <4 x i32> %shl, < i32 1, i32 2, i32 3, i32 4 >
- store <4 x i32> %shr, <4 x i32>* %dst
+ store <4 x i32> %shr, ptr %dst
ret void
}
; RUN: llc -mtriple=ppc32-- -mcpu=g5 | not grep vperm
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | FileCheck %s
-define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VSLDOI_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VSLDOI_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, ptr %B ; <<8 x i16>> [#uses=1]
%tmp.upgrd.1 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
%tmp2.upgrd.2 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
%tmp.upgrd.3 = extractelement <16 x i8> %tmp.upgrd.1, i32 5 ; <i8> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
%tmp33.upgrd.4 = bitcast <16 x i8> %tmp33 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp33.upgrd.4, <8 x i16>* %A
+ store <8 x i16> %tmp33.upgrd.4, ptr %A
ret void
}
-define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VSLDOI_xx(ptr %A, ptr %B) {
; CHECK-LABEL: VSLDOI_xx:
; CHECK: # %bb.0:
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: vsldoi 2, 2, 2, 5
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=1]
%tmp.upgrd.5 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
%tmp2.upgrd.6 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
%tmp.upgrd.7 = extractelement <16 x i8> %tmp.upgrd.5, i32 5 ; <i8> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
%tmp33.upgrd.8 = bitcast <16 x i8> %tmp33 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp33.upgrd.8, <8 x i16>* %A
+ store <8 x i16> %tmp33.upgrd.8, ptr %A
ret void
}
-define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VPERM_promote(ptr %A, ptr %B) {
; CHECK-LABEL: VPERM_promote:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=1]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=1]
%tmp.upgrd.9 = bitcast <8 x i16> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=1]
+ %tmp2 = load <8 x i16>, ptr %B ; <<8 x i16>> [#uses=1]
%tmp2.upgrd.10 = bitcast <8 x i16> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp3 = call <4 x i32> @llvm.ppc.altivec.vperm( <4 x i32> %tmp.upgrd.9, <4 x i32> %tmp2.upgrd.10, <16 x i8> < i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14 > ) ; <<4 x i32>> [#uses=1]
%tmp3.upgrd.11 = bitcast <4 x i32> %tmp3 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp3.upgrd.11, <8 x i16>* %A
+ store <8 x i16> %tmp3.upgrd.11, ptr %A
ret void
}
declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
-define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
+define void @tb_l(ptr %A, ptr %B) {
; CHECK-LABEL: tb_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=8]
+ %tmp = load <16 x i8>, ptr %A ; <<16 x i8>> [#uses=8]
+ %tmp2 = load <16 x i8>, ptr %B ; <<16 x i8>> [#uses=8]
%tmp.upgrd.12 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp2, i32 8 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
+ store <16 x i8> %tmp33, ptr %A
ret void
}
-define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
+define void @th_l(ptr %A, ptr %B) {
; CHECK-LABEL: th_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=4]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=4]
+ %tmp2 = load <8 x i16>, ptr %B ; <<8 x i16>> [#uses=4]
%tmp.upgrd.13 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp2, i32 4 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
+ store <8 x i16> %tmp17, ptr %A
ret void
}
-define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_l(ptr %A, ptr %B) {
; CHECK-LABEL: tw_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, ptr %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.14 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp2, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
+ store <4 x i32> %tmp9, ptr %A
ret void
}
-define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
+define void @tb_h(ptr %A, ptr %B) {
; CHECK-LABEL: tb_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=8]
+ %tmp = load <16 x i8>, ptr %A ; <<16 x i8>> [#uses=8]
+ %tmp2 = load <16 x i8>, ptr %B ; <<16 x i8>> [#uses=8]
%tmp.upgrd.15 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp2, i32 0 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
+ store <16 x i8> %tmp33, ptr %A
ret void
}
-define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
+define void @th_h(ptr %A, ptr %B) {
; CHECK-LABEL: th_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=4]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=4]
+ %tmp2 = load <8 x i16>, ptr %B ; <<8 x i16>> [#uses=4]
%tmp.upgrd.16 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp2, i32 0 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
+ store <8 x i16> %tmp17, ptr %A
ret void
}
-define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_h(ptr %A, ptr %B) {
; CHECK-LABEL: tw_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, ptr %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.17 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp2, i32 1 ; <i32> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
+ store <4 x i32> %tmp9, ptr %A
ret void
}
-define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
+define void @tw_h_flop(ptr %A, ptr %B) {
; CHECK-LABEL: tw_h_flop:
; CHECK: # %bb.0:
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: vmrghw 2, 2, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>, <4 x i32>* %B ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=2]
+ %tmp2 = load <4 x i32>, ptr %B ; <<4 x i32>> [#uses=2]
%tmp.upgrd.18 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
+ store <4 x i32> %tmp9, ptr %A
ret void
}
-define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRG_UNARY_tb_l(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_tb_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=16]
+ %tmp = load <16 x i8>, ptr %A ; <<16 x i8>> [#uses=16]
%tmp.upgrd.19 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
+ store <16 x i8> %tmp33, ptr %A
ret void
}
-define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VMRG_UNARY_th_l(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_th_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=8]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=8]
%tmp.upgrd.20 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
+ store <8 x i16> %tmp17, ptr %A
ret void
}
-define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VMRG_UNARY_tw_l(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_tw_l:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=4]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=4]
%tmp.upgrd.21 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
+ store <4 x i32> %tmp9, ptr %A
ret void
}
-define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRG_UNARY_tb_h(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_tb_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A ; <<16 x i8>> [#uses=16]
+ %tmp = load <16 x i8>, ptr %A ; <<16 x i8>> [#uses=16]
%tmp.upgrd.22 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp3 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
%tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
%tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
%tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
+ store <16 x i8> %tmp33, ptr %A
ret void
}
-define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VMRG_UNARY_th_h(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_th_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=8]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=8]
%tmp.upgrd.23 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp3 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
%tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
%tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
+ store <8 x i16> %tmp17, ptr %A
ret void
}
-define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VMRG_UNARY_tw_h(ptr %A, ptr %B) {
; CHECK-LABEL: VMRG_UNARY_tw_h:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=4]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=4]
%tmp.upgrd.24 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
+ store <4 x i32> %tmp9, ptr %A
ret void
}
-define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
+define void @VPCKUHUM_unary(ptr %A, ptr %B) {
; CHECK-LABEL: VPCKUHUM_unary:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <8 x i16>, <8 x i16>* %A ; <<8 x i16>> [#uses=2]
+ %tmp = load <8 x i16>, ptr %A ; <<8 x i16>> [#uses=2]
%tmp.upgrd.25 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
%tmp3 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
%tmp.upgrd.26 = extractelement <16 x i8> %tmp.upgrd.25, i32 1 ; <i8> [#uses=1]
%tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 14 ; <<16 x i8>> [#uses=1]
%tmp34 = insertelement <16 x i8> %tmp33, i8 %tmp18, i32 15 ; <<16 x i8>> [#uses=1]
%tmp34.upgrd.27 = bitcast <16 x i8> %tmp34 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp34.upgrd.27, <8 x i16>* %A
+ store <8 x i16> %tmp34.upgrd.27, ptr %A
ret void
}
-define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
+define void @VPCKUWUM_unary(ptr %A, ptr %B) {
; CHECK-LABEL: VPCKUWUM_unary:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <4 x i32>, <4 x i32>* %A ; <<4 x i32>> [#uses=2]
+ %tmp = load <4 x i32>, ptr %A ; <<4 x i32>> [#uses=2]
%tmp.upgrd.28 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
%tmp3 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
%tmp.upgrd.29 = extractelement <8 x i16> %tmp.upgrd.28, i32 1 ; <i16> [#uses=1]
%tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 6 ; <<8 x i16>> [#uses=1]
%tmp18 = insertelement <8 x i16> %tmp17, i16 %tmp10, i32 7 ; <<8 x i16>> [#uses=1]
%tmp18.upgrd.30 = bitcast <8 x i16> %tmp18 to <4 x i32> ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp18.upgrd.30, <4 x i32>* %A
+ store <4 x i32> %tmp18.upgrd.30, ptr %A
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
-define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VPKUHUM_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VPKUHUM_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VPKUHUM_xx(<16 x i8>* %A) {
+define void @VPKUHUM_xx(ptr %A) {
; CHECK-LABEL: VPKUHUM_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VPKUWUM_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VPKUWUM_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VPKUWUM_xx(<16 x i8>* %A) {
+define void @VPKUWUM_xx(ptr %A) {
; CHECK-LABEL: VPKUWUM_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLB_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGLB_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGLB_xx(<16 x i8>* %A) {
+define void @VMRGLB_xx(ptr %A) {
; CHECK-LABEL: VMRGLB_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHB_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGHB_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGHB_xx(<16 x i8>* %A) {
+define void @VMRGHB_xx(ptr %A) {
; CHECK-LABEL: VMRGHB_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLH_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGLH_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGLH_xx(<16 x i8>* %A) {
+define void @VMRGLH_xx(ptr %A) {
; CHECK-LABEL: VMRGLH_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHH_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGHH_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGHH_xx(<16 x i8>* %A) {
+define void @VMRGHH_xx(ptr %A) {
; CHECK-LABEL: VMRGHH_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGLW_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGLW_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGLW_xx(<16 x i8>* %A) {
+define void @VMRGLW_xx(ptr %A) {
; CHECK-LABEL: VMRGLW_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VMRGHW_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VMRGHW_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VMRGHW_xx(<16 x i8>* %A) {
+define void @VMRGHW_xx(ptr %A) {
; CHECK-LABEL: VMRGHW_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
-define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
+define void @VSLDOI_xy(ptr %A, ptr %B) {
; CHECK-LABEL: VSLDOI_xy:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
- store <16 x i8> %tmp3, <16 x i8>* %A
+ store <16 x i8> %tmp3, ptr %A
ret void
}
-define void @VSLDOI_xx(<16 x i8>* %A) {
+define void @VSLDOI_xx(ptr %A) {
; CHECK-LABEL: VSLDOI_xx:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lvx 2, 0, 3
; CHECK-NEXT: stvx 2, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <16 x i8>, <16 x i8>* %A
+ %tmp = load <16 x i8>, ptr %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
- store <16 x i8> %tmp2, <16 x i8>* %A
+ store <16 x i8> %tmp2, ptr %A
ret void
}
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck -check-prefix=CHECK-PWR7 %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck -check-prefix=CHECK-PWR7-AIX %s
-define void @VPKUDUM_unary(<2 x i64>* %A) {
+define void @VPKUDUM_unary(ptr %A) {
; CHECK-LABEL: VPKUDUM_unary:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: blr
entry:
- %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp = load <2 x i64>, ptr %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
%tmp3 = extractelement <4 x i32> %tmp2, i32 1
%tmp4 = extractelement <4 x i32> %tmp2, i32 3
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
%tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
- store <2 x i64> %tmp9, <2 x i64>* %A
+ store <2 x i64> %tmp9, ptr %A
ret void
}
-define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+define void @VPKUDUM(ptr %A, ptr %B) {
; CHECK-LABEL: VPKUDUM:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: blr
entry:
- %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp = load <2 x i64>, ptr %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
- %tmp3 = load <2 x i64>, <2 x i64>* %B
+ %tmp3 = load <2 x i64>, ptr %B
%tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
%tmp5 = extractelement <4 x i32> %tmp2, i32 1
%tmp6 = extractelement <4 x i32> %tmp2, i32 3
%tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
%tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
%tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
- store <2 x i64> %tmp13, <2 x i64>* %A
+ store <2 x i64> %tmp13, ptr %A
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -mattr=+power8-vector < %s | FileCheck %s
-define void @VPKUDUM_unary(<2 x i64>* %A) {
+define void @VPKUDUM_unary(ptr %A) {
; CHECK-LABEL: VPKUDUM_unary:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: stxvd2x 0, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp = load <2 x i64>, ptr %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
%tmp3 = extractelement <4 x i32> %tmp2, i32 0
%tmp4 = extractelement <4 x i32> %tmp2, i32 2
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
%tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
- store <2 x i64> %tmp9, <2 x i64>* %A
+ store <2 x i64> %tmp9, ptr %A
ret void
}
-define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
+define void @VPKUDUM(ptr %A, ptr %B) {
; CHECK-LABEL: VPKUDUM:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: stxvd2x 0, 0, 3
; CHECK-NEXT: blr
entry:
- %tmp = load <2 x i64>, <2 x i64>* %A
+ %tmp = load <2 x i64>, ptr %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
- %tmp3 = load <2 x i64>, <2 x i64>* %B
+ %tmp3 = load <2 x i64>, ptr %B
%tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
%tmp5 = extractelement <4 x i32> %tmp2, i32 0
%tmp6 = extractelement <4 x i32> %tmp2, i32 2
%tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
%tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
%tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
- store <2 x i64> %tmp13, <2 x i64>* %A
+ store <2 x i64> %tmp13, ptr %A
ret void
}
%f4 = type <4 x float>
%i4 = type <4 x i32>
-define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
+define void @splat(ptr %P, ptr %Q, float %X) nounwind {
; G3-LABEL: splat:
; G3: # %bb.0:
; G3-NEXT: lfs 0, 12(4)
%tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
+ %q = load %f4, ptr %Q ; <%f4> [#uses=1]
%R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
+ store %f4 %R, ptr %P
ret void
}
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_i4(ptr %P, ptr %Q, i32 %X) nounwind {
; G3-LABEL: splat_i4:
; G3: # %bb.0:
; G3-NEXT: lwz 6, 12(4)
%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, ptr %Q ; <%i4> [#uses=1]
%R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
+ store %i4 %R, ptr %P
ret void
}
-define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_imm_i32(ptr %P, ptr %Q, i32 %X) nounwind {
; G3-LABEL: splat_imm_i32:
; G3: # %bb.0:
; G3-NEXT: lwz 5, 12(4)
; G5-NEXT: vadduwm 2, 2, 3
; G5-NEXT: stvx 2, 0, 3
; G5-NEXT: blr
- %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, ptr %Q ; <%i4> [#uses=1]
%R = add %i4 %q, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
+ store %i4 %R, ptr %P
ret void
}
-define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
+define void @splat_imm_i16(ptr %P, ptr %Q, i32 %X) nounwind {
; G3-LABEL: splat_imm_i16:
; G3: # %bb.0:
; G3-NEXT: lwz 5, 8(4)
; G5-NEXT: vadduwm 2, 2, 3
; G5-NEXT: stvx 2, 0, 3
; G5-NEXT: blr
- %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, ptr %Q ; <%i4> [#uses=1]
%R = add %i4 %q, < i32 65537, i32 65537, i32 65537, i32 65537 > ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
+ store %i4 %R, ptr %P
ret void
}
-define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
+define void @splat_h(i16 %tmp, ptr %dst) nounwind {
; G3-LABEL: splat_h:
; G3: # %bb.0:
; G3-NEXT: sth 3, 14(4)
%tmp77 = insertelement <8 x i16> %tmp76, i16 %tmp, i32 6
%tmp78 = insertelement <8 x i16> %tmp77, i16 %tmp, i32 7
%tmp78.upgrd.2 = bitcast <8 x i16> %tmp78 to <16 x i8>
- store <16 x i8> %tmp78.upgrd.2, <16 x i8>* %dst
+ store <16 x i8> %tmp78.upgrd.2, ptr %dst
ret void
}
-define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define void @spltish(ptr %A, ptr %B) nounwind {
; G3-LABEL: spltish:
; G3: # %bb.0:
; G3-NEXT: stwu 1, -48(1)
; G5-NEXT: vsububm 2, 2, 3
; G5-NEXT: stvx 2, 0, 3
; G5-NEXT: blr
- %tmp = load <16 x i8>, <16 x i8>* %B ; <<16 x i8>> [#uses=1]
+ %tmp = load <16 x i8>, ptr %B ; <<16 x i8>> [#uses=1]
%tmp.s = bitcast <16 x i8> %tmp to <16 x i8> ; <<16 x i8>> [#uses=1]
%tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
15, i16 15, i16 15 > to <16 x i8>) ; <<16 x i8>> [#uses=1]
%tmp4.u = bitcast <16 x i8> %tmp4 to <16 x i8> ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp4.u, <16 x i8>* %A
+ store <16 x i8> %tmp4.u, ptr %A
ret void
}
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 | FileCheck %s
; Formerly incorrectly inserted vsldoi (endian confusion)
-@baz = common global <16 x i8> zeroinitializer ; <<16 x i8>*> [#uses=1]
+@baz = common global <16 x i8> zeroinitializer ; <ptr> [#uses=1]
define void @foo(<16 x i8> %x) nounwind ssp {
entry:
; CHECK: foo:
; CHECK-NOT: vsldoi
- %x_addr = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %temp = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
+ %x_addr = alloca <16 x i8> ; <ptr> [#uses=2]
+ %temp = alloca <16 x i8> ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <16 x i8> %x, <16 x i8>* %x_addr
- store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>, <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>, <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
+ store <16 x i8> %x, ptr %x_addr
+ store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, ptr %temp, align 16
+ %0 = load <16 x i8>, ptr %x_addr, align 16 ; <<16 x i8>> [#uses=1]
+ %1 = load <16 x i8>, ptr %temp, align 16 ; <<16 x i8>> [#uses=1]
%tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp, <16 x i8>* @baz, align 16
+ store <16 x i8> %tmp, ptr @baz, align 16
br label %return
return: ; preds = %entry
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep vxor
-define void @foo(<4 x float>* %P) {
- %T = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
+define void @foo(ptr %P) {
+ %T = load <4 x float>, ptr %P ; <<4 x float>> [#uses=1]
%S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
- store <4 x float> %S, <4 x float>* %P
+ store <4 x float> %S, ptr %P
ret void
}
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | grep test:
; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 | not grep vperm
-define void @test(<4 x float>* %tmp2.i) {
- %tmp2.i.upgrd.1 = load <4 x float>, <4 x float>* %tmp2.i ; <<4 x float>> [#uses=4]
+define void @test(ptr %tmp2.i) {
+ %tmp2.i.upgrd.1 = load <4 x float>, ptr %tmp2.i ; <<4 x float>> [#uses=4]
%xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0 ; <float> [#uses=1]
%inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0 ; <<4 x float>> [#uses=1]
%xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1 ; <float> [#uses=1]
%inFloat2.55 = insertelement <4 x float> %inFloat1.52, float %xFloat2.53, i32 2 ; <<4 x float>> [#uses=1]
%xFloat3.56 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 3 ; <float> [#uses=1]
%inFloat3.58 = insertelement <4 x float> %inFloat2.55, float %xFloat3.56, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %inFloat3.58, <4 x float>* %tmp2.i
+ store <4 x float> %inFloat3.58, ptr %tmp2.i
ret void
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign16_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign16_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign16_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 1(r3), 0
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align16_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align16_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align16_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 8(r3), 0
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign32_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign32_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign32_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 99999(r3), 0
; CHECK-P8-BE-NEXT: lxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align32_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align32_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align32_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: plxv v2, 99999000(r3), 0
; CHECK-P8-BE-NEXT: lxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_unalign64_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_unalign64_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_unalign64_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-P8-BE-NEXT: lxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_align64_vector(i8* nocapture readonly %ptr) {
+define dso_local <16 x i8> @ld_align64_vector(ptr nocapture readonly %ptr) {
; CHECK-P10-LABEL: ld_align64_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: lxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
-define dso_local <16 x i8> @ld_reg_vector(i8* nocapture readonly %ptr, i64 %off) {
+define dso_local <16 x i8> @ld_reg_vector(ptr nocapture readonly %ptr, i64 %off) {
; CHECK-LABEL: ld_reg_vector:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvx v2, r3, r4
; CHECK-P8-BE-NEXT: lxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
- ret <16 x i8> %1
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ %0 = load <16 x i8>, ptr %add.ptr, align 16
+ ret <16 x i8> %0
}
; Function Attrs: norecurse nounwind readonly uwtable willreturn
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 16
ret <16 x i8> %1
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to <16 x i8>*
- %1 = load <16 x i8>, <16 x i8>* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ %1 = load <16 x i8>, ptr %0, align 4096
ret <16 x i8> %1
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 255 to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr inttoptr (i64 255 to ptr), align 16
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 4080 to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr inttoptr (i64 4080 to ptr), align 16
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 99999 to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr inttoptr (i64 99999 to ptr), align 16
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 9999900 to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr inttoptr (i64 9999900 to ptr), align 16
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 1000000000001 to <16 x i8>*), align 16
+ %0 = load <16 x i8>, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: lxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = load <16 x i8>, <16 x i8>* inttoptr (i64 1000000000000 to <16 x i8>*), align 4096
+ %0 = load <16 x i8>, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret <16 x i8> %0
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %0 = inttoptr i64 %ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %ptr to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign16_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign16_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_unalign16_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 1(r3), 0
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align16_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align16_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_align16_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 8(r3), 0
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 8
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign32_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign32_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_unalign32_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 99999(r3), 0
; CHECK-P8-BE-NEXT: stxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align32_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align32_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_align32_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pstxv v2, 99999000(r3), 0
; CHECK-P8-BE-NEXT: stxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 99999000
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 99999000
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_unalign64_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_unalign64_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_unalign64_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 232
; CHECK-P8-BE-NEXT: stxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000001
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000001
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_align64_vector(i8* nocapture %ptr, <16 x i8> %str) {
+define dso_local void @st_align64_vector(ptr nocapture %ptr, <16 x i8> %str) {
; CHECK-P10-LABEL: st_align64_vector:
; CHECK-P10: # %bb.0: # %entry
; CHECK-P10-NEXT: pli r4, 244140625
; CHECK-P8-BE-NEXT: stxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 1000000000000
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 1000000000000
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
; Function Attrs: nofree norecurse nounwind uwtable willreturn writeonly
-define dso_local void @st_reg_vector(i8* nocapture %ptr, i64 %off, <16 x i8> %str) {
+define dso_local void @st_reg_vector(ptr nocapture %ptr, i64 %off, <16 x i8> %str) {
; CHECK-LABEL: st_reg_vector:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvx v2, r3, r4
; CHECK-P8-BE-NEXT: stxvw4x v2, r3, r4
; CHECK-P8-BE-NEXT: blr
entry:
- %add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %off
- %0 = bitcast i8* %add.ptr to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %add.ptr = getelementptr inbounds i8, ptr %ptr, i64 %off
+ store <16 x i8> %str, ptr %add.ptr, align 16
ret void
}
entry:
%conv = zext i8 %off to i64
%or = or i64 %conv, %ptr
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
%and = and i64 %ptr, -4096
%conv = zext i8 %off to i64
%or = or i64 %and, %conv
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 6
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 6
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -4096
%or = or i64 %and, 24
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 99999
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1048576
%or = or i64 %and, 99999
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1000341504
%or = or i64 %and, 999990000
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
; CHECK-P8-BE-NEXT: blr
entry:
%or = or i64 %ptr, 1000000000001
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000001
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 16
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 16
ret void
}
entry:
%and = and i64 %ptr, -1099511627776
%or = or i64 %and, 1000000000000
- %0 = inttoptr i64 %or to <16 x i8>*
- store <16 x i8> %str, <16 x i8>* %0, align 4096
+ %0 = inttoptr i64 %or to ptr
+ store <16 x i8> %str, ptr %0, align 4096
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 255 to <16 x i8>*), align 16
+ store <16 x i8> %str, ptr inttoptr (i64 255 to ptr), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 4080 to <16 x i8>*), align 16
+ store <16 x i8> %str, ptr inttoptr (i64 4080 to ptr), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 99999 to <16 x i8>*), align 16
+ store <16 x i8> %str, ptr inttoptr (i64 99999 to ptr), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 9999900 to <16 x i8>*), align 16
+ store <16 x i8> %str, ptr inttoptr (i64 9999900 to ptr), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 1000000000001 to <16 x i8>*), align 16
+ store <16 x i8> %str, ptr inttoptr (i64 1000000000001 to ptr), align 16
ret void
}
; CHECK-P8-BE-NEXT: stxvw4x v2, 0, r3
; CHECK-P8-BE-NEXT: blr
entry:
- store <16 x i8> %str, <16 x i8>* inttoptr (i64 1000000000000 to <16 x i8>*), align 4096
+ store <16 x i8> %str, ptr inttoptr (i64 1000000000000 to ptr), align 4096
ret void
}
; CHECK-DAG: std [[ZEROREG]], 16([[PTR]])
; CHECK-DAG: std [[ZEROREG]], 24([[PTR]])
; CHECK: blr
-define void @merge_8_float_zero_stores(float* %ptr) {
- %idx0 = getelementptr float, float* %ptr, i64 0
- %idx1 = getelementptr float, float* %ptr, i64 1
- %idx2 = getelementptr float, float* %ptr, i64 2
- %idx3 = getelementptr float, float* %ptr, i64 3
- %idx4 = getelementptr float, float* %ptr, i64 4
- %idx5 = getelementptr float, float* %ptr, i64 5
- %idx6 = getelementptr float, float* %ptr, i64 6
- %idx7 = getelementptr float, float* %ptr, i64 7
- store float 0.0, float* %idx0, align 4
- store float 0.0, float* %idx1, align 4
- store float 0.0, float* %idx2, align 4
- store float 0.0, float* %idx3, align 4
- store float 0.0, float* %idx4, align 4
- store float 0.0, float* %idx5, align 4
- store float 0.0, float* %idx6, align 4
- store float 0.0, float* %idx7, align 4
+define void @merge_8_float_zero_stores(ptr %ptr) {
+ %idx1 = getelementptr float, ptr %ptr, i64 1
+ %idx2 = getelementptr float, ptr %ptr, i64 2
+ %idx3 = getelementptr float, ptr %ptr, i64 3
+ %idx4 = getelementptr float, ptr %ptr, i64 4
+ %idx5 = getelementptr float, ptr %ptr, i64 5
+ %idx6 = getelementptr float, ptr %ptr, i64 6
+ %idx7 = getelementptr float, ptr %ptr, i64 7
+ store float 0.0, ptr %ptr, align 4
+ store float 0.0, ptr %idx1, align 4
+ store float 0.0, ptr %idx2, align 4
+ store float 0.0, ptr %idx3, align 4
+ store float 0.0, ptr %idx4, align 4
+ store float 0.0, ptr %idx5, align 4
+ store float 0.0, ptr %idx6, align 4
+ store float 0.0, ptr %idx7, align 4
ret void
}
;;; TEST HANDLING OF VARIOUS VECTOR SIZES
-define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1, %f1* %P ; <%f1> [#uses=1]
- %q = load %f1, %f1* %Q ; <%f1> [#uses=1]
+define void @test_f1(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f1, ptr %P ; <%f1> [#uses=1]
+ %q = load %f1, ptr %Q ; <%f1> [#uses=1]
%R = fadd %f1 %p, %q ; <%f1> [#uses=1]
- store %f1 %R, %f1* %S
+ store %f1 %R, ptr %S
ret void
}
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2, %f2* %P ; <%f2> [#uses=1]
- %q = load %f2, %f2* %Q ; <%f2> [#uses=1]
+define void @test_f2(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f2, ptr %P ; <%f2> [#uses=1]
+ %q = load %f2, ptr %Q ; <%f2> [#uses=1]
%R = fadd %f2 %p, %q ; <%f2> [#uses=1]
- store %f2 %R, %f2* %S
+ store %f2 %R, ptr %S
ret void
}
-define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4, %f4* %P ; <%f4> [#uses=1]
- %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
+define void @test_f4(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f4, ptr %P ; <%f4> [#uses=1]
+ %q = load %f4, ptr %Q ; <%f4> [#uses=1]
%R = fadd %f4 %p, %q ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8, %f8* %P ; <%f8> [#uses=1]
- %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
+define void @test_f8(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f8, ptr %P ; <%f8> [#uses=1]
+ %q = load %f8, ptr %Q ; <%f8> [#uses=1]
%R = fadd %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
+ store %f8 %R, ptr %S
ret void
}
-define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8, %f8* %P ; <%f8> [#uses=1]
- %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
+define void @test_fmul(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f8, ptr %P ; <%f8> [#uses=1]
+ %q = load %f8, ptr %Q ; <%f8> [#uses=1]
%R = fmul %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
+ store %f8 %R, ptr %S
ret void
}
-define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8, %f8* %P ; <%f8> [#uses=1]
- %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
+define void @test_div(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f8, ptr %P ; <%f8> [#uses=1]
+ %q = load %f8, ptr %Q ; <%f8> [#uses=1]
%R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
+ store %f8 %R, ptr %S
ret void
}
-define void @test_rem(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8, %f8* %P ; <%f8> [#uses=1]
- %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
+define void @test_rem(ptr %P, ptr %Q, ptr %S) {
+ %p = load %f8, ptr %P ; <%f8> [#uses=1]
+ %q = load %f8, ptr %Q ; <%f8> [#uses=1]
%R = frem %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
+ store %f8 %R, ptr %S
ret void
}
;;; TEST VECTOR CONSTRUCTS
-define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4, %f4* %P ; <%f4> [#uses=1]
+define void @test_cst(ptr %P, ptr %S) {
+ %p = load %f4, ptr %P ; <%f4> [#uses=1]
  %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 >               ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4, %f4* %P ; <%f4> [#uses=1]
+define void @test_zero(ptr %P, ptr %S) {
+ %p = load %f4, ptr %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4, %f4* %P ; <%f4> [#uses=1]
+define void @test_undef(ptr %P, ptr %S) {
+ %p = load %f4, ptr %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, undef ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_constant_insert(%f4* %S) {
+define void @test_constant_insert(ptr %S) {
  %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0               ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_variable_buildvector(float %F, %f4* %S) {
+define void @test_variable_buildvector(float %F, ptr %S) {
%R = insertelement %f4 zeroinitializer, float %F, i32 0
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define void @test_scalar_to_vector(float %F, %f4* %S) {
+define void @test_scalar_to_vector(float %F, ptr %S) {
%R = insertelement %f4 undef, float %F, i32 0
- store %f4 %R, %f4* %S
+ store %f4 %R, ptr %S
ret void
}
-define float @test_extract_elt(%f8* %P) {
- %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+define float @test_extract_elt(ptr %P) {
+ %p = load %f8, ptr %P ; <%f8> [#uses=1]
%R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
ret float %R
}
-define double @test_extract_elt2(%d8* %P) {
- %p = load %d8, %d8* %P ; <%d8> [#uses=1]
+define double @test_extract_elt2(ptr %P) {
+ %p = load %d8, ptr %P ; <%d8> [#uses=1]
%R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
ret double %R
}
-define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4, %f4* %b ; <%f4> [#uses=1]
+define void @test_cast_1(ptr %b, ptr %a) {
+ %tmp = load %f4, ptr %b ; <%f4> [#uses=1]
  %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >               ; <%f4> [#uses=1]
%tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
%tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >
- store %i4 %tmp4, %i4* %a
+ store %i4 %tmp4, ptr %a
ret void
}
-define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8, %f8* %a ; <%f8> [#uses=1]
+define void @test_cast_2(ptr %a, ptr %b) {
+ %T = load %f8, ptr %a ; <%f8> [#uses=1]
%T2 = bitcast %f8 %T to <8 x i32>
- store <8 x i32> %T2, <8 x i32>* %b
+ store <8 x i32> %T2, ptr %b
ret void
}
;;; TEST IMPORTANT IDIOMS
-define void @splat(%f4* %P, %f4* %Q, float %X) {
+define void @splat(ptr %P, ptr %Q, float %X) {
%tmp = insertelement %f4 undef, float %X, i32 0
%tmp2 = insertelement %f4 %tmp, float %X, i32 1
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3
- %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
+ %q = load %f4, ptr %Q ; <%f4> [#uses=1]
%R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
+ store %f4 %R, ptr %P
ret void
}
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
+define void @splat_i4(ptr %P, ptr %Q, i32 %X) {
%tmp = insertelement %i4 undef, i32 %X, i32 0
%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1
%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2
%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3
- %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, ptr %Q ; <%i4> [#uses=1]
%R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
+ store %i4 %R, ptr %P
ret void
}
entry:
%i.addr = alloca i32, align 4
%w.addr = alloca <4 x float>, align 16
- store i32 %i, i32* %i.addr, align 4
- store <4 x float> %w, <4 x float>* %w.addr, align 16
- call void @foo(i32* %i.addr)
+ store i32 %i, ptr %i.addr, align 4
+ store <4 x float> %w, ptr %w.addr, align 16
+ call void @foo(ptr %i.addr)
ret void
}
; the opcode.
; CHECK-VSX: stxvw4x
-declare void @foo(i32*)
+declare void @foo(ptr)
if.end: ; preds = %entry
%0 = select i1 undef, <2 x double> undef, <2 x double> zeroinitializer
%1 = extractelement <2 x double> %0, i32 1
- store double %1, double* undef, align 8
+ store double %1, ptr undef, align 8
ret void
; CHECK-LABEL: @Compute_Lateral
define void @test1() {
entry:
- %0 = load <4 x float>, <4 x float>* @vf, align 16
+ %0 = load <4 x float>, ptr @vf, align 16
%1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
- store <4 x float> %1, <4 x float>* @vf_res, align 16
+ store <4 x float> %1, ptr @vf_res, align 16
ret void
}
; CHECK-LABEL: @test1
define void @test2() {
entry:
- %0 = load <2 x double>, <2 x double>* @vd, align 16
+ %0 = load <2 x double>, ptr @vd, align 16
%1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
- store <2 x double> %1, <2 x double>* @vd_res, align 16
+ store <2 x double> %1, ptr @vd_res, align 16
ret void
}
; CHECK-LABEL: @test2
; Function Attrs: nounwind
define float @emit_xsaddsp() {
entry:
- %0 = load float, float* @a, align 4
- %1 = load float, float* @b, align 4
+ %0 = load float, ptr @a, align 4
+ %1 = load float, ptr @b, align 4
%add = fadd float %0, %1
ret float %add
; CHECK-LABEL: @emit_xsaddsp
; Function Attrs: nounwind
define float @emit_xssubsp() {
entry:
- %0 = load float, float* @a, align 4
- %1 = load float, float* @b, align 4
+ %0 = load float, ptr @a, align 4
+ %1 = load float, ptr @b, align 4
%sub = fsub float %0, %1
ret float %sub
; CHECK-LABEL: @emit_xssubsp
; Function Attrs: nounwind
define float @emit_xsdivsp() {
entry:
- %0 = load float, float* @a, align 4
- %1 = load float, float* @b, align 4
+ %0 = load float, ptr @a, align 4
+ %1 = load float, ptr @b, align 4
%div = fdiv float %0, %1
ret float %div
; CHECK-LABEL: @emit_xsdivsp
; Function Attrs: nounwind
define float @emit_xsmulsp() {
entry:
- %0 = load float, float* @a, align 4
- %1 = load float, float* @b, align 4
+ %0 = load float, ptr @a, align 4
+ %1 = load float, ptr @b, align 4
%mul = fmul float %0, %1
ret float %mul
; CHECK-LABEL: @emit_xsmulsp
; Function Attrs: nounwind
define float @emit_xssqrtsp() {
entry:
- %0 = load float, float* @b, align 4
+ %0 = load float, ptr @b, align 4
%call = call float @sqrtf(float %0)
ret float %call
; CHECK-LABEL: @emit_xssqrtsp
; Function Attrs: nounwind
define double @emit_xsadddp() {
entry:
- %0 = load double, double* @c, align 8
- %1 = load double, double* @d, align 8
+ %0 = load double, ptr @c, align 8
+ %1 = load double, ptr @d, align 8
%add = fadd double %0, %1
ret double %add
; CHECK-LABEL: @emit_xsadddp
; Function Attrs: nounwind
define double @emit_xssubdp() {
entry:
- %0 = load double, double* @c, align 8
- %1 = load double, double* @d, align 8
+ %0 = load double, ptr @c, align 8
+ %1 = load double, ptr @d, align 8
%sub = fsub double %0, %1
ret double %sub
; CHECK-LABEL: @emit_xssubdp
; Function Attrs: nounwind
define double @emit_xsdivdp() {
entry:
- %0 = load double, double* @c, align 8
- %1 = load double, double* @d, align 8
+ %0 = load double, ptr @c, align 8
+ %1 = load double, ptr @d, align 8
%div = fdiv double %0, %1
ret double %div
; CHECK-LABEL: @emit_xsdivdp
; Function Attrs: nounwind
define double @emit_xsmuldp() {
entry:
- %0 = load double, double* @c, align 8
- %1 = load double, double* @d, align 8
+ %0 = load double, ptr @c, align 8
+ %1 = load double, ptr @d, align 8
%mul = fmul double %0, %1
ret double %mul
; CHECK-LABEL: @emit_xsmuldp
; Function Attrs: nounwind
define double @emit_xssqrtdp() {
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%call = call double @sqrt(double %0)
ret double %call
; CHECK-LABEL: @emit_xssqrtdp
; CHECK-LABEL: @emit_xvrsqrtesp
%vf = alloca <4 x float>, align 16
%vfr = alloca <4 x float>, align 16
- %0 = load <4 x float>, <4 x float>* %vf, align 16
+ %0 = load <4 x float>, ptr %vf, align 16
%call = call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %0)
; CHECK: xvrsqrtesp {{[0-9]+}}, {{[0-9]+}}
ret <4 x float> %call
; CHECK-LABEL: @emit_xvrsqrtedp
%vd = alloca <2 x double>, align 16
%vdr = alloca <2 x double>, align 16
- %0 = load <2 x double>, <2 x double>* %vd, align 16
+ %0 = load <2 x double>, ptr %vd, align 16
%call = call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %0)
ret <2 x double> %call
; CHECK: xvrsqrtedp {{[0-9]+}}, {{[0-9]+}}
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-define void @test1(double %a, double %b, double %c, double %e, double* nocapture %d) #0 {
+define void @test1(double %a, double %b, double %c, double %e, ptr nocapture %d) #0 {
entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
- store double %0, double* %d, align 8
+ store double %0, ptr %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
- store double %1, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+ store double %1, ptr %arrayidx1, align 8
ret void
; CHECK-LABEL: @test1
; CHECK-FISL: blr
}
-define void @test2(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test2(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
- store double %0, double* %d, align 8
+ store double %0, ptr %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
- store double %1, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+ store double %1, ptr %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
- store double %2, double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %d, i64 2
+ store double %2, ptr %arrayidx2, align 8
ret void
; CHECK-LABEL: @test2
; CHECK-FISL: blr
}
-define void @test3(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test3(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
- store double %0, double* %d, align 8
+ store double %0, ptr %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx1 = getelementptr inbounds double, double* %d, i64 3
- store double %2, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %d, i64 3
+ store double %2, ptr %arrayidx1, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
- store double %3, double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double, double* %d, i64 1
- store double %1, double* %arrayidx3, align 8
+ %arrayidx2 = getelementptr inbounds double, ptr %d, i64 2
+ store double %3, ptr %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double, ptr %d, i64 1
+ store double %1, ptr %arrayidx3, align 8
ret void
; CHECK-LABEL: @test3
; CHECK-FISL: blr
}
-define void @test4(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+define void @test4(double %a, double %b, double %c, double %e, double %f, ptr nocapture %d) #0 {
entry:
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
- store double %0, double* %d, align 8
+ store double %0, ptr %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
- store double %1, double* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds double, ptr %d, i64 1
+ store double %1, ptr %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx3 = getelementptr inbounds double, double* %d, i64 3
- store double %2, double* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds double, ptr %d, i64 3
+ store double %2, ptr %arrayidx3, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx4 = getelementptr inbounds double, double* %d, i64 2
- store double %3, double* %arrayidx4, align 8
+ %arrayidx4 = getelementptr inbounds double, ptr %d, i64 2
+ store double %3, ptr %arrayidx4, align 8
ret void
; CHECK-LABEL: @test4
declare double @llvm.fma.f64(double, double, double) #0
-define void @testv1(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double>* nocapture %d) #0 {
+define void @testv1(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, ptr nocapture %d) #0 {
entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
- store <2 x double> %0, <2 x double>* %d, align 8
+ store <2 x double> %0, ptr %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
- store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+ store <2 x double> %1, ptr %arrayidx1, align 8
ret void
; CHECK-LABEL: @testv1
; CHECK-FISL: blr
}
-define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
- store <2 x double> %0, <2 x double>* %d, align 8
+ store <2 x double> %0, ptr %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
- store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+ store <2 x double> %1, ptr %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
- store <2 x double> %2, <2 x double>* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+ store <2 x double> %2, ptr %arrayidx2, align 8
ret void
; CHECK-LABEL: @testv2
; CHECK-FISL: blr
}
-define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
- store <2 x double> %0, <2 x double>* %d, align 8
+ store <2 x double> %0, ptr %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
- store <2 x double> %2, <2 x double>* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 3
+ store <2 x double> %2, ptr %arrayidx1, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
- store <2 x double> %3, <2 x double>* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
- store <2 x double> %1, <2 x double>* %arrayidx3, align 8
+ %arrayidx2 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+ store <2 x double> %3, ptr %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+ store <2 x double> %1, ptr %arrayidx3, align 8
ret void
; Note: There is some unavoidable changeability in this variant. If the
; CHECK-FISL: blr
}
-define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, ptr nocapture %d) #0 {
entry:
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
- store <2 x double> %0, <2 x double>* %d, align 8
+ store <2 x double> %0, ptr %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
- store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ %arrayidx1 = getelementptr inbounds <2 x double>, ptr %d, i64 1
+ store <2 x double> %1, ptr %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
- store <2 x double> %2, <2 x double>* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds <2 x double>, ptr %d, i64 3
+ store <2 x double> %2, ptr %arrayidx3, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx4 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
- store <2 x double> %3, <2 x double>* %arrayidx4, align 8
+ %arrayidx4 = getelementptr inbounds <2 x double>, ptr %d, i64 2
+ store <2 x double> %3, ptr %arrayidx4, align 8
ret void
; CHECK-LABEL: @testv4
%conv2 = fpext float %add to double
%0 = tail call double @llvm.sqrt.f64(double %conv2)
%div4 = fdiv reassoc arcp double %conv3, %0
- %call = tail call signext i32 bitcast (i32 (...)* @p_col_helper to i32 (double)*)(double %div4) #2
+ %call = tail call signext i32 @p_col_helper(double %div4) #2
br label %for.body
for.end: ; preds = %entry
%astype5.i.i.80.i = bitcast <4 x i32> %or.i.i.79.i to <4 x float>
%1 = shufflevector <4 x float> %astype5.i.i.80.i, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <8 x float> undef, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
- store <8 x float> %2, <8 x float>* undef, align 32
+ store <8 x float> %2, ptr undef, align 32
br label %if.end
; CHECK-LABEL: @acosh_float8
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx -fast-isel -O0 | FileCheck -check-prefix=CHECK-FISL %s
; XFAIL: *
-define void @test1sp(float %a, float %b, float %c, float %e, float* nocapture %d) #0 {
+define void @test1sp(float %a, float %b, float %c, float %e, ptr nocapture %d) #0 {
entry:
%0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
- store float %0, float* %d, align 4
+ store float %0, ptr %d, align 4
%1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
- %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
- store float %1, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+ store float %1, ptr %arrayidx1, align 4
ret void
; CHECK-LABEL: @test1sp
; CHECK-FISL: blr
}
-define void @test2sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test2sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
entry:
%0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
- store float %0, float* %d, align 4
+ store float %0, ptr %d, align 4
%1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
- %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
- store float %1, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+ store float %1, ptr %arrayidx1, align 4
%2 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
- %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
- store float %2, float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %d, i64 2
+ store float %2, ptr %arrayidx2, align 4
ret void
; CHECK-LABEL: @test2sp
; CHECK-FISL: blr
}
-define void @test3sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test3sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
entry:
%0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
- store float %0, float* %d, align 4
+ store float %0, ptr %d, align 4
%1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
%2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
- %arrayidx1 = getelementptr inbounds float, float* %d, i64 3
- store float %2, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %d, i64 3
+ store float %2, ptr %arrayidx1, align 4
%3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
- %arrayidx2 = getelementptr inbounds float, float* %d, i64 2
- store float %3, float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float, float* %d, i64 1
- store float %1, float* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %d, i64 2
+ store float %3, ptr %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, ptr %d, i64 1
+ store float %1, ptr %arrayidx3, align 4
ret void
; CHECK-LABEL: @test3sp
; CHECK-FISL: blr
}
-define void @test4sp(float %a, float %b, float %c, float %e, float %f, float* nocapture %d) #0 {
+define void @test4sp(float %a, float %b, float %c, float %e, float %f, ptr nocapture %d) #0 {
entry:
%0 = tail call float @llvm.fma.f32(float %b, float %c, float %a)
- store float %0, float* %d, align 4
+ store float %0, ptr %d, align 4
%1 = tail call float @llvm.fma.f32(float %b, float %e, float %a)
- %arrayidx1 = getelementptr inbounds float, float* %d, i64 1
- store float %1, float* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %d, i64 1
+ store float %1, ptr %arrayidx1, align 4
%2 = tail call float @llvm.fma.f32(float %b, float %c, float %1)
- %arrayidx3 = getelementptr inbounds float, float* %d, i64 3
- store float %2, float* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds float, ptr %d, i64 3
+ store float %2, ptr %arrayidx3, align 4
%3 = tail call float @llvm.fma.f32(float %b, float %f, float %a)
- %arrayidx4 = getelementptr inbounds float, float* %d, i64 2
- store float %3, float* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds float, ptr %d, i64 2
+ store float %3, ptr %arrayidx4, align 4
ret void
; CHECK-LABEL: @test4sp
; CHECK-P9UP-LABEL: test1
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
+ %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vsi)
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
+ store <4 x i32> %0, ptr @res_vsi, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
+ %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vui)
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <4 x i32> %1, <4 x i32>* @res_vui, align 16
+ store <4 x i32> %1, ptr @res_vui, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
+ %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr @vf)
%3 = bitcast <4 x i32> %2 to <4 x float>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <4 x float> %3, <4 x float>* @res_vf, align 16
+ store <4 x float> %3, ptr @res_vf, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
+ %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vsll)
%5 = bitcast <2 x double> %4 to <2 x i64>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <2 x i64> %5, <2 x i64>* @res_vsll, align 16
+ store <2 x i64> %5, ptr @res_vsll, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
+ %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vull)
%7 = bitcast <2 x double> %6 to <2 x i64>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <2 x i64> %7, <2 x i64>* @res_vull, align 16
+ store <2 x i64> %7, ptr @res_vull, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
+ %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr @vd)
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- store <2 x double> %8, <2 x double>* @res_vd, align 16
+ store <2 x double> %8, ptr @res_vd, align 16
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %9 = load <4 x i32>, <4 x i32>* @vsi, align 16
+ %9 = load <4 x i32>, ptr @vsi, align 16
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, ptr @res_vsi)
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %10 = load <4 x i32>, <4 x i32>* @vui, align 16
+ %10 = load <4 x i32>, ptr @vui, align 16
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, ptr @res_vui)
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %11 = load <4 x float>, <4 x float>* @vf, align 16
+ %11 = load <4 x float>, ptr @vf, align 16
%12 = bitcast <4 x float> %11 to <4 x i32>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, ptr @res_vf)
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %13 = load <2 x i64>, <2 x i64>* @vsll, align 16
+ %13 = load <2 x i64>, ptr @vsll, align 16
%14 = bitcast <2 x i64> %13 to <2 x double>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, i8* bitcast (<2 x i64>* @res_vsll to i8*))
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, ptr @res_vsll)
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %15 = load <2 x i64>, <2 x i64>* @vull, align 16
+ %15 = load <2 x i64>, ptr @vull, align 16
%16 = bitcast <2 x i64> %15 to <2 x double>
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvd2x(<2 x double> %16, i8* bitcast (<2 x i64>* @res_vull to i8*))
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %16, ptr @res_vull)
; CHECK: lxvd2x
; CHECK-P9UP-DAG: lxv
- %17 = load <2 x double>, <2 x double>* @vd, align 16
+ %17 = load <2 x double>, ptr @vd, align 16
; CHECK: stxvd2x
; CHECK-P9UP-DAG: stxv
- call void @llvm.ppc.vsx.stxvd2x(<2 x double> %17, i8* bitcast (<2 x double>* @res_vd to i8*))
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %17, ptr @res_vd)
ret void
}
-declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
-declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
-declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
-declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
+declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)
+declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, ptr)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr)
; Function Attrs: nounwind
define void @test1() {
entry:
- %0 = load <4 x i32>, <4 x i32>* @vsi, align 16
- %1 = load <4 x i32>, <4 x i32>* @vui, align 16
- %2 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 16
- %3 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vsll to <2 x double>*), align 16
- %4 = load <2 x double>, <2 x double>* bitcast (<2 x i64>* @vull to <2 x double>*), align 16
- %5 = load <2 x double>, <2 x double>* @vd, align 16
- store <4 x i32> %0, <4 x i32>* @res_vsi, align 16
- store <4 x i32> %1, <4 x i32>* @res_vui, align 16
- store <4 x i32> %2, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 16
- store <2 x double> %3, <2 x double>* bitcast (<2 x i64>* @res_vsll to <2 x double>*), align 16
- store <2 x double> %4, <2 x double>* bitcast (<2 x i64>* @res_vull to <2 x double>*), align 16
- store <2 x double> %5, <2 x double>* @res_vd, align 16
+ %0 = load <4 x i32>, ptr @vsi, align 16
+ %1 = load <4 x i32>, ptr @vui, align 16
+ %2 = load <4 x i32>, ptr @vf, align 16
+ %3 = load <2 x double>, ptr @vsll, align 16
+ %4 = load <2 x double>, ptr @vull, align 16
+ %5 = load <2 x double>, ptr @vd, align 16
+ store <4 x i32> %0, ptr @res_vsi, align 16
+ store <4 x i32> %1, ptr @res_vui, align 16
+ store <4 x i32> %2, ptr @res_vf, align 16
+ store <2 x double> %3, ptr @res_vsll, align 16
+ store <2 x double> %4, ptr @res_vull, align 16
+ store <2 x double> %5, ptr @res_vd, align 16
ret void
}
define void @test1() #0 {
; CHECK-LABEL: @test1
entry:
- %0 = load volatile <4 x float>, <4 x float>* @vf, align 16
- %1 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ %0 = load volatile <4 x float>, ptr @vf, align 16
+ %1 = load volatile <4 x float>, ptr @vf, align 16
%2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
; CHECK: xvmaxsp
- store <4 x float> %2, <4 x float>* @vf1, align 16
- %3 = load <2 x double>, <2 x double>* @vd, align 16
+ store <4 x float> %2, ptr @vf1, align 16
+ %3 = load <2 x double>, ptr @vd, align 16
%4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
; CHECK: xvmaxdp
- store <2 x double> %4, <2 x double>* @vd1, align 16
- %5 = load volatile <4 x float>, <4 x float>* @vf, align 16
- %6 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ store <2 x double> %4, ptr @vd1, align 16
+ %5 = load volatile <4 x float>, ptr @vf, align 16
+ %6 = load volatile <4 x float>, ptr @vf, align 16
%7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
; CHECK: xvmaxsp
- store <4 x float> %7, <4 x float>* @vf2, align 16
- %8 = load volatile <4 x float>, <4 x float>* @vf, align 16
- %9 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ store <4 x float> %7, ptr @vf2, align 16
+ %8 = load volatile <4 x float>, ptr @vf, align 16
+ %9 = load volatile <4 x float>, ptr @vf, align 16
%10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
; CHECK: xvminsp
- store <4 x float> %10, <4 x float>* @vf3, align 16
- %11 = load <2 x double>, <2 x double>* @vd, align 16
+ store <4 x float> %10, ptr @vf3, align 16
+ %11 = load <2 x double>, ptr @vd, align 16
%12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
; CHECK: xvmindp
- store <2 x double> %12, <2 x double>* @vd2, align 16
- %13 = load volatile <4 x float>, <4 x float>* @vf, align 16
- %14 = load volatile <4 x float>, <4 x float>* @vf, align 16
+ store <2 x double> %12, ptr @vd2, align 16
+ %13 = load volatile <4 x float>, ptr @vf, align 16
+ %14 = load volatile <4 x float>, ptr @vf, align 16
%15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
; CHECK: xvminsp
- store <4 x float> %15, <4 x float>* @vf4, align 16
- %16 = load double, double* @d, align 8
+ store <4 x float> %15, ptr @vf4, align 16
+ %16 = load double, ptr @d, align 8
%17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
; CHECK: xsmaxdp
- store double %17, double* @d1, align 8
+ store double %17, ptr @d1, align 8
%18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
; CHECK: xsmindp
- store double %18, double* @d2, align 8
+ store double %18, ptr @d2, align 8
ret void
}
; Unaligned loads/stores on P8 and later should use VSX where possible.
-define <2 x double> @test28u(<2 x double>* %a) {
- %v = load <2 x double>, <2 x double>* %a, align 8
+define <2 x double> @test28u(ptr %a) {
+ %v = load <2 x double>, ptr %a, align 8
ret <2 x double> %v
; CHECK-LABEL: @test28u
; CHECK: blr
}
-define void @test29u(<2 x double>* %a, <2 x double> %b) {
- store <2 x double> %b, <2 x double>* %a, align 8
+define void @test29u(ptr %a, <2 x double> %b) {
+ store <2 x double> %b, ptr %a, align 8
ret void
; CHECK-LABEL: @test29u
; CHECK: blr
}
-define <4 x float> @test32u(<4 x float>* %a) {
- %v = load <4 x float>, <4 x float>* %a, align 8
+define <4 x float> @test32u(ptr %a) {
+ %v = load <4 x float>, ptr %a, align 8
ret <4 x float> %v
; CHECK-REG-LABEL: @test32u
; CHECK-FISL: blr
}
-define void @test33u(<4 x float>* %a, <4 x float> %b) {
- store <4 x float> %b, <4 x float>* %a, align 8
+define void @test33u(ptr %a, <4 x float> %b) {
+ store <4 x float> %b, ptr %a, align 8
ret void
; CHECK-REG-LABEL: @test33u
define void @_Z4testv() {
entry:
; CHECK-LABEL: @_Z4testv
- %0 = load <16 x i8>, <16 x i8>* @uca, align 16
- %1 = load <16 x i8>, <16 x i8>* @ucb, align 16
+ %0 = load <16 x i8>, ptr @uca, align 16
+ %1 = load <16 x i8>, ptr @ucb, align 16
%add.i = add <16 x i8> %1, %0
tail call void (...) @sink(<16 x i8> %add.i)
; CHECK: lxv 34, 0(3)
; CHECK: vaddubm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %2 = load <16 x i8>, <16 x i8>* @sca, align 16
- %3 = load <16 x i8>, <16 x i8>* @scb, align 16
+ %2 = load <16 x i8>, ptr @sca, align 16
+ %3 = load <16 x i8>, ptr @scb, align 16
%add.i22 = add <16 x i8> %3, %2
tail call void (...) @sink(<16 x i8> %add.i22)
; CHECK: lxv 34, 0(3)
; CHECK: vaddubm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %4 = load <8 x i16>, <8 x i16>* @usa, align 16
- %5 = load <8 x i16>, <8 x i16>* @usb, align 16
+ %4 = load <8 x i16>, ptr @usa, align 16
+ %5 = load <8 x i16>, ptr @usb, align 16
%add.i21 = add <8 x i16> %5, %4
tail call void (...) @sink(<8 x i16> %add.i21)
; CHECK: lxv 34, 0(3)
; CHECK: vadduhm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %6 = load <8 x i16>, <8 x i16>* @ssa, align 16
- %7 = load <8 x i16>, <8 x i16>* @ssb, align 16
+ %6 = load <8 x i16>, ptr @ssa, align 16
+ %7 = load <8 x i16>, ptr @ssb, align 16
%add.i20 = add <8 x i16> %7, %6
tail call void (...) @sink(<8 x i16> %add.i20)
; CHECK: lxv 34, 0(3)
; CHECK: vadduhm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %8 = load <4 x i32>, <4 x i32>* @uia, align 16
- %9 = load <4 x i32>, <4 x i32>* @uib, align 16
+ %8 = load <4 x i32>, ptr @uia, align 16
+ %9 = load <4 x i32>, ptr @uib, align 16
%add.i19 = add <4 x i32> %9, %8
tail call void (...) @sink(<4 x i32> %add.i19)
; CHECK: lxv 34, 0(3)
; CHECK: vadduwm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %10 = load <4 x i32>, <4 x i32>* @sia, align 16
- %11 = load <4 x i32>, <4 x i32>* @sib, align 16
+ %10 = load <4 x i32>, ptr @sia, align 16
+ %11 = load <4 x i32>, ptr @sib, align 16
%add.i18 = add <4 x i32> %11, %10
tail call void (...) @sink(<4 x i32> %add.i18)
; CHECK: lxv 34, 0(3)
; CHECK: vadduwm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %12 = load <2 x i64>, <2 x i64>* @ulla, align 16
- %13 = load <2 x i64>, <2 x i64>* @ullb, align 16
+ %12 = load <2 x i64>, ptr @ulla, align 16
+ %13 = load <2 x i64>, ptr @ullb, align 16
%add.i17 = add <2 x i64> %13, %12
tail call void (...) @sink(<2 x i64> %add.i17)
; CHECK: lxv 34, 0(3)
; CHECK: vaddudm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %14 = load <2 x i64>, <2 x i64>* @slla, align 16
- %15 = load <2 x i64>, <2 x i64>* @sllb, align 16
+ %14 = load <2 x i64>, ptr @slla, align 16
+ %15 = load <2 x i64>, ptr @sllb, align 16
%add.i16 = add <2 x i64> %15, %14
tail call void (...) @sink(<2 x i64> %add.i16)
; CHECK: lxv 34, 0(3)
; CHECK: vaddudm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %16 = load <1 x i128>, <1 x i128>* @uxa, align 16
- %17 = load <1 x i128>, <1 x i128>* @uxb, align 16
+ %16 = load <1 x i128>, ptr @uxa, align 16
+ %17 = load <1 x i128>, ptr @uxb, align 16
%add.i15 = add <1 x i128> %17, %16
tail call void (...) @sink(<1 x i128> %add.i15)
; CHECK: lxv 34, 0(3)
; CHECK: vadduqm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %18 = load <1 x i128>, <1 x i128>* @sxa, align 16
- %19 = load <1 x i128>, <1 x i128>* @sxb, align 16
+ %18 = load <1 x i128>, ptr @sxa, align 16
+ %19 = load <1 x i128>, ptr @sxb, align 16
%add.i14 = add <1 x i128> %19, %18
tail call void (...) @sink(<1 x i128> %add.i14)
; CHECK: lxv 34, 0(3)
; CHECK: vadduqm 2, 3, 2
; CHECK: stxv 34,
; CHECK: bl sink
- %20 = load <4 x float>, <4 x float>* @vfa, align 16
- %21 = load <4 x float>, <4 x float>* @vfb, align 16
+ %20 = load <4 x float>, ptr @vfa, align 16
+ %21 = load <4 x float>, ptr @vfb, align 16
%add.i13 = fadd <4 x float> %20, %21
tail call void (...) @sink(<4 x float> %add.i13)
; CHECK: lxv 0, 0(3)
; CHECK: xvaddsp 34, 0, 1
; CHECK: stxv 34,
; CHECK: bl sink
- %22 = load <2 x double>, <2 x double>* @vda, align 16
- %23 = load <2 x double>, <2 x double>* @vdb, align 16
+ %22 = load <2 x double>, ptr @vda, align 16
+ %23 = load <2 x double>, ptr @vdb, align 16
%add.i12 = fadd <2 x double> %22, %23
tail call void (...) @sink(<2 x double> %add.i12)
; CHECK: lxv 0, 0(3)
declare <4 x float>@llvm.ppc.vsx.xvcvhpsp(<8 x i16>)
; Function Attrs: nounwind readnone
-define <4 x i32> @testLXVL(i8* %a, i64 %b) {
+define <4 x i32> @testLXVL(ptr %a, i64 %b) {
entry:
- %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvl(i8* %a, i64 %b)
+ %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvl(ptr %a, i64 %b)
ret <4 x i32> %0
; CHECK-LABEL: testLXVL
; CHECK: lxvl 34, 3, 4
; CHECK: blr
}
; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvl(i8*, i64)
+declare <4 x i32> @llvm.ppc.vsx.lxvl(ptr, i64)
-define void @testSTXVL(<4 x i32> %a, i8* %b, i64 %c) {
+define void @testSTXVL(<4 x i32> %a, ptr %b, i64 %c) {
entry:
- tail call void @llvm.ppc.vsx.stxvl(<4 x i32> %a, i8* %b, i64 %c)
+ tail call void @llvm.ppc.vsx.stxvl(<4 x i32> %a, ptr %b, i64 %c)
ret void
; CHECK-LABEL: testSTXVL
; CHECK: stxvl 34, 5, 6
; CHECK: blr
}
; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvl(<4 x i32>, i8*, i64)
+declare void @llvm.ppc.vsx.stxvl(<4 x i32>, ptr, i64)
; Function Attrs: nounwind readnone
-define <4 x i32> @testLXVLL(i8* %a, i64 %b) {
+define <4 x i32> @testLXVLL(ptr %a, i64 %b) {
entry:
- %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvll(i8* %a, i64 %b)
+ %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvll(ptr %a, i64 %b)
ret <4 x i32> %0
; CHECK-LABEL: testLXVLL
; CHECK: lxvll 34, 3, 4
; CHECK: blr
}
; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvll(i8*, i64)
+declare <4 x i32> @llvm.ppc.vsx.lxvll(ptr, i64)
-define void @testSTXVLL(<4 x i32> %a, i8* %b, i64 %c) {
+define void @testSTXVLL(<4 x i32> %a, ptr %b, i64 %c) {
entry:
- tail call void @llvm.ppc.vsx.stxvll(<4 x i32> %a, i8* %b, i64 %c)
+ tail call void @llvm.ppc.vsx.stxvll(<4 x i32> %a, ptr %b, i64 %c)
ret void
; CHECK-LABEL: testSTXVLL
; CHECK: stxvll 34, 5, 6
; CHECK: blr
}
; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvll(<4 x i32>, i8*, i64)
+declare void @llvm.ppc.vsx.stxvll(<4 x i32>, ptr, i64)
define <4 x i32> @test0(<4 x i32> %a) local_unnamed_addr #0 {
entry:
; CHECK: blr
entry:
%a = alloca [4 x i32], align 4
- %0 = bitcast [4 x i32]* %a to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull align 4 %0, i8 0, i64 16, i1 false)
- %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 0
- %call = call signext i32 @callee(i32* nonnull %arraydecay) #3
+ call void @llvm.memset.p0.i64(ptr nonnull align 4 %a, i8 0, i64 16, i1 false)
+ %call = call signext i32 @callee(ptr nonnull %a) #3
ret i32 %call
}
; CHECK: blr
entry:
%a = alloca [16 x i32], align 4
- %0 = bitcast [16 x i32]* %a to i8*
- call void @llvm.memset.p0i8.i64(i8* nonnull align 4 %0, i8 0, i64 64, i1 false)
- %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %a, i64 0, i64 0
- %call = call signext i32 @callee(i32* nonnull %arraydecay) #3
+ call void @llvm.memset.p0.i64(ptr nonnull align 4 %a, i8 0, i64 64, i1 false)
+ %call = call signext i32 @callee(ptr nonnull %a) #3
ret i32 %call
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
-declare signext i32 @callee(i32*) local_unnamed_addr #2
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
+declare signext i32 @callee(ptr) local_unnamed_addr #2
; RUN: llc -mcpu=pwr9 -mtriple=powerpc64-unknown-unknown < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-BE
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucuc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecucuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecusuc(i8* nocapture readonly %ptr) {
+define <8 x i16> @vecusuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i16
%splat.splatinsert = insertelement <8 x i16> undef, i16 %conv, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuiuc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecuiuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculuc(i8* nocapture readonly %ptr) {
+define <2 x i64> @veculuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscuc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecscuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecssuc(i8* nocapture readonly %ptr) {
+define <8 x i16> @vecssuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i16
%splat.splatinsert = insertelement <8 x i16> undef, i16 %conv, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsiuc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecsiuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecsluc(i8* nocapture readonly %ptr) {
+define <2 x i64> @vecsluc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = zext i8 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfuc(i8* nocapture readonly %ptr) {
+define <4 x float> @vecfuc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = uitofp i8 %0 to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecduc(i8* nocapture readonly %ptr) {
+define <2 x double> @vecduc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = uitofp i8 %0 to double
%splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucsc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecucsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuisc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecuisc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sext i8 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculsc(i8* nocapture readonly %ptr) {
+define <2 x i64> @veculsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sext i8 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscsc(i8* nocapture readonly %ptr) {
+define <16 x i8> @vecscsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsisc(i8* nocapture readonly %ptr) {
+define <4 x i32> @vecsisc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sext i8 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslsc(i8* nocapture readonly %ptr) {
+define <2 x i64> @vecslsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sext i8 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfsc(i8* nocapture readonly %ptr) {
+define <4 x float> @vecfsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sitofp i8 %0 to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdsc(i8* nocapture readonly %ptr) {
+define <2 x double> @vecdsc(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sitofp i8 %0 to double
%splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucus(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecucus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = trunc i16 %0 to i8
%splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecusus(i16* nocapture readonly %ptr) {
+define <8 x i16> @vecusus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuius(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecuius(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = zext i16 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculus(i16* nocapture readonly %ptr) {
+define <2 x i64> @veculus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = zext i16 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscus(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecscus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = trunc i16 %0 to i8
%splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <8 x i16> @vecssus(i16* nocapture readonly %ptr) {
+define <8 x i16> @vecssus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %splat.splat
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsius(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecsius(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = zext i16 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslus(i16* nocapture readonly %ptr) {
+define <2 x i64> @vecslus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = zext i16 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfus(i16* nocapture readonly %ptr) {
+define <4 x float> @vecfus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = uitofp i16 %0 to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdus(i16* nocapture readonly %ptr) {
+define <2 x double> @vecdus(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = uitofp i16 %0 to double
%splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecucss(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecucss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = trunc i16 %0 to i8
%splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecuiss(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecuiss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sext i16 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @veculss(i16* nocapture readonly %ptr) {
+define <2 x i64> @veculss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sext i16 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <16 x i8> @vecscss(i16* nocapture readonly %ptr) {
+define <16 x i8> @vecscss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = trunc i16 %0 to i8
%splat.splatinsert = insertelement <16 x i8> undef, i8 %conv, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x i32> @vecsiss(i16* nocapture readonly %ptr) {
+define <4 x i32> @vecsiss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sext i16 %0 to i32
%splat.splatinsert = insertelement <4 x i32> undef, i32 %conv, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x i64> @vecslss(i16* nocapture readonly %ptr) {
+define <2 x i64> @vecslss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sext i16 %0 to i64
%splat.splatinsert = insertelement <2 x i64> undef, i64 %conv, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <4 x float> @vecfss(i16* nocapture readonly %ptr) {
+define <4 x float> @vecfss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sitofp i16 %0 to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind readonly
-define <2 x double> @vecdss(i16* nocapture readonly %ptr) {
+define <2 x double> @vecdss(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sitofp i16 %0 to double
%splat.splatinsert = insertelement <2 x double> undef, double %conv, i32 0
%splat.splat = shufflevector <2 x double> %splat.splatinsert, <2 x double> undef, <2 x i32> zeroinitializer
}
; Function Attrs: norecurse nounwind
-define void @storefsc(float %f, i8* nocapture %ptr) {
+define void @storefsc(float %f, ptr nocapture %ptr) {
entry:
%conv = fptosi float %f to i8
- store i8 %conv, i8* %ptr, align 1
+ store i8 %conv, ptr %ptr, align 1
ret void
; CHECK-LABEL: storefsc
; CHECK: xscvdpsxws 0, 1
}
; Function Attrs: norecurse nounwind
-define void @storedsc(double %d, i8* nocapture %ptr) {
+define void @storedsc(double %d, ptr nocapture %ptr) {
entry:
%conv = fptosi double %d to i8
- store i8 %conv, i8* %ptr, align 1
+ store i8 %conv, ptr %ptr, align 1
ret void
; CHECK-LABEL: storedsc
; CHECK: xscvdpsxws 0, 1
}
; Function Attrs: norecurse nounwind
-define void @storevcsc0(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc0(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 0
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc0
; CHECK: vsldoi 2, 2, 2, 8
}
; Function Attrs: norecurse nounwind
-define void @storevcsc1(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc1(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 1
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc1
; CHECK: vsldoi 2, 2, 2, 7
}
; Function Attrs: norecurse nounwind
-define void @storevcsc2(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc2(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 2
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc2
; CHECK: vsldoi 2, 2, 2, 6
}
; Function Attrs: norecurse nounwind
-define void @storevcsc3(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc3(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 3
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc3
; CHECK: vsldoi 2, 2, 2, 5
}
; Function Attrs: norecurse nounwind
-define void @storevcsc4(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc4(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 4
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc4
; CHECK: vsldoi 2, 2, 2, 4
}
; Function Attrs: norecurse nounwind
-define void @storevcsc5(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc5(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 5
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc5
; CHECK: vsldoi 2, 2, 2, 3
}
; Function Attrs: norecurse nounwind
-define void @storevcsc6(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc6(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 6
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc6
; CHECK: vsldoi 2, 2, 2, 2
}
; Function Attrs: norecurse nounwind
-define void @storevcsc7(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc7(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 7
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc7
; CHECK: vsldoi 2, 2, 2, 1
}
; Function Attrs: norecurse nounwind
-define void @storevcsc8(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc8(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 8
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc8
; CHECK: stxsibx 34, 0, 5
}
; Function Attrs: norecurse nounwind
-define void @storevcsc9(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc9(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 9
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc9
; CHECK: vsldoi 2, 2, 2, 15
}
; Function Attrs: norecurse nounwind
-define void @storevcsc10(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc10(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 10
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc10
; CHECK: vsldoi 2, 2, 2, 14
}
; Function Attrs: norecurse nounwind
-define void @storevcsc11(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc11(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 11
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc11
; CHECK: vsldoi 2, 2, 2, 13
}
; Function Attrs: norecurse nounwind
-define void @storevcsc12(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc12(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 12
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc12
; CHECK: vsldoi 2, 2, 2, 12
}
; Function Attrs: norecurse nounwind
-define void @storevcsc13(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc13(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 13
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc13
; CHECK: vsldoi 2, 2, 2, 11
}
; Function Attrs: norecurse nounwind
-define void @storevcsc14(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc14(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 14
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc14
; CHECK: vsldoi 2, 2, 2, 10
}
; Function Attrs: norecurse nounwind
-define void @storevcsc15(<16 x i8> %v, i8* nocapture %ptr) {
+define void @storevcsc15(<16 x i8> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <16 x i8> %v, i32 15
- store i8 %vecext, i8* %ptr, align 1
+ store i8 %vecext, ptr %ptr, align 1
ret void
; CHECK-LABEL: storevcsc15
; CHECK: vsldoi 2, 2, 2, 9
}
; Function Attrs: norecurse nounwind
-define void @storefss(float %f, i16* nocapture %ptr) {
+define void @storefss(float %f, ptr nocapture %ptr) {
entry:
%conv = fptosi float %f to i16
- store i16 %conv, i16* %ptr, align 2
+ store i16 %conv, ptr %ptr, align 2
ret void
; CHECK-LABEL: storefss
; CHECK: xscvdpsxws 0, 1
}
; Function Attrs: norecurse nounwind
-define void @storedss(double %d, i16* nocapture %ptr) {
+define void @storedss(double %d, ptr nocapture %ptr) {
entry:
%conv = fptosi double %d to i16
- store i16 %conv, i16* %ptr, align 2
+ store i16 %conv, ptr %ptr, align 2
ret void
; CHECK-LABEL: storedss
; CHECK: xscvdpsxws 0, 1
}
; Function Attrs: norecurse nounwind
-define void @storevsss0(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss0(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 0
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss0
; CHECK: vsldoi 2, 2, 2, 8
}
; Function Attrs: norecurse nounwind
-define void @storevsss1(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss1(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 1
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss1
; CHECK: vsldoi 2, 2, 2, 6
}
; Function Attrs: norecurse nounwind
-define void @storevsss2(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss2(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 2
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss2
; CHECK: vsldoi 2, 2, 2, 4
}
; Function Attrs: norecurse nounwind
-define void @storevsss3(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss3(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 3
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss3
; CHECK: vsldoi 2, 2, 2, 2
}
; Function Attrs: norecurse nounwind
-define void @storevsss4(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss4(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 4
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss4
; CHECK: stxsihx 34, 0, 5
}
; Function Attrs: norecurse nounwind
-define void @storevsss5(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss5(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 5
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss5
; CHECK: vsldoi 2, 2, 2, 14
}
; Function Attrs: norecurse nounwind
-define void @storevsss6(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss6(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 6
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss6
; CHECK: vsldoi 2, 2, 2, 12
}
; Function Attrs: norecurse nounwind
-define void @storevsss7(<8 x i16> %v, i16* nocapture %ptr) {
+define void @storevsss7(<8 x i16> %v, ptr nocapture %ptr) {
entry:
%vecext = extractelement <8 x i16> %v, i32 7
- store i16 %vecext, i16* %ptr, align 2
+ store i16 %vecext, ptr %ptr, align 2
ret void
; CHECK-LABEL: storevsss7
; CHECK: vsldoi 2, 2, 2, 10
}
; Function Attrs: norecurse nounwind readonly
-define float @convscf(i8* nocapture readonly %ptr) {
+define float @convscf(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sitofp i8 %0 to float
ret float %conv
; CHECK-LABEL: convscf
}
; Function Attrs: norecurse nounwind readonly
-define float @convucf(i8* nocapture readonly %ptr) {
+define float @convucf(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = uitofp i8 %0 to float
ret float %conv
; CHECK-LABEL: convucf
}
; Function Attrs: norecurse nounwind readonly
-define double @convscd(i8* nocapture readonly %ptr) {
+define double @convscd(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = sitofp i8 %0 to double
; CHECK-LABEL: convscd
; CHECK: lxsibzx 34, 0, 3
}
; Function Attrs: norecurse nounwind readonly
-define double @convucd(i8* nocapture readonly %ptr) {
+define double @convucd(ptr nocapture readonly %ptr) {
entry:
- %0 = load i8, i8* %ptr, align 1
+ %0 = load i8, ptr %ptr, align 1
%conv = uitofp i8 %0 to double
ret double %conv
; CHECK-LABEL: convucd
}
; Function Attrs: norecurse nounwind readonly
-define float @convssf(i16* nocapture readonly %ptr) {
+define float @convssf(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sitofp i16 %0 to float
ret float %conv
; CHECK-LABEL: convssf
}
; Function Attrs: norecurse nounwind readonly
-define float @convusf(i16* nocapture readonly %ptr) {
+define float @convusf(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = uitofp i16 %0 to float
ret float %conv
; CHECK-LABEL: convusf
}
; Function Attrs: norecurse nounwind readonly
-define double @convssd(i16* nocapture readonly %ptr) {
+define double @convssd(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = sitofp i16 %0 to double
ret double %conv
; CHECK-LABEL: convssd
}
; Function Attrs: norecurse nounwind readonly
-define double @convusd(i16* nocapture readonly %ptr) {
+define double @convusd(ptr nocapture readonly %ptr) {
entry:
- %0 = load i16, i16* %ptr, align 2
+ %0 = load i16, ptr %ptr, align 2
%conv = uitofp i16 %0 to double
ret double %conv
; CHECK-LABEL: convusd
; Function Attrs: nounwind
define float @emit_xsresp() {
entry:
- %0 = load float, float* @a, align 4
- %1 = load float, float* @b, align 4
+ %0 = load float, ptr @a, align 4
+ %1 = load float, ptr @b, align 4
%div = fdiv arcp ninf float %0, %1
ret float %div
; CHECK-LABEL: @emit_xsresp
define float @emit_xsrsqrtesp(float %f) {
entry:
%f.addr = alloca float, align 4
- store float %f, float* %f.addr, align 4
- %0 = load float, float* %f.addr, align 4
- %1 = load float, float* @b, align 4
+ store float %f, ptr %f.addr, align 4
+ %0 = load float, ptr %f.addr, align 4
+ %1 = load float, ptr @b, align 4
%2 = call float @llvm.sqrt.f32(float %1)
%div = fdiv arcp float %0, %2
ret float %div
; Function Attrs: nounwind
define double @emit_xsredp() {
entry:
- %0 = load double, double* @c, align 8
- %1 = load double, double* @d, align 8
+ %0 = load double, ptr @c, align 8
+ %1 = load double, ptr @d, align 8
%div = fdiv arcp ninf double %0, %1
ret double %div
; CHECK-LABEL: @emit_xsredp
define double @emit_xsrsqrtedp(double %f) {
entry:
%f.addr = alloca double, align 8
- store double %f, double* %f.addr, align 8
- %0 = load double, double* %f.addr, align 8
- %1 = load double, double* @d, align 8
+ store double %f, ptr %f.addr, align 8
+ %0 = load double, ptr %f.addr, align 8
+ %1 = load double, ptr @d, align 8
%2 = call double @llvm.sqrt.f64(double %1)
%div = fdiv arcp double %0, %2
ret double %div
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mattr=+vsx \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
-define <2 x double> @loadChainHasUser(<2 x double>* %p1, <2 x double> %v2) {
+define <2 x double> @loadChainHasUser(ptr %p1, <2 x double> %v2) {
; CHECK-LABEL: loadChainHasUser:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-NEXT: stxv 34, 0(3)
; CHECK-NEXT: xxlor 34, 0, 0
; CHECK-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- store <2 x double> %v2, <2 x double>* %p1, align 16
+ %v1 = load <2 x double>, ptr %p1
+ store <2 x double> %v2, ptr %p1, align 16
%v3 = shufflevector <2 x double> %v1, <2 x double> %v1, <2 x i32> < i32 1, i32 0>
ret <2 x double> %v3
}
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mattr=+vsx \
; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
-define <2 x double> @loadHasMultipleUses(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @loadHasMultipleUses(ptr %p1, ptr %p2) {
; CHECK-LABEL: loadHasMultipleUses:
; CHECK: # %bb.0:
; CHECK-NEXT: lxv 0, 0(3)
; CHECK-NEXT: xxswapd 34, 0
; CHECK-NEXT: stxv 0, 0(4)
; CHECK-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- store <2 x double> %v1, <2 x double>* %p2, align 16
+ %v1 = load <2 x double>, ptr %p1
+ store <2 x double> %v1, ptr %p2, align 16
%v2 = shufflevector <2 x double> %v1, <2 x double> %v1, <2 x i32> < i32 1, i32 0>
ret <2 x double> %v2
}
-define <2 x double> @storeHasMultipleUses(<2 x double> %v, <2 x double>* %p) {
+define <2 x double> @storeHasMultipleUses(<2 x double> %v, ptr %p) {
; CHECK-LABEL: storeHasMultipleUses:
; CHECK: # %bb.0:
; CHECK-NEXT: xxswapd 34, 34
; CHECK-NEXT: stxv 34, 256(5)
; CHECK-NEXT: blr
%v1 = shufflevector <2 x double> %v, <2 x double> %v, <2 x i32> < i32 1, i32 0>
- %addr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 16
- store <2 x double> %v1, <2 x double>* %addr, align 16
+ %addr = getelementptr inbounds <2 x double>, ptr %p, i64 16
+ store <2 x double> %v1, ptr %addr, align 16
%v2 = shufflevector <2 x double> %v, <2 x double> %v, <2 x i32> < i32 1, i32 2>
ret <2 x double> %v2
}
; CHECK: stxvd2x
entry:
- %val = load <2 x double>, <2 x double>* @.v2f64, align 16
+ %val = load <2 x double>, ptr @.v2f64, align 16
%0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
%1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
br i1 false, label %if.then.i68.i, label %check.exit69.i
br i1 undef, label %if.then.i63.i, label %check.exit64.i
if.then.i63.i: ; preds = %check.exit69.i
- tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str10, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str1, i64 0, i64 0), <2 x double> %val) #0
+ tail call void (ptr, ...) @printf(ptr @.str10, ptr @.str1, <2 x double> %val) #0
br label %check.exit64.i
check.exit64.i: ; preds = %if.then.i63.i, %check.exit69.i
declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
; Function Attrs: nounwind
-declare void @printf(i8* nocapture readonly, ...) #0
+declare void @printf(ptr nocapture readonly, ...) #0
; Function Attrs: nounwind readnone
declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1
}
-define <2 x double> @test28(<2 x double>* %a) {
+define <2 x double> @test28(ptr %a) {
; CHECK-LABEL: test28:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x v2, 0, r3
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <2 x double>, <2 x double>* %a, align 16
+ %v = load <2 x double>, ptr %a, align 16
ret <2 x double> %v
}
-define void @test29(<2 x double>* %a, <2 x double> %b) {
+define void @test29(ptr %a, <2 x double> %b) {
; CHECK-LABEL: test29:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvd2x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <2 x double> %b, <2 x double>* %a, align 16
+ store <2 x double> %b, ptr %a, align 16
ret void
}
-define <2 x double> @test28u(<2 x double>* %a) {
+define <2 x double> @test28u(ptr %a) {
; CHECK-LABEL: test28u:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x v2, 0, r3
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <2 x double>, <2 x double>* %a, align 8
+ %v = load <2 x double>, ptr %a, align 8
ret <2 x double> %v
}
-define void @test29u(<2 x double>* %a, <2 x double> %b) {
+define void @test29u(ptr %a, <2 x double> %b) {
; CHECK-LABEL: test29u:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvd2x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <2 x double> %b, <2 x double>* %a, align 8
+ store <2 x double> %b, ptr %a, align 8
ret void
}
-define <2 x i64> @test30(<2 x i64>* %a) {
+define <2 x i64> @test30(ptr %a) {
; CHECK-LABEL: test30:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x v2, 0, r3
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <2 x i64>, <2 x i64>* %a, align 16
+ %v = load <2 x i64>, ptr %a, align 16
ret <2 x i64> %v
}
-define void @test31(<2 x i64>* %a, <2 x i64> %b) {
+define void @test31(ptr %a, <2 x i64> %b) {
; CHECK-LABEL: test31:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvd2x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <2 x i64> %b, <2 x i64>* %a, align 16
+ store <2 x i64> %b, ptr %a, align 16
ret void
}
-define <4 x float> @test32(<4 x float>* %a) {
+define <4 x float> @test32(ptr %a) {
; CHECK-LABEL: test32:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvw4x v2, 0, r3
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <4 x float>, <4 x float>* %a, align 16
+ %v = load <4 x float>, ptr %a, align 16
ret <4 x float> %v
}
-define void @test33(<4 x float>* %a, <4 x float> %b) {
+define void @test33(ptr %a, <4 x float> %b) {
; CHECK-LABEL: test33:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvw4x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <4 x float> %b, <4 x float>* %a, align 16
+ store <4 x float> %b, ptr %a, align 16
ret void
}
-define <4 x float> @test32u(<4 x float>* %a) {
+define <4 x float> @test32u(ptr %a) {
; CHECK-LABEL: test32u:
; CHECK: # %bb.0:
; CHECK-NEXT: li r4, 15
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <4 x float>, <4 x float>* %a, align 8
+ %v = load <4 x float>, ptr %a, align 8
ret <4 x float> %v
}
-define void @test33u(<4 x float>* %a, <4 x float> %b) {
+define void @test33u(ptr %a, <4 x float> %b) {
; CHECK-LABEL: test33u:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvw4x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <4 x float> %b, <4 x float>* %a, align 8
+ store <4 x float> %b, ptr %a, align 8
ret void
}
-define <4 x i32> @test34(<4 x i32>* %a) {
+define <4 x i32> @test34(ptr %a) {
; CHECK-LABEL: test34:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvw4x v2, 0, r3
; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
; CHECK-LE-NEXT: xxswapd v2, vs0
; CHECK-LE-NEXT: blr
- %v = load <4 x i32>, <4 x i32>* %a, align 16
+ %v = load <4 x i32>, ptr %a, align 16
ret <4 x i32> %v
}
-define void @test35(<4 x i32>* %a, <4 x i32> %b) {
+define void @test35(ptr %a, <4 x i32> %b) {
; CHECK-LABEL: test35:
; CHECK: # %bb.0:
; CHECK-NEXT: stxvw4x v2, 0, r3
; CHECK-LE-NEXT: xxswapd vs0, v2
; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
; CHECK-LE-NEXT: blr
- store <4 x i32> %b, <4 x i32>* %a, align 16
+ store <4 x i32> %b, ptr %a, align 16
ret void
; FIXME: The code quality here looks pretty bad.
}
-define <2 x double> @test50(double* %a) {
+define <2 x double> @test50(ptr %a) {
; CHECK-LABEL: test50:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvdsx v2, 0, r3
; CHECK-LE: # %bb.0:
; CHECK-LE-NEXT: lxvdsx v2, 0, r3
; CHECK-LE-NEXT: blr
- %v = load double, double* %a, align 8
+ %v = load double, ptr %a, align 8
%w = insertelement <2 x double> undef, double %v, i32 0
%x = insertelement <2 x double> %w, double %v, i32 1
ret <2 x double> %x
; RUN: --check-prefixes=CHECK,CHECK-P9UP
; Function Attrs: nounwind readnone
-define <4 x i32> @test1(i8* %a) {
+define <4 x i32> @test1(ptr %a) {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x v2, 0, r3
; CHECK-NEXT: blr
entry:
- %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %a)
+ %0 = tail call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %a)
ret <4 x i32> %0
}
; Function Attrs: nounwind readnone
-declare <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8*)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr)
; Function Attrs: nounwind readnone
-define <2 x double> @test2(i8* %a) {
+define <2 x double> @test2(ptr %a) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x v2, 0, r3
; CHECK-NEXT: blr
entry:
- %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %a)
+ %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %a)
ret <2 x double> %0
}
; Function Attrs: nounwind readnone
-declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8*)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr)
; Function Attrs: nounwind readnone
-define void @test3(<4 x i32> %a, i8* %b) {
+define void @test3(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x v2, 0, r5
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %a, i8* %b)
+ tail call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %a, ptr %b)
ret void
}
; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvw4x.be(<4 x i32>, i8*)
+declare void @llvm.ppc.vsx.stxvw4x.be(<4 x i32>, ptr)
; Function Attrs: nounwind readnone
-define void @test4(<2 x double> %a, i8* %b) {
+define void @test4(<2 x double> %a, ptr %b) {
; CHECK-LABEL: test4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvd2x v2, 0, r5
; CHECK-NEXT: blr
entry:
- tail call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %a, i8* %b)
+ tail call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %a, ptr %b)
ret void
}
; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvd2x.be(<2 x double>, i8*)
+declare void @llvm.ppc.vsx.stxvd2x.be(<2 x double>, ptr)
define i32 @test_vec_test_swdiv(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test_vec_test_swdiv:
}
; Function Attrs: nounwind readnone
-define <2 x double> @test_lxvd2x(i8* %a) {
+define <2 x double> @test_lxvd2x(ptr %a) {
; CHECK-P9UP-LABEL: test_lxvd2x:
; CHECK-P9UP: # %bb.0: # %entry
; CHECK-P9UP-NEXT: lxv v2, 0(r3)
; CHECK-INTRIN-NEXT: lxvd2x v2, 0, r3
; CHECK-INTRIN-NEXT: blr
entry:
- %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %a)
+ %0 = tail call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr %a)
ret <2 x double> %0
}
; Function Attrs: nounwind readnone
-declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
; Function Attrs: nounwind readnone
-define void @test_stxvd2x(<2 x double> %a, i8* %b) {
+define void @test_stxvd2x(<2 x double> %a, ptr %b) {
; CHECK-P9UP-LABEL: test_stxvd2x:
; CHECK-P9UP: # %bb.0: # %entry
; CHECK-P9UP-NEXT: stxv v2, 0(r5)
; CHECK-INTRIN-NEXT: stxvd2x v2, 0, r5
; CHECK-INTRIN-NEXT: blr
entry:
- tail call void @llvm.ppc.vsx.stxvd2x(<2 x double> %a, i8* %b)
+ tail call void @llvm.ppc.vsx.stxvd2x(<2 x double> %a, ptr %b)
ret void
}
; Function Attrs: nounwind readnone
-declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
+declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)
; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-P9 --implicit-check-not xxswapd
-define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
+define <2 x double> @testi0(ptr %p1, ptr %p2) {
; CHECK-LABEL: testi0:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x vs0, 0, r3
; CHECK-P9-NEXT: lfd f1, 0(r4)
; CHECK-P9-NEXT: xxmrghd v2, vs0, vs1
; CHECK-P9-NEXT: blr
- %v = load <2 x double>, <2 x double>* %p1
- %s = load double, double* %p2
+ %v = load <2 x double>, ptr %p1
+ %s = load double, ptr %p2
%r = insertelement <2 x double> %v, double %s, i32 0
ret <2 x double> %r
}
-define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
+define <2 x double> @testi1(ptr %p1, ptr %p2) {
; CHECK-LABEL: testi1:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x vs0, 0, r3
; CHECK-P9-NEXT: lfd f1, 0(r4)
; CHECK-P9-NEXT: xxpermdi v2, vs1, vs0, 1
; CHECK-P9-NEXT: blr
- %v = load <2 x double>, <2 x double>* %p1
- %s = load double, double* %p2
+ %v = load <2 x double>, ptr %p1
+ %s = load double, ptr %p2
%r = insertelement <2 x double> %v, double %s, i32 1
ret <2 x double> %r
}
-define double @teste0(<2 x double>* %p1) {
+define double @teste0(ptr %p1) {
; CHECK-LABEL: teste0:
; CHECK: # %bb.0:
; CHECK-NEXT: lfd f1, 0(r3)
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lfd f1, 0(r3)
; CHECK-P9-NEXT: blr
- %v = load <2 x double>, <2 x double>* %p1
+ %v = load <2 x double>, ptr %p1
%r = extractelement <2 x double> %v, i32 0
ret double %r
}
-define double @teste1(<2 x double>* %p1) {
+define double @teste1(ptr %p1) {
; CHECK-LABEL: teste1:
; CHECK: # %bb.0:
; CHECK-NEXT: lfd f1, 8(r3)
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lfd f1, 8(r3)
; CHECK-P9-NEXT: blr
- %v = load <2 x double>, <2 x double>* %p1
+ %v = load <2 x double>, ptr %p1
%r = extractelement <2 x double> %v, i32 1
ret double %r
define void @dblToInt() #0 {
entry:
%ii = alloca i32, align 4
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fptosi double %0 to i32
- store volatile i32 %conv, i32* %ii, align 4
+ store volatile i32 %conv, ptr %ii, align 4
ret void
; CHECK-LABEL: @dblToInt
; CHECK: xscvdpsxws [[REGCONV1:[0-9]+]],
define void @fltToInt() #0 {
entry:
%ii = alloca i32, align 4
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
%conv = fptosi float %0 to i32
- store volatile i32 %conv, i32* %ii, align 4
+ store volatile i32 %conv, ptr %ii, align 4
ret void
; CHECK-LABEL: @fltToInt
; CHECK: xscvdpsxws [[REGCONV2:[0-9]+]],
define void @intToDbl() #0 {
entry:
%dd = alloca double, align 8
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = sitofp i32 %0 to double
- store volatile double %conv, double* %dd, align 8
+ store volatile double %conv, ptr %dd, align 8
ret void
; CHECK-LABEL: @intToDbl
; CHECK: lfiwax [[REGLD1:[0-9]+]],
define void @intToFlt() #0 {
entry:
%ff = alloca float, align 4
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = sitofp i32 %0 to float
- store volatile float %conv, float* %ff, align 4
+ store volatile float %conv, ptr %ff, align 4
ret void
; CHECK-LABEL: @intToFlt
; CHECK: lfiwax [[REGLD2:[0-9]+]],
define void @dblToUInt() #0 {
entry:
%uiui = alloca i32, align 4
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fptoui double %0 to i32
- store volatile i32 %conv, i32* %uiui, align 4
+ store volatile i32 %conv, ptr %uiui, align 4
ret void
; CHECK-LABEL: @dblToUInt
; CHECK: xscvdpuxws [[REGCONV3:[0-9]+]],
define void @fltToUInt() #0 {
entry:
%uiui = alloca i32, align 4
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
%conv = fptoui float %0 to i32
- store volatile i32 %conv, i32* %uiui, align 4
+ store volatile i32 %conv, ptr %uiui, align 4
ret void
; CHECK-LABEL: @fltToUInt
; CHECK: xscvdpuxws [[REGCONV4:[0-9]+]],
define void @uIntToDbl() #0 {
entry:
%dd = alloca double, align 8
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = uitofp i32 %0 to double
- store volatile double %conv, double* %dd, align 8
+ store volatile double %conv, ptr %dd, align 8
ret void
; CHECK-LABEL: @uIntToDbl
; CHECK: lfiwzx [[REGLD3:[0-9]+]],
define void @uIntToFlt() #0 {
entry:
%ff = alloca float, align 4
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = uitofp i32 %0 to float
- store volatile float %conv, float* %ff, align 4
+ store volatile float %conv, ptr %ff, align 4
ret void
; CHECK-LABEL: @uIntToFlt
; CHECK: lfiwzx [[REGLD4:[0-9]+]],
define void @dblToFloat() #0 {
entry:
%ff = alloca float, align 4
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fptrunc double %0 to float
- store volatile float %conv, float* %ff, align 4
+ store volatile float %conv, ptr %ff, align 4
ret void
; CHECK-LABEL: @dblToFloat
; CHECK: lfd [[REGLD5:[0-9]+]],
define void @floatToDbl() #0 {
entry:
%dd = alloca double, align 8
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
%conv = fpext float %0 to double
- store volatile double %conv, double* %dd, align 8
+ store volatile double %conv, ptr %dd, align 8
ret void
; CHECK-LABEL: @floatToDbl
; CHECK: lfs [[REGLD5:[0-9]+]],
; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s \
; RUN: --check-prefix=CHECK-P9 --implicit-check-not xxswapd
-define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test00(ptr %p1, ptr %p2) {
; CHECK-LABEL: test00:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvdsx 34, 0, 3
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxvdsx 34, 0, 3
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0>
ret <2 x double> %v3
}
-define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test01(ptr %p1, ptr %p2) {
; CHECK-LABEL: test01:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxv 34, 0(3)
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1>
ret <2 x double> %v3
}
-define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test02(ptr %p1, ptr %p2) {
; CHECK-LABEL: test02:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxmrgld 34, 1, 0
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 2>
ret <2 x double> %v3
}
-define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test03(ptr %p1, ptr %p2) {
; CHECK-LABEL: test03:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxpermdi 34, 1, 0, 1
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 3>
ret <2 x double> %v3
}
-define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test10(ptr %p1, ptr %p2) {
; CHECK-LABEL: test10:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxvd2x 34, 0, 3
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 0>
ret <2 x double> %v3
}
-define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test11(ptr %p1, ptr %p2) {
; CHECK-LABEL: test11:
; CHECK: # %bb.0:
; CHECK-NEXT: addi 3, 3, 8
; CHECK-P9-NEXT: addi 3, 3, 8
; CHECK-P9-NEXT: lxvdsx 34, 0, 3
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 1>
ret <2 x double> %v3
}
-define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test12(ptr %p1, ptr %p2) {
; CHECK-LABEL: test12:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxpermdi 34, 1, 0, 2
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 2>
ret <2 x double> %v3
}
-define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test13(ptr %p1, ptr %p2) {
; CHECK-LABEL: test13:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxmrghd 34, 1, 0
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 3>
ret <2 x double> %v3
}
-define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test20(ptr %p1, ptr %p2) {
; CHECK-LABEL: test20:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxmrgld 34, 0, 1
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 0>
ret <2 x double> %v3
}
-define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test21(ptr %p1, ptr %p2) {
; CHECK-LABEL: test21:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxpermdi 34, 0, 1, 1
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 1>
ret <2 x double> %v3
}
-define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test22(ptr %p1, ptr %p2) {
; CHECK-LABEL: test22:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvdsx 34, 0, 4
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxvdsx 34, 0, 4
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 2>
ret <2 x double> %v3
}
-define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test23(ptr %p1, ptr %p2) {
; CHECK-LABEL: test23:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 4
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxv 34, 0(4)
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 3>
ret <2 x double> %v3
}
-define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test30(ptr %p1, ptr %p2) {
; CHECK-LABEL: test30:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxpermdi 34, 0, 1, 2
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 0>
ret <2 x double> %v3
}
-define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test31(ptr %p1, ptr %p2) {
; CHECK-LABEL: test31:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 0, 0, 3
; CHECK-P9-NEXT: lxv 1, 0(4)
; CHECK-P9-NEXT: xxmrghd 34, 0, 1
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 1>
ret <2 x double> %v3
}
-define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test32(ptr %p1, ptr %p2) {
; CHECK-LABEL: test32:
; CHECK: # %bb.0:
; CHECK-NEXT: lxvd2x 34, 0, 4
; CHECK-P9: # %bb.0:
; CHECK-P9-NEXT: lxvd2x 34, 0, 4
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 2>
ret <2 x double> %v3
}
-define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
+define <2 x double> @test33(ptr %p1, ptr %p2) {
; CHECK-LABEL: test33:
; CHECK: # %bb.0:
; CHECK-NEXT: addi 3, 4, 8
; CHECK-P9-NEXT: addi 3, 4, 8
; CHECK-P9-NEXT: lxvdsx 34, 0, 3
; CHECK-P9-NEXT: blr
- %v1 = load <2 x double>, <2 x double>* %p1
- %v2 = load <2 x double>, <2 x double>* %p2
+ %v1 = load <2 x double>, ptr %p1
+ %v2 = load <2 x double>, ptr %p2
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 3>
ret <2 x double> %v3
}
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
-@_ZTV3foo = linkonce_odr unnamed_addr constant [1 x i8*] [i8* bitcast (void ()* @__cxa_pure_virtual to i8*)]
+@_ZTV3foo = linkonce_odr unnamed_addr constant [1 x ptr] [ptr @__cxa_pure_virtual]
declare void @__cxa_pure_virtual()
; CHECK: .section .data.rel.ro
; CHECK: .weak v1
define i32 @f1() {
- %x = load i32 , i32 * @v1
+ %x = load i32 , ptr @v1
ret i32 %x
}
@v2 = linkonce_odr constant i32 32
; CHECK: .weak v2
-define i32* @f2() {
- ret i32* @v2
+define ptr @f2() {
+ ret ptr @v2
}
@v3 = linkonce_odr unnamed_addr constant i32 32
; CHECK: .section .rodata.cst4,"aM",
; CHECK: .weak v3
-define i32* @f3() {
- ret i32* @v3
+define ptr @f3() {
+ ret ptr @v3
}
@v4 = linkonce_odr unnamed_addr global i32 32
; CHECK: .weak v4
define i32 @f4() {
- %x = load i32 , i32 * @v4
+ %x = load i32 , ptr @v4
ret i32 %x
}
br i1 undef, label %bb1, label %bb8
bb1:
- %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(i64* elementtype(i64) nonnull undef, i64* nonnull undef, i64 1, i64* elementtype(i64) nonnull undef)
+ %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(ptr elementtype(i64) nonnull undef, ptr nonnull undef, i64 1, ptr elementtype(i64) nonnull undef)
%tmp2 = icmp eq i64 %tmp, 0
br i1 %tmp2, label %bb3, label %bb8
bb3:
- %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(i64* elementtype(i64) undef, i64* undef, i64 0, i64 undef, i64* elementtype(i64) undef)
+ %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(ptr elementtype(i64) undef, ptr undef, i64 0, i64 undef, ptr elementtype(i64) undef)
%tmp5 = icmp eq i64 0, %tmp4
br i1 %tmp5, label %bb6, label %bb3
; Function Attrs: nounwind
define void @__fmax_double3_3D_exec(<3 x double> %input1, <3 x i64> %input2,
<3 x i1> %input3, <3 x i64> %input4,
- <3 x i64> %input5, <4 x double>* %input6) #0 {
+ <3 x i64> %input5, ptr %input6) #0 {
entry:
br i1 undef, label %if.then.i, label %fmax_double3.exit
%or.i.i.i = or <3 x i64> %and.i.i.i, %and26.i.i.i
%astype32.i.i.i = bitcast <3 x i64> %or.i.i.i to <3 x double>
%extractVec33.i.i.i = shufflevector <3 x double> %astype32.i.i.i, <3 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
- store <4 x double> %extractVec33.i.i.i, <4 x double>* %input6, align 32
+ store <4 x double> %extractVec33.i.i.i, ptr %input6, align 32
br label %fmax_double3.exit
; CHECK-LABEL: @__fmax_double3_3D_exec
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-define internal i32* @func_65(i32* %p_66) #0 {
+define internal ptr @func_65(ptr %p_66) #0 {
entry:
br i1 undef, label %for.body, label %for.end731
for.body: ; preds = %entry
- %0 = load i32, i32* undef, align 4
+ %0 = load i32, ptr undef, align 4
%or31 = or i32 %0, 319143828
- store i32 %or31, i32* undef, align 4
+ store i32 %or31, ptr undef, align 4
%cmp32 = icmp eq i32 319143828, %or31
%conv33 = zext i1 %cmp32 to i32
%conv34 = sext i32 %conv33 to i64
unreachable
for.end731: ; preds = %entry
- ret i32* undef
+ ret ptr undef
}
; Function Attrs: nounwind
@k = local_unnamed_addr global i32 0, align 4
; Function Attrs: norecurse nounwind
-define signext i32 @cmplwi(i32* nocapture readonly %p, i32* nocapture readonly %q, i32 signext %j, i32 signext %r10) {
+define signext i32 @cmplwi(ptr nocapture readonly %p, ptr nocapture readonly %q, i32 signext %j, i32 signext %r10) {
entry:
- %0 = load i32, i32* %q, align 4
+ %0 = load i32, ptr %q, align 4
%shl = shl i32 %0, %j
- %1 = load i32, i32* %p, align 4
+ %1 = load i32, ptr %p, align 4
%and = and i32 %shl, %r10
%and1 = and i32 %and, %1
%tobool = icmp eq i32 %and1, 0
br i1 %tobool, label %cleanup, label %if.then
if.then:
- store i32 %j, i32* @k, align 4
+ store i32 %j, ptr @k, align 4
br label %cleanup
cleanup:
; Test case for PPCTargetLowering::extendSubTreeForBitPermutation.
; We expect mask and rotate are folded into a rlwinm instruction.
-define zeroext i32 @func(i32* %p, i32 zeroext %i) {
+define zeroext i32 @func(ptr %p, i32 zeroext %i) {
; CHECK-LABEL: @func
; CHECK: addi [[REG1:[0-9]+]], 4, 1
; CHECK: rlwinm [[REG2:[0-9]+]], [[REG1]], 2, 22, 29
%add = add i32 %i, 1
%and = and i32 %add, 255
%idxprom = zext i32 %and to i64
- %arrayidx = getelementptr inbounds i32, i32* %p, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
ret i32 %0
}
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: noreturn nounwind
-define signext i32 @_Z1fRPc(i8** nocapture dereferenceable(8) %p) #0 {
+define signext i32 @_Z1fRPc(ptr nocapture dereferenceable(8) %p) #0 {
entry:
- %.pre = load i8*, i8** %p, align 8
+ %.pre = load ptr, ptr %p, align 8
br label %loop
loop: ; preds = %loop.backedge, %entry
- %0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
- %1 = load i8, i8* %0, align 1
+ %0 = phi ptr [ %.pre, %entry ], [ %.be, %loop.backedge ]
+ %1 = load i8, ptr %0, align 1
%tobool = icmp eq i8 %1, 0
- %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
- store i8* %incdec.ptr, i8** %p, align 8
- %2 = load i8, i8* %incdec.ptr, align 1
+ %incdec.ptr = getelementptr inbounds i8, ptr %0, i64 1
+ store ptr %incdec.ptr, ptr %p, align 8
+ %2 = load i8, ptr %incdec.ptr, align 1
%tobool2 = icmp ne i8 %2, 0
%or.cond = and i1 %tobool, %tobool2
br i1 %or.cond, label %if.then3, label %loop.backedge
if.then3: ; preds = %loop
- %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
- store i8* %incdec.ptr4, i8** %p, align 8
+ %incdec.ptr4 = getelementptr inbounds i8, ptr %0, i64 2
+ store ptr %incdec.ptr4, ptr %p, align 8
br label %loop.backedge
loop.backedge: ; preds = %if.then3, %loop
- %.be = phi i8* [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
+ %.be = phi ptr [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
br label %loop
; CHECK-LABEL: @_Z1fRPc