; Opaque-pointer migration diff of a Mips sret test: the struct-return pointer
; arrives in $4 ($a0), and each of the three i32 fields is stored through it
; (matched by the three CHECK'd sw-to-($4) lines below).
%struct.sret0 = type { i32, i32, i32 }
-define void @test0(%struct.sret0* noalias sret(%struct.sret0) %agg.result, i32 %dummy) nounwind {
+define void @test0(ptr noalias sret(%struct.sret0) %agg.result, i32 %dummy) nounwind {
entry:
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
; The typed-pointer GEP/store pairs below are rewritten 1:1 with ptr; indices
; and alignments are unchanged.
- getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 0 ; <i32*>:0 [#uses=1]
- store i32 %dummy, i32* %0, align 4
- getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 1 ; <i32*>:1 [#uses=1]
- store i32 %dummy, i32* %1, align 4
- getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 2 ; <i32*>:2 [#uses=1]
- store i32 %dummy, i32* %2, align 4
+ getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 0 ; <ptr>:0 [#uses=1]
+ store i32 %dummy, ptr %0, align 4
+ getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 1 ; <ptr>:1 [#uses=1]
+ store i32 %dummy, ptr %1, align 4
+ getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 2 ; <ptr>:2 [#uses=1]
+ store i32 %dummy, ptr %2, align 4
ret void
}
; Functions returning addresses of internal globals; the CHECK lines verify the
; Mips %hi/%lo relocation pair used to materialize each address. Under opaque
; pointers the all-zero-index GEP constant folds away to the bare global.
@.str = internal unnamed_addr constant [10 x i8] c"AAAAAAAAA\00"
@i0 = internal unnamed_addr constant [5 x i32] [ i32 0, i32 1, i32 2, i32 3, i32 4 ]
-define i8* @foo() nounwind {
+define ptr @foo() nounwind {
entry:
; CHECK: foo
; CHECK: %hi(.str)
; CHECK: %lo(.str)
- ret i8* getelementptr ([10 x i8], [10 x i8]* @.str, i32 0, i32 0)
+ ret ptr @.str
}
-define i32* @bar() nounwind {
+define ptr @bar() nounwind {
entry:
; CHECK: bar
; CHECK: %hi(i0)
; CHECK: %lo(i0)
- ret i32* getelementptr ([5 x i32], [5 x i32]* @i0, i32 0, i32 0)
+ ret ptr @i0
}
; Section-placement checks plus two small functions. NOTE(review): @s0 and
; %struct.anon are declared outside this chunk — their definitions are not
; visible here.
; CHECK: rodata.str1.4,"aMS",@progbits
; COMMON-NEXT: .section .sbss,"aw",@nobits
@bar = global %struct.anon zeroinitializer
-define i8* @A0() nounwind {
+define ptr @A0() nounwind {
entry:
- ret i8* getelementptr ([8 x i8], [8 x i8]* @s0, i32 0, i32 0)
+ ret ptr @s0
}
define i32 @A1() nounwind {
entry:
; Field-0 GEP folds to the bare global under opaque pointers; the field-1 GEP
; must stay as an explicit constant GEP.
- load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 0), align 8
- load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 1), align 4
+ load i32, ptr @foo, align 8
+ load i32, ptr getelementptr (%struct.anon, ptr @foo, i32 0, i32 1), align 4
add i32 %1, %0
ret i32 %2
}
; Inline-asm tests: each function loads operands from globals, feeds them to a
; Mips asm template, and stores the result. Only the load/store pointer types
; change in this diff; the asm strings and constraints are untouched.
define void @foo0() nounwind {
entry:
; CHECK: addu
- %0 = load i32, i32* @gi1, align 4
- %1 = load i32, i32* @gi0, align 4
+ %0 = load i32, ptr @gi1, align 4
+ %1 = load i32, ptr @gi0, align 4
%2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
- store i32 %2, i32* @gi2, align 4
+ store i32 %2, ptr @gi2, align 4
ret void
}
define void @foo2() nounwind {
entry:
; CHECK: neg.s
- %0 = load float, float* @gf1, align 4
+ %0 = load float, ptr @gf1, align 4
%1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
- store float %1, float* @gf0, align 4
+ store float %1, ptr @gf0, align 4
ret void
}
define void @foo3() nounwind {
entry:
; CHECK: neg.d
- %0 = load double, double* @gd1, align 8
+ %0 = load double, ptr @gd1, align 8
%1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
- store double %1, double* @gd0, align 8
+ store double %1, ptr @gd0, align 8
ret void
}
; Tests sideeffect asm with explicit register clobbers ($2 and $f0).
define void @foo4() {
entry:
%0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
- store i32 %0, i32* @gi2, align 4
- %1 = load float, float* @gf0, align 4
+ store i32 %0, ptr @gi2, align 4
+ %1 = load float, ptr @gf0, align 4
%2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
- store double %2, double* @gd0, align 8
+ store double %2, ptr @gd0, align 8
ret void
}
; Old-style auto-numbered IR: %0/%1 are the implicit results of the unnamed
; load instructions. Only alloca/load/store pointer types change in the diff.
define double @main(...) {
entry:
- %retval = alloca double ; <double*> [#uses=3]
- store double 0.000000e+00, double* %retval
- %r = alloca double ; <double*> [#uses=1]
- load double, double* %r ; <double>:0 [#uses=1]
- store double %0, double* %retval
+ %retval = alloca double ; <ptr> [#uses=3]
+ store double 0.000000e+00, ptr %retval
+ %r = alloca double ; <ptr> [#uses=1]
+ load double, ptr %r ; <double>:0 [#uses=1]
+ store double %0, ptr %retval
br label %return
return: ; preds = %entry
- load double, double* %retval ; <double>:1 [#uses=1]
+ load double, ptr %retval ; <double>:1 [#uses=1]
ret double %1
}
entry:
; CHECK: subu ${{[0-9]+}}, $sp
; CHECK: subu ${{[0-9]+}}, $sp
- alloca i8, i32 %size ; <i8*>:0 [#uses=1]
- alloca i8, i32 %size ; <i8*>:1 [#uses=1]
- call i32 @foo( i8* %0 ) nounwind ; <i32>:2 [#uses=1]
- call i32 @foo( i8* %1 ) nounwind ; <i32>:3 [#uses=1]
+ alloca i8, i32 %size ; <ptr>:0 [#uses=1]
+ alloca i8, i32 %size ; <ptr>:1 [#uses=1]
+ call i32 @foo( ptr %0 ) nounwind ; <i32>:2 [#uses=1]
+ call i32 @foo( ptr %1 ) nounwind ; <i32>:3 [#uses=1]
add i32 %3, %2 ; <i32>:4 [#uses=1]
ret i32 %4
}
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)
; RUN: llc < %s -march=mips
; PR2794
-define i32 @main(i8*) nounwind {
+define i32 @main(ptr) nounwind {
entry:
br label %continue.outer
continue.outer: ; preds = %case4, %entry
%p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ] ; <i32> [#uses=2]
- %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1]
- %1 = load i8, i8* %p.0.ph ; <i8> [#uses=1]
+ %p.0.ph = getelementptr i8, ptr %0, i32 %p.0.ph.rec ; <ptr> [#uses=1]
+ %1 = load i8, ptr %p.0.ph ; <i8> [#uses=1]
switch i8 %1, label %infloop [
i8 0, label %return.split
i8 76, label %case4
; PR2667 soft-float test (newlib's _erand48_r). The struct types collapse all
; pointer members to ptr; the member count of each struct is preserved
; (22 fields in both %struct.__FILE and %struct._reent).
; RUN: llc -march=mips -mattr=+soft-float < %s
; PR2667
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
- %struct._Bigint = type { %struct._Bigint*, i32, i32, i32, i32, [1 x i32] }
- %struct.__FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*, i8*, i32)*, i32 (i8*, i8*, i32)*, i32 (i8*, i32, i32)*, i32 (i8*)*, %struct.__sbuf, i8*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i32, %struct._reent*, i32 }
- %struct.__sbuf = type { i8*, i32 }
- %struct._atexit = type { %struct._atexit*, i32, [32 x void ()*], %struct._on_exit_args }
- %struct._glue = type { %struct._glue*, i32, %struct.__FILE* }
- %struct._on_exit_args = type { [32 x i8*], [32 x i8*], i32, i32 }
- %struct._reent = type { i32, %struct.__FILE*, %struct.__FILE*, %struct.__FILE*, i32, [25 x i8], i32, i8*, i32, void (%struct._reent*)*, %struct._Bigint*, i32, %struct._Bigint*, %struct._Bigint**, i32, i8*, { { [30 x i8*], [30 x i32] } }, %struct._atexit*, %struct._atexit, void (i32)**, %struct._glue, [3 x %struct.__FILE] }
-@_impure_ptr = external global %struct._reent* ; <%struct._reent**> [#uses=1]
+ %struct._Bigint = type { ptr, i32, i32, i32, i32, [1 x i32] }
+ %struct.__FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i32, ptr, i32 }
+ %struct.__sbuf = type { ptr, i32 }
+ %struct._atexit = type { ptr, i32, [32 x ptr], %struct._on_exit_args }
+ %struct._glue = type { ptr, i32, ptr }
+ %struct._on_exit_args = type { [32 x ptr], [32 x ptr], i32, i32 }
+ %struct._reent = type { i32, ptr, ptr, ptr, i32, [25 x i8], i32, ptr, i32, ptr, ptr, i32, ptr, ptr, i32, ptr, { { [30 x ptr], [30 x i32] } }, ptr, %struct._atexit, ptr, %struct._glue, [3 x %struct.__FILE] }
+@_impure_ptr = external global ptr ; <ptr> [#uses=1]
; Combines the three 16-bit chunks of the 48-bit seed into a double in [0,1):
; sum of xseed[k] * 2^(-48 + 16*k) for k = 0, 1, 2.
-define double @_erand48_r(%struct._reent* %r, i16* %xseed) nounwind {
+define double @_erand48_r(ptr %r, ptr %xseed) nounwind {
entry:
- tail call void @__dorand48( %struct._reent* %r, i16* %xseed ) nounwind
- load i16, i16* %xseed, align 2 ; <i16>:0 [#uses=1]
+ tail call void @__dorand48( ptr %r, ptr %xseed ) nounwind
+ load i16, ptr %xseed, align 2 ; <i16>:0 [#uses=1]
uitofp i16 %0 to double ; <double>:1 [#uses=1]
tail call double @ldexp( double %1, i32 -48 ) nounwind ; <double>:2 [#uses=1]
- getelementptr i16, i16* %xseed, i32 1 ; <i16*>:3 [#uses=1]
- load i16, i16* %3, align 2 ; <i16>:4 [#uses=1]
+ getelementptr i16, ptr %xseed, i32 1 ; <ptr>:3 [#uses=1]
+ load i16, ptr %3, align 2 ; <i16>:4 [#uses=1]
uitofp i16 %4 to double ; <double>:5 [#uses=1]
tail call double @ldexp( double %5, i32 -32 ) nounwind ; <double>:6 [#uses=1]
fadd double %2, %6 ; <double>:7 [#uses=1]
- getelementptr i16, i16* %xseed, i32 2 ; <i16*>:8 [#uses=1]
- load i16, i16* %8, align 2 ; <i16>:9 [#uses=1]
+ getelementptr i16, ptr %xseed, i32 2 ; <ptr>:8 [#uses=1]
+ load i16, ptr %8, align 2 ; <i16>:9 [#uses=1]
uitofp i16 %9 to double ; <double>:10 [#uses=1]
tail call double @ldexp( double %10, i32 -16 ) nounwind ; <double>:11 [#uses=1]
fadd double %7, %11 ; <double>:12 [#uses=1]
ret double %12
}
-declare void @__dorand48(%struct._reent*, i16*)
+declare void @__dorand48(ptr, ptr)
declare double @ldexp(double, i32)
-define double @erand48(i16* %xseed) nounwind {
+define double @erand48(ptr %xseed) nounwind {
entry:
- load %struct._reent*, %struct._reent** @_impure_ptr, align 4 ; <%struct._reent*>:0 [#uses=1]
- tail call void @__dorand48( %struct._reent* %0, i16* %xseed ) nounwind
- load i16, i16* %xseed, align 2 ; <i16>:1 [#uses=1]
+ load ptr, ptr @_impure_ptr, align 4 ; <ptr>:0 [#uses=1]
+ tail call void @__dorand48( ptr %0, ptr %xseed ) nounwind
+ load i16, ptr %xseed, align 2 ; <i16>:1 [#uses=1]
uitofp i16 %1 to double ; <double>:2 [#uses=1]
tail call double @ldexp( double %2, i32 -48 ) nounwind ; <double>:3 [#uses=1]
- getelementptr i16, i16* %xseed, i32 1 ; <i16*>:4 [#uses=1]
- load i16, i16* %4, align 2 ; <i16>:5 [#uses=1]
+ getelementptr i16, ptr %xseed, i32 1 ; <ptr>:4 [#uses=1]
+ load i16, ptr %4, align 2 ; <i16>:5 [#uses=1]
uitofp i16 %5 to double ; <double>:6 [#uses=1]
tail call double @ldexp( double %6, i32 -32 ) nounwind ; <double>:7 [#uses=1]
fadd double %3, %7 ; <double>:8 [#uses=1]
- getelementptr i16, i16* %xseed, i32 2 ; <i16*>:9 [#uses=1]
- load i16, i16* %9, align 2 ; <i16>:10 [#uses=1]
+ getelementptr i16, ptr %xseed, i32 2 ; <ptr>:9 [#uses=1]
+ load i16, ptr %9, align 2 ; <i16>:10 [#uses=1]
uitofp i16 %10 to double ; <double>:11 [#uses=1]
tail call double @ldexp( double %11, i32 -16 ) nounwind ; <double>:12 [#uses=1]
fadd double %8, %12 ; <double>:13 [#uses=1]
define i32 @main() nounwind readnone {
entry:
- %x = alloca i32, align 4 ; <i32*> [#uses=2]
- store volatile i32 2, i32* %x, align 4
- %0 = load volatile i32, i32* %x, align 4 ; <i32> [#uses=1]
+ %x = alloca i32, align 4 ; <ptr> [#uses=2]
+ store volatile i32 2, ptr %x, align 4
+ %0 = load volatile i32, ptr %x, align 4 ; <i32> [#uses=1]
; STATIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2
; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0)
; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; NOTE(review): this chunk appears lossy — the entry branch targets %if.end13
; and %return, but neither label (nor the return block's terminator) is visible
; here; presumably intermediate lines were dropped during extraction. The diff
; itself only rewrites pointer types (phi ptr, memmove/free signatures).
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-n32"
target triple = "mips-ellcc-linux"
-define i32 @mergesort(i8* %base, i32 %nmemb, i32 %size, i32 (i8*, i8*)* nocapture %cmp) nounwind {
+define i32 @mergesort(ptr %base, i32 %nmemb, i32 %size, ptr nocapture %cmp) nounwind {
entry:
br i1 undef, label %return, label %if.end13
br label %while.body
while.body: ; preds = %while.body, %if.end13
- %list1.0482 = phi i8* [ %base, %if.end13 ], [ null, %while.body ]
+ %list1.0482 = phi ptr [ %base, %if.end13 ], [ null, %while.body ]
br i1 undef, label %while.end415, label %while.body
while.end415: ; preds = %while.body
br i1 undef, label %if.then419, label %if.end427
if.then419: ; preds = %while.end415
- %call425 = tail call i8* @memmove(i8* %list1.0482, i8* undef, i32 undef) nounwind
+ %call425 = tail call ptr @memmove(ptr %list1.0482, ptr undef, i32 undef) nounwind
br label %if.end427
if.end427: ; preds = %if.then419, %while.end415
- %list2.1 = phi i8* [ undef, %if.then419 ], [ %list1.0482, %while.end415 ]
- tail call void @free(i8* %list2.1)
+ %list2.1 = phi ptr [ undef, %if.then419 ], [ %list1.0482, %while.end415 ]
+ tail call void @free(ptr %list2.1)
unreachable
return: ; preds = %entry
}
-declare i8* @memmove(i8*, i8*, i32)
+declare ptr @memmove(ptr, ptr, i32)
-declare void @free(i8*)
+declare void @free(ptr)
; memcpy test: the migration also renames the intrinsic from the typed
; overload llvm.memcpy.p0i8.p0i8.i64 to the opaque-pointer llvm.memcpy.p0.p0.i64,
; consistently in both the call and the declare.
@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
-define void @t(i8* %ptr) {
+define void @t(ptr %ptr) {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 7, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i64(ptr %ptr, ptr @.str, i64 7, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
; Branch-plus-bswap test: conditionally stores to @i, then byte-swaps the i16
; loaded from @a (32R2 checks for wsbh). NOTE(review): the body looks spliced
; from two separate tests in this chunk — confirm against the original file.
; Function Attrs: nounwind
define void @br() #0 {
entry:
- %0 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @b, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 6754, i32* @i, align 4
+ store i32 6754, ptr @i, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
; 32R2: wsbh $[[RESULT:[0-9]+]], $[[A_VAL]]
- %1 = load i16, i16* @a, align 2
+ %1 = load i16, ptr @a, align 2
%2 = call i16 @llvm.bswap.i16(i16 %1)
- store i16 %2, i16* @a1, align 2
+ store i16 %2, ptr @a1, align 2
ret void
}
; 32R2: wsbh $[[TMP:[0-9]+]], $[[B_VAL]]
; 32R2: rotr $[[RESULT:[0-9]+]], $[[TMP]], 16
- %1 = load i32, i32* @b, align 4
+ %1 = load i32, ptr @b, align 4
%2 = call i32 @llvm.bswap.i32(i32 %1)
- store i32 %2, i32* @b1, align 4
+ store i32 %2, ptr @b1, align 4
ret void
}
; ALL-DAG: lhu $[[REG_US1:[0-9]+]], 0($[[REG_US1_ADDR]])
; ALL-DAG: andi $7, $[[REG_US1]], 65535
; ALL: jalr $25
- %1 = load i8, i8* @c1, align 1
+ %1 = load i8, ptr @c1, align 1
%conv = sext i8 %1 to i32
- %2 = load i8, i8* @uc1, align 1
+ %2 = load i8, ptr @uc1, align 1
%conv1 = zext i8 %2 to i32
- %3 = load i16, i16* @s1, align 2
+ %3 = load i16, ptr @s1, align 2
%conv2 = sext i16 %3 to i32
- %4 = load i16, i16* @us1, align 2
+ %4 = load i16, ptr @us1, align 2
%conv3 = zext i16 %4 to i32
call void @xiiii(i32 %conv, i32 %conv1, i32 %conv2, i32 %conv3)
ret void
; CHECK: sw $[[T0]], 8($[[ARR]])
entry:
- store i32 12345, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ARR, i32 0, i32 2), align 4
+ store i32 12345, ptr getelementptr inbounds ([10 x i32], ptr @ARR, i32 0, i32 2), align 4
ret void
}
; CHECK-DAG: teq $[[K]], $zero, 7
; CHECK-DAG: mflo $[[RESULT:[0-9]+]]
; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
- %1 = load i32, i32* @sj, align 4
- %2 = load i32, i32* @sk, align 4
+ %1 = load i32, ptr @sj, align 4
+ %2 = load i32, ptr @sk, align 4
%div = sdiv i32 %1, %2
- store i32 %div, i32* @si, align 4
+ store i32 %div, ptr @si, align 4
ret void
}
; CHECK-DAG: teq $[[K]], $zero, 7
; CHECK-DAG: mflo $[[RESULT:[0-9]+]]
; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
- %1 = load i32, i32* @uj, align 4
- %2 = load i32, i32* @uk, align 4
+ %1 = load i32, ptr @uj, align 4
+ %2 = load i32, ptr @uk, align 4
%div = udiv i32 %1, %2
- store i32 %div, i32* @ui, align 4
+ store i32 %div, ptr @ui, align 4
ret void
}
; CHECK-LABEL: f:
; CHECK: sdc1
%value.addr = alloca double, align 8
- store double %value, double* %value.addr, align 8
+ store double %value, ptr %value.addr, align 8
ret i1 false
}
; Minimal spill test: stores the double argument to a stack slot (the preceding
; chunk CHECKs sdc1 for this pattern). Only the store's pointer type changes.
define void @__signbit(double %__x) {
entry:
%__x.addr = alloca double, align 8
- store double %__x, double* %__x.addr, align 8
+ store double %__x, ptr %__x.addr, align 8
ret void
}
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
%a = alloca %struct.x, align 4
- %c = alloca %struct.x*, align 4
- store i32 %x, i32* %x.addr, align 4
- %x1 = getelementptr inbounds %struct.x, %struct.x* %a, i32 0, i32 0
- %0 = load i32, i32* %x.addr, align 4
- store i32 %0, i32* %x1, align 4
- store %struct.x* %a, %struct.x** %c, align 4
- %1 = load %struct.x*, %struct.x** %c, align 4
- %x2 = getelementptr inbounds %struct.x, %struct.x* %1, i32 0, i32 0
- %2 = load i32, i32* %x2, align 4
- store i32 %2, i32* @i, align 4
- %3 = load i32, i32* %retval
+ %c = alloca ptr, align 4
+ store i32 %x, ptr %x.addr, align 4
+ %0 = load i32, ptr %x.addr, align 4
+ store i32 %0, ptr %a, align 4
+ store ptr %a, ptr %c, align 4
+ %1 = load ptr, ptr %c, align 4
+ %2 = load i32, ptr %1, align 4
+ store i32 %2, ptr @i, align 4
+ %3 = load i32, ptr %retval
; CHECK: addiu $[[A_ADDR:[0-9]+]], $sp, 8
; CHECK-DAG: lw $[[I_ADDR:[0-9]+]], %got(i)($[[REG_GP:[0-9]+]])
; CHECK-DAG: sw $[[A_ADDR]], [[A_ADDR_FI:[0-9]+]]($sp)
; Single-precision fcmp series: each function loads @f1/@f2, compares with one
; predicate, and stores the zext'd i1 to @b1. The CHECK lines pin the expected
; Mips compare (c.eq.s/c.ole.s/c.ule.s/c.ult.s) and the movt/movf polarity
; (e.g. ogt is implemented as NOT(ule) via c.ule.s + movf). The diff only
; rewrites load/store pointer types.
; Function Attrs: nounwind
define void @feq1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp oeq float %0, %1
; CHECK-LABEL: feq1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; une: inverted sense of c.eq.s, hence movf.
; Function Attrs: nounwind
define void @fne1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp une float %0, %1
; CHECK-LABEL: fne1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: c.eq.s $f[[REG_F1]], $f[[REG_F2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @flt1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp olt float %0, %1
; CHECK-LABEL: flt1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; ogt = NOT(ule): c.ule.s + movf.
; Function Attrs: nounwind
define void @fgt1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp ogt float %0, %1
; CHECK-LABEL: fgt1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: c.ule.s $f[[REG_F1]], $f[[REG_F2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @fle1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp ole float %0, %1
; CHECK-LABEL: fle1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: c.ole.s $f[[REG_F1]], $f[[REG_F2]]
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; oge = NOT(ult): c.ult.s + movf.
; Function Attrs: nounwind
define void @fge1() {
entry:
- %0 = load float, float* @f1, align 4
- %1 = load float, float* @f2, align 4
+ %0 = load float, ptr @f1, align 4
+ %1 = load float, ptr @f2, align 4
%cmp = fcmp oge float %0, %1
; CHECK-LABEL: fge1:
; CHECK: lw $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
; CHECK: c.ult.s $f[[REG_F1]], $f[[REG_F2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Double-precision twin of the float series above: same predicates against
; @d1/@d2, checked against the .d compare forms (c.eq.d/c.olt.d/c.ole.d/
; c.ule.d/c.ult.d) with the matching movt/movf polarity.
; Function Attrs: nounwind
define void @deq1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp oeq double %0, %1
; CHECK-LABEL: deq1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.eq.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @dne1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp une double %0, %1
; CHECK-LABEL: dne1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.eq.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @dlt1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp olt double %0, %1
; CHECK-LABEL: dlt1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.olt.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @dgt1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp ogt double %0, %1
; CHECK-LABEL: dgt1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.ule.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @dle1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp ole double %0, %1
; CHECK-LABEL: dle1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.ole.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movt $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Function Attrs: nounwind
define void @dge1() {
entry:
- %0 = load double, double* @d1, align 8
- %1 = load double, double* @d2, align 8
+ %0 = load double, ptr @d1, align 8
+ %1 = load double, ptr @d2, align 8
%cmp = fcmp oge double %0, %1
; CHECK-LABEL: dge1:
; CHECK: lw $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
; CHECK: c.ult.d $f[[REG_D1]], $f[[REG_D2]]
; CHECK: movf $[[REG_ZERO]], $[[REG_ONE]], $fcc0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; FP conversion tests: fpext (cvt.d.s), fptosi from float/double
; (trunc.w.s / trunc.w.d + mfc1), and fptrunc (cvt.s.d). Diff rewrites
; pointer types only.
; Function Attrs: nounwind
define void @dv() #0 {
entry:
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
%conv = fpext float %0 to double
; CHECK: cvt.d.s $f{{[0-9]+}}, $f{{[0-9]+}}
- store double %conv, double* @d_f, align 8
+ store double %conv, ptr @d_f, align 8
ret void
}
define void @ifv() {
entry:
; CHECK-LABEL: .ent ifv
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
%conv = fptosi float %0 to i32
; CHECK: trunc.w.s $f[[REG:[0-9]+]], $f{{[0-9]+}}
; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]]
- store i32 %conv, i32* @i_f, align 4
+ store i32 %conv, ptr @i_f, align 4
ret void
}
define void @idv() {
entry:
; CHECK-LABEL: .ent idv
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fptosi double %0 to i32
; CHECK: trunc.w.d $f[[REG:[0-9]+]], $f{{[0-9]+}}
; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]]
- store i32 %conv, i32* @i_d, align 4
+ store i32 %conv, ptr @i_d, align 4
ret void
}
; Function Attrs: nounwind
define void @fv() #0 {
entry:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
%conv = fptrunc double %0 to float
; CHECK: cvt.s.d $f{{[0-9]+}}, $f{{[0-9]+}}
- store float %conv, float* @f, align 4
+ store float %conv, ptr @f, align 4
ret void
}
entry:
; CHECK-LABEL: .ent eq
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp eq i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
; FIXME: This instruction is redundant. The sltiu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG2]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; Integer icmp series: signed predicates use @c/@d, unsigned use @uc/@ud;
; each stores the zext'd i1 result to @b1. CHECK lines pin the slt/sltu
; lowering (plus xori for the >=/<= forms); the FIXME notes document a known
; redundant andi the tests currently expect. Diff rewrites pointer types only.
define void @ne() {
entry:
; CHECK-LABEL: .ent ne
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp ne i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG2]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
define void @ugt() {
entry:
; CHECK-LABEL: .ent ugt
- %0 = load i32, i32* @uc, align 4
- %1 = load i32, i32* @ud, align 4
+ %0 = load i32, ptr @uc, align 4
+ %1 = load i32, ptr @ud, align 4
%cmp = icmp ugt i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK: lw $[[REG_UC_GOT:[0-9+]]], %got(uc)(${{[0-9]+}})
; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
define void @ult() {
entry:
; CHECK-LABEL: .ent ult
- %0 = load i32, i32* @uc, align 4
- %1 = load i32, i32* @ud, align 4
+ %0 = load i32, ptr @uc, align 4
+ %1 = load i32, ptr @ud, align 4
%cmp = icmp ult i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
; CHECK: sltu $[[REG1:[0-9]+]], $[[REG_UC]], $[[REG_UD]]
; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; uge = NOT(ult): sltu + xori.
define void @uge() {
entry:
; CHECK-LABEL: .ent uge
- %0 = load i32, i32* @uc, align 4
- %1 = load i32, i32* @ud, align 4
+ %0 = load i32, ptr @uc, align 4
+ %1 = load i32, ptr @ud, align 4
%cmp = icmp uge i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
; CHECK: xori $[[REG2:[0-9]+]], $[[REG1]], 1
; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG2]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; ule = NOT(ugt): sltu (swapped operands) + xori.
define void @ule() {
entry:
; CHECK-LABEL: .ent ule
- %0 = load i32, i32* @uc, align 4
- %1 = load i32, i32* @ud, align 4
+ %0 = load i32, ptr @uc, align 4
+ %1 = load i32, ptr @ud, align 4
%cmp = icmp ule i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK: lw $[[REG_UC_GOT:[0-9+]]], %got(uc)(${{[0-9]+}})
; CHECK: xori $[[REG2:[0-9]+]], $[[REG1]], 1
; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG2]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; sgt lowers to slt with the operands swapped (slt $d, $c).
define void @sgt() {
entry:
; CHECK-LABEL: .ent sgt
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp sgt i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK: lw $[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
; CHECK: slt $[[REG1:[0-9]+]], $[[REG_D]], $[[REG_C]]
; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
define void @slt() {
entry:
; CHECK-LABEL: .ent slt
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp slt i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
; CHECK: slt $[[REG1:[0-9]+]], $[[REG_C]], $[[REG_D]]
; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
define void @sge() {
entry:
; CHECK-LABEL: .ent sge
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp sge i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
; CHECK-DAG: lw $[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
; CHECK-DAG: lw $[[REG_D:[0-9]+]], 0($[[REG_D_GOT]])
; sle = NOT(sgt): slt + xori, matching the pattern of the icmp series above.
define void @sle() {
entry:
; CHECK-LABEL: .ent sle
- %0 = load i32, i32* @c, align 4
- %1 = load i32, i32* @d, align 4
+ %0 = load i32, ptr @c, align 4
+ %1 = load i32, ptr @d, align 4
%cmp = icmp sle i32 %0, %1
%conv = zext i1 %cmp to i32
; CHECK: lw $[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
; CHECK: xori $[[REG2:[0-9]+]], $[[REG1]], 1
; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
; CHECK: andi ${{[0-9]+}}, $[[REG2]], 1
- store i32 %conv, i32* @b1, align 4
+ store i32 %conv, ptr @b1, align 4
ret void
}
; RUN: < %s -verify-machineinstrs | FileCheck %s
-define i32 @foobar(i32*) {
+define i32 @foobar(ptr) {
bb0:
; CHECK-LABEL: foobar:
; CHECK: # %bb.0: # %bb0
; CHECK: lw $[[REG0:[0-9]+]], 0($4)
; CHECK-NEXT: sltiu $[[REG1:[0-9]+]], $[[REG0]], 1
; CHECK: sw $[[REG1]], [[SPILL:[0-9]+]]($sp) # 4-byte Folded Spill
- %1 = load i32, i32* %0 , align 4
+ %1 = load i32, ptr %0 , align 4
%2 = icmp eq i32 %1, 0
- store atomic i32 0, i32* %0 monotonic, align 4
+ store atomic i32 0, ptr %0 monotonic, align 4
br label %bb1
bb1:
; CHECK: # %bb.1: # %bb1
; RUN: < %s -verify-machineinstrs | FileCheck %s
-define zeroext i1 @foo(i8* nocapture readonly) {
+define zeroext i1 @foo(ptr nocapture readonly) {
; CHECK-LABEL: foo
; CHECK: lbu $[[REG0:[0-9]+]], 0($4)
; CHECK-NEXT: xori $[[REG1:[0-9]+]], $[[REG0]], 1
; CHECK-NEXT: andi $2, $[[REG1]], 1
- %2 = load i8, i8* %0, align 1
+ %2 = load i8, ptr %0, align 1
%3 = trunc i8 %2 to i1
%4 = icmp ne i1 %3, true
ret i1 %4
; Function Attrs: nounwind
define void @cfoo() #0 {
entry:
- %0 = load i8, i8* @c2, align 1
- store i8 %0, i8* @c1, align 1
+ %0 = load i8, ptr @c2, align 1
+ store i8 %0, ptr @c1, align 1
; CHECK-LABEL: cfoo:
; CHECK: lbu $[[REGc:[0-9]+]], 0(${{[0-9]+}})
; CHECK: sb $[[REGc]], 0(${{[0-9]+}})
; Function Attrs: nounwind
define void @sfoo() #0 {
entry:
- %0 = load i16, i16* @s2, align 2
- store i16 %0, i16* @s1, align 2
+ %0 = load i16, ptr @s2, align 2
+ store i16 %0, ptr @s1, align 2
; CHECK-LABEL: sfoo:
; CHECK: lhu $[[REGs:[0-9]+]], 0(${{[0-9]+}})
; CHECK: sh $[[REGs]], 0(${{[0-9]+}})
; Function Attrs: nounwind
define void @ifoo() #0 {
entry:
- %0 = load i32, i32* @i2, align 4
- store i32 %0, i32* @i1, align 4
+ %0 = load i32, ptr @i2, align 4
+ store i32 %0, ptr @i1, align 4
; CHECK-LABEL: ifoo:
; CHECK: lw $[[REGi:[0-9]+]], 0(${{[0-9]+}})
; CHECK: sw $[[REGi]], 0(${{[0-9]+}})
; Function Attrs: nounwind
define void @ffoo() #0 {
entry:
- %0 = load float, float* @f2, align 4
- store float %0, float* @f1, align 4
+ %0 = load float, ptr @f2, align 4
+ store float %0, ptr @f1, align 4
; CHECK-LABEL: ffoo:
; CHECK: lwc1 $f[[REGf:[0-9]+]], 0(${{[0-9]+}})
; CHECK: swc1 $f[[REGf]], 0(${{[0-9]+}})
; Function Attrs: nounwind
define void @dfoo() #0 {
entry:
- %0 = load double, double* @d2, align 8
- store double %0, double* @d1, align 8
+ %0 = load double, ptr @d2, align 8
+ store double %0, ptr @d1, align 8
; CHECK-LABEL: dfoo:
; CHECK: ldc1 $f[[REGd:[0-9]+]], 0(${{[0-9]+}})
; CHECK: sdc1 $f[[REGd]], 0(${{[0-9]+}})
define void @_Z3b_iv() {
entry:
; CHECK-LABEL: .ent _Z3b_iv
- %0 = load i8, i8* @b1, align 1
+ %0 = load i8, ptr @b1, align 1
%tobool = trunc i8 %0 to i1
%frombool = zext i1 %tobool to i8
- store i8 %frombool, i8* @b2, align 1
- %1 = load i8, i8* @b2, align 1
+ store i8 %frombool, ptr @b2, align 1
+ %1 = load i8, ptr @b2, align 1
%tobool1 = trunc i8 %1 to i1
%conv = zext i1 %tobool1 to i32
- store i32 %conv, i32* @i, align 4
+ store i32 %conv, ptr @i, align 4
; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; CHECK: andi $[[REG2:[0-9]+]], $[[REG1]], 1
; CHECK: sb $[[REG2]], 0(${{[0-9]+}})
entry:
; CHECK-LABEL: .ent _Z4uc_iv
- %0 = load i8, i8* @uc1, align 1
+ %0 = load i8, ptr @uc1, align 1
%conv = zext i8 %0 to i32
- store i32 %conv, i32* @i, align 4
- %1 = load i8, i8* @uc2, align 1
+ store i32 %conv, ptr @i, align 4
+ %1 = load i8, ptr @uc2, align 1
%conv1 = zext i8 %1 to i32
; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255
- store i32 %conv1, i32* @j, align 4
+ store i32 %conv1, ptr @j, align 4
ret void
; CHECK: .end _Z4uc_iv
; mips32r2-LABEL: .ent _Z4sc_iv
; mips32-LABEL: .ent _Z4sc_iv
- %0 = load i8, i8* @sc1, align 1
+ %0 = load i8, ptr @sc1, align 1
%conv = sext i8 %0 to i32
- store i32 %conv, i32* @i, align 4
- %1 = load i8, i8* @sc2, align 1
+ store i32 %conv, ptr @i, align 4
+ %1 = load i8, ptr @sc2, align 1
%conv1 = sext i8 %1 to i32
- store i32 %conv1, i32* @j, align 4
+ store i32 %conv1, ptr @j, align 4
; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; mips32r2: seb ${{[0-9]+}}, $[[REG1]]
; mips32: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
define void @_Z4us_iv() {
entry:
; CHECK-LABEL: .ent _Z4us_iv
- %0 = load i16, i16* @us1, align 2
+ %0 = load i16, ptr @us1, align 2
%conv = zext i16 %0 to i32
- store i32 %conv, i32* @i, align 4
- %1 = load i16, i16* @us2, align 2
+ store i32 %conv, ptr @i, align 4
+ %1 = load i16, ptr @us2, align 2
%conv1 = zext i16 %1 to i32
- store i32 %conv1, i32* @j, align 4
+ store i32 %conv1, ptr @j, align 4
ret void
; CHECK: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 65535
; mips32r2-LABEL: .ent _Z4ss_iv
; mips32=LABEL: .ent _Z4ss_iv
- %0 = load i16, i16* @ss1, align 2
+ %0 = load i16, ptr @ss1, align 2
%conv = sext i16 %0 to i32
- store i32 %conv, i32* @i, align 4
- %1 = load i16, i16* @ss2, align 2
+ store i32 %conv, ptr @i, align 4
+ %1 = load i16, ptr @ss2, align 2
%conv1 = sext i16 %1 to i32
- store i32 %conv1, i32* @j, align 4
+ store i32 %conv1, ptr @j, align 4
; mips32r2: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; mips32r2: seh ${{[0-9]+}}, $[[REG1]]
; mips32: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
define void @_Z4b_ssv() {
entry:
; CHECK-LABEL: .ent _Z4b_ssv
- %0 = load i8, i8* @b2, align 1
+ %0 = load i8, ptr @b2, align 1
%tobool = trunc i8 %0 to i1
%conv = zext i1 %tobool to i16
- store i16 %conv, i16* @ssi, align 2
+ store i16 %conv, ptr @ssi, align 2
ret void
; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 1
; unsigned-char -> short widening test: zext i8 -> i16 stores to @ssi/@ssj;
; the CHECK lines pin lbu + andi 255 as the zero-extension lowering.
define void @_Z5uc_ssv() {
entry:
; CHECK-LABEL: .ent _Z5uc_ssv
- %0 = load i8, i8* @uc1, align 1
+ %0 = load i8, ptr @uc1, align 1
%conv = zext i8 %0 to i16
- store i16 %conv, i16* @ssi, align 2
- %1 = load i8, i8* @uc2, align 1
+ store i16 %conv, ptr @ssi, align 2
+ %1 = load i8, ptr @uc2, align 1
%conv1 = zext i8 %1 to i16
; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255
- store i16 %conv1, i16* @ssj, align 2
+ store i16 %conv1, ptr @ssj, align 2
ret void
; CHECK: .end _Z5uc_ssv
}
entry:
; mips32r2-LABEL: .ent _Z5sc_ssv
; mips32-LABEL: .ent _Z5sc_ssv
- %0 = load i8, i8* @sc1, align 1
+ %0 = load i8, ptr @sc1, align 1
%conv = sext i8 %0 to i16
- store i16 %conv, i16* @ssi, align 2
- %1 = load i8, i8* @sc2, align 1
+ store i16 %conv, ptr @ssi, align 2
+ %1 = load i8, ptr @sc2, align 1
%conv1 = sext i8 %1 to i16
- store i16 %conv1, i16* @ssj, align 2
+ store i16 %conv1, ptr @ssj, align 2
; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; mips32r2: seb ${{[0-9]+}}, $[[REG1]]
; mips32: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
; RUN: < %s | FileCheck %s
@.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
-@s = common global i8* null, align 4
+@s = common global ptr null, align 4
; Function Attrs: nounwind
define void @foo() #0 {
entry:
- store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** @s, align 4
+ store ptr @.str, ptr @s, align 4
ret void
; CHECK: .ent foo
; CHECK: lw $[[REG1:[0-9]+]], %got($.str)(${{[0-9]+}})
; Function Attrs: noinline nounwind
define void @andUb() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1
- %1 = load i8, i8* @ub2, align 1
+ %0 = load i8, ptr @ub1, align 1
+ %1 = load i8, ptr @ub2, align 1
%conv0 = trunc i8 %0 to i1
%conv1 = trunc i8 %1 to i1
%and0 = and i1 %conv1, %conv0
%conv3 = zext i1 %and0 to i8
- store i8 %conv3, i8* @ub, align 1, !tbaa !2
+ store i8 %conv3, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent andUb
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUb0() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1, !tbaa !2
+ %0 = load i8, ptr @ub1, align 1, !tbaa !2
%conv = trunc i8 %0 to i1
%and = and i1 %conv, 0
%conv1 = zext i1 %and to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent andUb0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @andUb1() #0 {
; clang uses i8 constants for booleans, so we test with an i8 1.
entry:
- %x = load i8, i8* @ub1, align 1, !tbaa !2
+ %x = load i8, ptr @ub1, align 1, !tbaa !2
%and = and i8 %x, 1
%conv = trunc i8 %and to i1
%conv1 = zext i1 %conv to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent andUb1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUb() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1
- %1 = load i8, i8* @ub2, align 1
+ %0 = load i8, ptr @ub1, align 1
+ %1 = load i8, ptr @ub2, align 1
%conv0 = trunc i8 %0 to i1
%conv1 = trunc i8 %1 to i1
%or0 = or i1 %conv1, %conv0
%conv3 = zext i1 %or0 to i8
- store i8 %conv3, i8* @ub, align 1, !tbaa !2
+ store i8 %conv3, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent orUb
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUb0() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1, !tbaa !2
+ %0 = load i8, ptr @ub1, align 1, !tbaa !2
%conv = trunc i8 %0 to i1
%or = or i1 %conv, 0
%conv1 = zext i1 %or to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent orUb0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUb1() #0 {
entry:
- %x = load i8, i8* @ub1, align 1, !tbaa !2
+ %x = load i8, ptr @ub1, align 1, !tbaa !2
%or = or i8 %x, 1
%conv = trunc i8 %or to i1
%conv1 = zext i1 %conv to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent orUb1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUb() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1
- %1 = load i8, i8* @ub2, align 1
+ %0 = load i8, ptr @ub1, align 1
+ %1 = load i8, ptr @ub2, align 1
%conv0 = trunc i8 %0 to i1
%conv1 = trunc i8 %1 to i1
%xor0 = xor i1 %conv1, %conv0
%conv3 = zext i1 %xor0 to i8
- store i8 %conv3, i8* @ub, align 1, !tbaa !2
+ store i8 %conv3, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUb
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUb0() #0 {
entry:
- %0 = load i8, i8* @ub1, align 1, !tbaa !2
+ %0 = load i8, ptr @ub1, align 1, !tbaa !2
%conv = trunc i8 %0 to i1
%xor = xor i1 %conv, 0
%conv1 = zext i1 %xor to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUb0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUb1() #0 {
entry:
- %x = load i8, i8* @ub1, align 1, !tbaa !2
+ %x = load i8, ptr @ub1, align 1, !tbaa !2
%xor = xor i8 1, %x
%conv = trunc i8 %xor to i1
%conv1 = zext i1 %conv to i8
- store i8 %conv1, i8* @ub, align 1, !tbaa !2
+ store i8 %conv1, ptr @ub, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUb1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUc() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
- %1 = load i8, i8* @uc2, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
+ %1 = load i8, ptr @uc2, align 1, !tbaa !2
%and3 = and i8 %1, %0
- store i8 %and3, i8* @uc, align 1, !tbaa !2
+ store i8 %and3, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent andUc
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUc0() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%and = and i8 %0, 67
- store i8 %and, i8* @uc, align 1, !tbaa !2
+ store i8 %and, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent andUc0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUc1() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%and = and i8 %0, 167
- store i8 %and, i8* @uc, align 1, !tbaa !2
+ store i8 %and, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent andUc1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUc() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
- %1 = load i8, i8* @uc2, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
+ %1 = load i8, ptr @uc2, align 1, !tbaa !2
%or3 = or i8 %1, %0
- store i8 %or3, i8* @uc, align 1, !tbaa !2
+ store i8 %or3, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent orUc
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUc0() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%or = or i8 %0, 69
- store i8 %or, i8* @uc, align 1, !tbaa !2
+ store i8 %or, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent orUc0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUc1() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%or = or i8 %0, 238
- store i8 %or, i8* @uc, align 1, !tbaa !2
+ store i8 %or, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent orUc1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUc() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
- %1 = load i8, i8* @uc2, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
+ %1 = load i8, ptr @uc2, align 1, !tbaa !2
%xor3 = xor i8 %1, %0
- store i8 %xor3, i8* @uc, align 1, !tbaa !2
+ store i8 %xor3, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUc
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUc0() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%xor = xor i8 %0, 23
- store i8 %xor, i8* @uc, align 1, !tbaa !2
+ store i8 %xor, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUc0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUc1() #0 {
entry:
- %0 = load i8, i8* @uc1, align 1, !tbaa !2
+ %0 = load i8, ptr @uc1, align 1, !tbaa !2
%xor = xor i8 %0, 120
- store i8 %xor, i8* @uc, align 1, !tbaa !2
+ store i8 %xor, ptr @uc, align 1, !tbaa !2
; CHECK-LABEL: .ent xorUc1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUs() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
- %1 = load i16, i16* @us2, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
+ %1 = load i16, ptr @us2, align 2, !tbaa !5
%and3 = and i16 %1, %0
- store i16 %and3, i16* @us, align 2, !tbaa !5
+ store i16 %and3, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent andUs
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUs0() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%and = and i16 %0, 4660
- store i16 %and, i16* @us, align 2, !tbaa !5
+ store i16 %and, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent andUs0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @andUs1() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%and = and i16 %0, 61351
- store i16 %and, i16* @us, align 2, !tbaa !5
+ store i16 %and, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent andUs1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @orUs() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
- %1 = load i16, i16* @us2, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
+ %1 = load i16, ptr @us2, align 2, !tbaa !5
%or3 = or i16 %1, %0
- store i16 %or3, i16* @us, align 2, !tbaa !5
+ store i16 %or3, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent orUs
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
; orUs0: ORs the i16 global @us1 with the immediate 17666 and stores the
; result to @us.  "-"/"+" lines are the typed-pointer vs. opaque-"ptr"
; form of the same memory access.
define void @orUs0() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%or = or i16 %0, 17666
- store i16 %or, i16* @us, align 2, !tbaa !5
+ store i16 %or, ptr @us, align 2, !tbaa !5
ret void
}
; Function Attrs: noinline nounwind
define void @orUs1() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%or = or i16 %0, 60945
- store i16 %or, i16* @us, align 2, !tbaa !5
+ store i16 %or, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent orUs1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUs() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
- %1 = load i16, i16* @us2, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
+ %1 = load i16, ptr @us2, align 2, !tbaa !5
%xor3 = xor i16 %1, %0
- store i16 %xor3, i16* @us, align 2, !tbaa !5
+ store i16 %xor3, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent xorUs
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUs0() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%xor = xor i16 %0, 6062
- store i16 %xor, i16* @us, align 2, !tbaa !5
+ store i16 %xor, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent xorUs0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; Function Attrs: noinline nounwind
define void @xorUs1() #0 {
entry:
- %0 = load i16, i16* @us1, align 2, !tbaa !5
+ %0 = load i16, ptr @us1, align 2, !tbaa !5
%xor = xor i16 %0, 60024
- store i16 %xor, i16* @us, align 2, !tbaa !5
+ store i16 %xor, ptr @us, align 2, !tbaa !5
; CHECK-LABEL: .ent xorUs1
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
; RUN: -fast-isel-abort=3 -verify-machineinstrs | FileCheck %s
@str = private unnamed_addr constant [12 x i8] c"hello there\00", align 1
-@src = global i8* getelementptr inbounds ([12 x i8], [12 x i8]* @str, i32 0, i32 0), align 4
+@src = global ptr @str, align 4
@i = global i32 12, align 4
@dest = common global [50 x i8] zeroinitializer, align 1
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1)
; cpy: forwards (%src, %i) to the memcpy intrinsic with the global buffer
; @dest as destination; the checks pin the GOT load of dest and the
; indirect call sequence.  "-"/"+" lines are the typed-pointer ->
; opaque-"ptr" migration of the same code.
-define void @cpy(i8* %src, i32 %i) {
+define void @cpy(ptr %src, i32 %i) {
; CHECK-LABEL: cpy:
; CHECK: lw $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
; CHECK: jalr $[[T2]]
; CHECK-NEXT: nop
; CHECK-NOT: {{.*}}$2{{.*}}
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr @dest, ptr %src, i32 %i, i1 false)
ret void
}
; mov: same shape as @cpy above but exercises the memmove intrinsic into
; the global buffer @dest.  "-"/"+" lines are the typed-pointer ->
; opaque-"ptr" migration of the same code.
-define void @mov(i8* %src, i32 %i) {
+define void @mov(ptr %src, i32 %i) {
; CHECK-LABEL: mov:
; CHECK: lw $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
; CHECK: jalr $[[T2]]
; CHECK-NEXT: nop
; CHECK-NOT: {{.*}}$2{{.*}}
- call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false)
+ call void @llvm.memmove.p0.p0.i32(ptr @dest, ptr %src, i32 %i, i1 false)
ret void
}
; CHECK: jalr $[[T2]]
; CHECK-NEXT: nop
; CHECK-NOT: {{.*}}$2{{.*}}
- call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8 42, i32 %i, i1 false)
+ call void @llvm.memset.p0.i32(ptr @dest, i8 42, i32 %i, i1 false)
ret void
}
; RUN: < %s | FileCheck %s
@x = common global [128000 x float] zeroinitializer, align 4
-@y = global float* getelementptr inbounds ([128000 x float], [128000 x float]* @x, i32 0, i32 0), align 4
+@y = global ptr @x, align 4
@result = common global float 0.000000e+00, align 4
@.str = private unnamed_addr constant [5 x i8] c"%f \0A\00", align 1
define void @foo() {
entry:
; CHECK-LABEL: .ent foo
- %0 = load float*, float** @y, align 4
- %arrayidx = getelementptr inbounds float, float* %0, i32 64000
- store float 5.500000e+00, float* %arrayidx, align 4
+ %0 = load ptr, ptr @y, align 4
+ %arrayidx = getelementptr inbounds float, ptr %0, i32 64000
+ store float 5.500000e+00, ptr %arrayidx, align 4
; CHECK: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
; CHECK: lw $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
; CHECK: lui $[[REG_FPCONST_INT:[0-9]+]], 16560
define void @goo() {
entry:
; CHECK-LABEL: .ent goo
- %0 = load float*, float** @y, align 4
- %arrayidx = getelementptr inbounds float, float* %0, i32 64000
- %1 = load float, float* %arrayidx, align 4
- store float %1, float* @result, align 4
+ %0 = load ptr, ptr @y, align 4
+ %arrayidx = getelementptr inbounds float, ptr %0, i32 64000
+ %1 = load float, ptr %arrayidx, align 4
+ store float %1, ptr @result, align 4
; CHECK-DAG: lw $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
; CHECK-DAG: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
; CHECK-DAG: lw $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
; Original C code for test.
;
;float x[128000];
-;float *y = x;
+;float *y = x;
;float result;
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=mipsel -relocation-model=pic -O0 -mcpu=mips32 < %s | FileCheck %s
-define void @test(i32 %x, i1* %p) nounwind {
+define void @test(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
; CHECK-NEXT: move $1, $4
; CHECK-NEXT: nop
%y = and i32 %x, 1
%c = icmp eq i32 %y, 1
- store i1 %c, i1* %p
+ store i1 %c, ptr %p
br i1 %c, label %foo, label %foo
foo:
; CHECK-DAG: teq $[[K]], $zero, 7
; CHECK-DAG: mfhi $[[RESULT:[0-9]+]]
; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
- %1 = load i32, i32* @sj, align 4
- %2 = load i32, i32* @sk, align 4
+ %1 = load i32, ptr @sj, align 4
+ %2 = load i32, ptr @sk, align 4
%rem = srem i32 %1, %2
- store i32 %rem, i32* @si, align 4
+ store i32 %rem, ptr @si, align 4
ret void
}
; CHECK-DAG: teq $[[K]], $zero, 7
; CHECK-DAG: mfhi $[[RESULT:[0-9]+]]
; CHECK: sw $[[RESULT]], 0($[[I_ADDR]])
- %1 = load i32, i32* @uj, align 4
- %2 = load i32, i32* @uk, align 4
+ %1 = load i32, ptr @uj, align 4
+ %2 = load i32, ptr @uk, align 4
%rem = urem i32 %1, %2
- store i32 %rem, i32* @ui, align 4
+ store i32 %rem, ptr @ui, align 4
ret void
}
define i32 @reti() {
entry:
; CHECK-LABEL: reti:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
ret i32 %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define i16 @retus() {
entry:
; CHECK-LABEL: retus:
- %0 = load i16, i16* @s, align 2
+ %0 = load i16, ptr @s, align 2
ret i16 %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define signext i16 @rets() {
entry:
; CHECK-LABEL: rets:
- %0 = load i16, i16* @s, align 2
+ %0 = load i16, ptr @s, align 2
ret i16 %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define i8 @retuc() {
entry:
; CHECK-LABEL: retuc:
- %0 = load i8, i8* @c, align 1
+ %0 = load i8, ptr @c, align 1
ret i8 %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define signext i8 @retc() {
entry:
; CHECK-LABEL: retc:
- %0 = load i8, i8* @c, align 1
+ %0 = load i8, ptr @c, align 1
ret i8 %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define float @retf() {
entry:
; CHECK-LABEL: retf:
- %0 = load float, float* @f, align 4
+ %0 = load float, ptr @f, align 4
ret float %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define double @retd() {
entry:
; CHECK-LABEL: retd:
- %0 = load double, double* @d, align 8
+ %0 = load double, ptr @d, align 8
ret double %0
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @sll() {
entry:
- %0 = load i16, i16* @s1, align 2
- %1 = load i16, i16* @s2, align 2
+ %0 = load i16, ptr @s1, align 2
+ %1 = load i16, ptr @s2, align 2
%shl = shl i16 %0, %1
- store i16 %shl, i16* @s3, align 2
+ store i16 %shl, ptr @s3, align 2
; CHECK-LABEL: sll:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK-DAG: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @slli() {
entry:
- %0 = load i16, i16* @s1, align 2
+ %0 = load i16, ptr @s1, align 2
%shl = shl i16 %0, 5
- store i16 %shl, i16* @s3, align 2
+ store i16 %shl, ptr @s3, align 2
; CHECK-LABEL: slli:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK-DAG: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @srl() {
entry:
- %0 = load i16, i16* @us1, align 2
- %1 = load i16, i16* @us2, align 2
+ %0 = load i16, ptr @us1, align 2
+ %1 = load i16, ptr @us2, align 2
%shr = lshr i16 %0, %1
- store i16 %shr, i16* @us3, align 2
+ store i16 %shr, ptr @us3, align 2
ret void
; CHECK-LABEL: srl:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
define void @srli() {
entry:
- %0 = load i16, i16* @us1, align 2
+ %0 = load i16, ptr @us1, align 2
%shr = lshr i16 %0, 4
- store i16 %shr, i16* @us3, align 2
+ store i16 %shr, ptr @us3, align 2
; CHECK-LABEL: srli:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK-DAG: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @sra() {
entry:
- %0 = load i16, i16* @s1, align 2
- %1 = load i16, i16* @s2, align 2
+ %0 = load i16, ptr @s1, align 2
+ %1 = load i16, ptr @s2, align 2
%shr = ashr i16 %0, %1
- store i16 %shr, i16* @s3, align 2
+ store i16 %shr, ptr @s3, align 2
; CHECK-LABEL: sra:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK-DAG: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define void @srai() {
entry:
- %0 = load i16, i16* @s1, align 2
+ %0 = load i16, ptr @s1, align 2
%shr = ashr i16 %0, 2
- store i16 %shr, i16* @s3, align 2
+ store i16 %shr, ptr @s3, align 2
; CHECK-LABEL: srai:
; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
; CHECK-DAG: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
define i32 @main() nounwind uwtable {
entry:
%foo = alloca %struct.s, align 4
- %0 = bitcast %struct.s* %foo to i32*
- %bf.load = load i32, i32* %0, align 4
+ %bf.load = load i32, ptr %foo, align 4
%bf.lshr = lshr i32 %bf.load, 2
%cmp = icmp ne i32 %bf.lshr, 2
br i1 %cmp, label %if.then, label %if.end
; Function Attrs: nounwind
define void @foo() {
entry:
- store i32 12345, i32* @abcd, align 4
+ store i32 12345, ptr @abcd, align 4
; CHECK: addiu $[[REG1:[0-9]+]], $zero, 12345
; CHECK: lw $[[REG2:[0-9]+]], %got(abcd)(${{[0-9]+}})
; CHECK: sw $[[REG1]], 0($[[REG2]])
; Function Attrs: nounwind
define void @f1() #0 {
entry:
- store float 0x3FFA76C8C0000000, float* @f, align 4
+ store float 0x3FFA76C8C0000000, ptr @f, align 4
ret void
; CHECK: .ent f1
; CHECK: lui $[[REG1:[0-9]+]], 16339
; Function Attrs: nounwind
define void @d1() #0 {
entry:
- store double 1.234567e+00, double* @de, align 8
+ store double 1.234567e+00, ptr @de, align 8
; mips32r2: .ent d1
; mips32r2: lui $[[REG1a:[0-9]+]], 16371
; mips32r2: ori $[[REG2a:[0-9]+]], $[[REG1a]], 49353
; Function Attrs: nounwind
define void @si2_1() #0 {
entry:
- store i32 32767, i32* @ijk, align 4
+ store i32 32767, ptr @ijk, align 4
; CHECK: .ent si2_1
; CHECK: addiu $[[REG1:[0-9]+]], $zero, 32767
; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
; Function Attrs: nounwind
define void @si2_2() #0 {
entry:
- store i32 -32768, i32* @ijk, align 4
+ store i32 -32768, ptr @ijk, align 4
; CHECK: .ent si2_2
; CHECK: lui $[[REG1:[0-9]+]], 65535
; CHECK: ori $[[REG2:[0-9]+]], $[[REG1]], 32768
; Function Attrs: nounwind
define void @ui2_1() #0 {
entry:
- store i32 65535, i32* @ijk, align 4
+ store i32 65535, ptr @ijk, align 4
; CHECK: .ent ui2_1
; CHECK: ori $[[REG1:[0-9]+]], $zero, 65535
; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
; Function Attrs: nounwind
define void @ui4_1() #0 {
entry:
- store i32 983040, i32* @ijk, align 4
+ store i32 983040, ptr @ijk, align 4
; CHECK: .ent ui4_1
; CHECK: lui $[[REG1:[0-9]+]], 15
; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
; Function Attrs: nounwind
define void @ui4_2() #0 {
entry:
- store i32 719566, i32* @ijk, align 4
+ store i32 719566, ptr @ijk, align 4
; CHECK: .ent ui4_2
; CHECK: lui $[[REG1:[0-9]+]], 10
; CHECK: ori $[[REG1]], $[[REG1]], 64206
; test: loads an i16 through a negative GEP index (-2) off a stack array,
; i.e. an address below the alloca base -- exercises negative frame
; offsets on loads.  "-"/"+" lines are the opaque-"ptr" migration.
define i16 @test() {
%a = alloca [4 x i16], align 4
- %arrayidx = getelementptr inbounds [4 x i16], [4 x i16]* %a, i32 0, i32 -2
- %b = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds [4 x i16], ptr %a, i32 0, i32 -2
+ %b = load i16, ptr %arrayidx, align 2
ret i16 %b
}
; test2: store counterpart of @test -- writes an i16 through the same
; negative GEP index (-2) off a stack array.  "-"/"+" lines are the
; opaque-"ptr" migration.
define void @test2() {
%a = alloca [4 x i16], align 4
- %arrayidx = getelementptr inbounds [4 x i16], [4 x i16]* %a, i32 0, i32 -2
- store i16 2, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds [4 x i16], ptr %a, i32 0, i32 -2
+ store i16 2, ptr %arrayidx, align 2
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
; add_complex_float: component-wise addition of two {float,float} complex
; values loaded through %a and %b; the MIR checks pin the result being
; returned in $f0/$f2.  "-"/"+" runs are the typed-pointer ->
; opaque-"ptr" migration of the same GEP/load sequence.
-define { float, float } @add_complex_float({ float, float }* %a, { float, float }* %b) {
+define { float, float } @add_complex_float(ptr %a, ptr %b) {
; MIPS32-LABEL: name: add_complex_float
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0, $a1
; MIPS32: $f2 = COPY [[FADD1]](s32)
; MIPS32: RetRA implicit $f0, implicit $f2
entry:
- %.realp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 0
- %.real = load float, float* %.realp, align 4
- %.imagp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 1
- %.imag = load float, float* %.imagp, align 4
- %.realp1 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 0
- %.real2 = load float, float* %.realp1, align 4
- %.imagp3 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 1
- %.imag4 = load float, float* %.imagp3, align 4
+ %.realp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 0
+ %.real = load float, ptr %.realp, align 4
+ %.imagp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 1
+ %.imag = load float, ptr %.imagp, align 4
+ %.realp1 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 0
+ %.real2 = load float, ptr %.realp1, align 4
+ %.imagp3 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 1
+ %.imag4 = load float, ptr %.imagp3, align 4
; real/imag parts added independently, then rebuilt as an aggregate.
%add.r = fadd float %.real, %.real2
%add.i = fadd float %.imag, %.imag4
%.fca.0.insert = insertvalue { float, float } undef, float %add.r, 0
ret { float, float } %.fca.1.insert
}
; add_complex_double: double-precision twin of add_complex_float --
; component-wise add of two {double,double} values; the MIR checks pin
; the result in $d0/$d1.  "-"/"+" runs are the opaque-"ptr" migration.
-define { double, double } @add_complex_double({ double, double }* %a, { double, double }* %b) {
+define { double, double } @add_complex_double(ptr %a, ptr %b) {
; MIPS32-LABEL: name: add_complex_double
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0, $a1
; MIPS32: $d1 = COPY [[FADD1]](s64)
; MIPS32: RetRA implicit $d0, implicit $d1
entry:
- %.realp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 0
- %.real = load double, double* %.realp, align 8
- %.imagp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 1
- %.imag = load double, double* %.imagp, align 8
- %.realp1 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 0
- %.real2 = load double, double* %.realp1, align 8
- %.imagp3 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 1
- %.imag4 = load double, double* %.imagp3, align 8
+ %.realp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 0
+ %.real = load double, ptr %.realp, align 8
+ %.imagp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 1
+ %.imag = load double, ptr %.imagp, align 8
+ %.realp1 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 0
+ %.real2 = load double, ptr %.realp1, align 8
+ %.imagp3 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 1
+ %.imag4 = load double, ptr %.imagp3, align 8
%add.r = fadd double %.real, %.real2
%add.i = fadd double %.imag, %.imag4
%.fca.0.insert = insertvalue { double, double } undef, double %add.r, 0
}
declare { float, float } @ret_complex_float()
; call_ret_complex_float: calls @ret_complex_float() and stores the two
; extracted components through the out-pointer %z (real at index 0,
; imag at index 1).  "-"/"+" lines are the opaque-"ptr" migration.
-define void @call_ret_complex_float({ float, float }* %z) {
+define void @call_ret_complex_float(ptr %z) {
; MIPS32-LABEL: name: call_ret_complex_float
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0
%call = call { float, float } @ret_complex_float()
%0 = extractvalue { float, float } %call, 0
%1 = extractvalue { float, float } %call, 1
- %.realp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 0
- %.imagp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 1
- store float %0, float* %.realp, align 4
- store float %1, float* %.imagp, align 4
+ %.realp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 0
+ %.imagp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 1
+ store float %0, ptr %.realp, align 4
+ store float %1, ptr %.imagp, align 4
ret void
}
declare { double, double } @ret_complex_double()
; call_ret_complex_double: double-precision twin of the function above --
; calls @ret_complex_double() and stores both components through %z.
; "-"/"+" lines are the opaque-"ptr" migration.
-define void @call_ret_complex_double({ double, double }* %z) {
+define void @call_ret_complex_double(ptr %z) {
; MIPS32-LABEL: name: call_ret_complex_double
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0
%call = call { double, double } @ret_complex_double()
%0 = extractvalue { double, double } %call, 0
%1 = extractvalue { double, double } %call, 1
- %.realp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 0
- %.imagp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 1
- store double %0, double* %.realp, align 8
- store double %1, double* %.imagp, align 8
+ %.realp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 0
+ %.imagp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 1
+ store double %0, ptr %.realp, align 8
+ store double %1, ptr %.imagp, align 8
ret void
}
ret i32 %doublez
}
-define i32 @call_reg(i32 (i32, i32)* %f_ptr, i32 %x, i32 %y) {
+define i32 @call_reg(ptr %f_ptr, i32 %x, i32 %y) {
; MIPS32-LABEL: name: call_reg
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0, $a1, $a2
ret i32 %call
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
-define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 signext %length) {
+define void @call_symbol(ptr nocapture readonly %src, ptr nocapture %dest, i32 signext %length) {
; MIPS32-LABEL: name: call_symbol
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0, $a1, $a2
; MIPS32_PIC: G_MEMCPY [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store (s8) into %ir.dest), (load (s8) from %ir.src)
; MIPS32_PIC: RetRA
entry:
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %length, i1 false)
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
; ptr_arg_in_regs: dereferences a pointer argument that arrives in the
; first argument register ($a0, per liveins) and returns the loaded i32
; in $v0.  "-"/"+" lines are the opaque-"ptr" migration.
-define i32 @ptr_arg_in_regs(i32* %p) {
+define i32 @ptr_arg_in_regs(ptr %p) {
; MIPS32-LABEL: name: ptr_arg_in_regs
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0
; MIPS32: $v0 = COPY [[LOAD]](s32)
; MIPS32: RetRA implicit $v0
entry:
- %0 = load i32, i32* %p
+ %0 = load i32, ptr %p
ret i32 %0
}
; ptr_arg_on_stack: four leading i32 args occupy $a0-$a3, so the fifth
; argument -- the pointer %p -- is passed on the stack; the test loads
; through it and returns the value.  "-"/"+" lines are the opaque-"ptr"
; migration.
-define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {
; MIPS32-LABEL: name: ptr_arg_on_stack
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0, $a1, $a2, $a3
; MIPS32: $v0 = COPY [[LOAD1]](s32)
; MIPS32: RetRA implicit $v0
entry:
- %0 = load i32, i32* %p
+ %0 = load i32, ptr %p
ret i32 %0
}
; ret_ptr: identity function on a pointer -- checks that a p0 value is
; copied straight from the argument to the return register $v0.
; "-"/"+" lines are the opaque-"ptr" migration.
-define i8* @ret_ptr(i8* %p) {
+define ptr @ret_ptr(ptr %p) {
; MIPS32-LABEL: name: ret_ptr
; MIPS32: bb.1.entry:
; MIPS32: liveins: $a0
; MIPS32: $v0 = COPY [[COPY]](p0)
; MIPS32: RetRA implicit $v0
entry:
- ret i8* %p
+ ret ptr %p
}
}
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
; uadd_with_overflow: calls llvm.uadd.with.overflow.i32 and writes the
; sum through %padd and the carry flag (i1) through %pcarry_flag; the
; check pins the addu of the two argument registers.  "-"/"+" lines are
; the opaque-"ptr" migration.
-define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag) {
+define void @uadd_with_overflow(i32 %lhs, i32 %rhs, ptr %padd, ptr %pcarry_flag) {
; MIPS32-LABEL: uadd_with_overflow:
; MIPS32: # %bb.0:
; MIPS32-NEXT: addu $1, $4, $5
%res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
%carry_flag = extractvalue { i32, i1 } %res, 1
%add = extractvalue { i32, i1 } %res, 0
- store i1 %carry_flag, i1* %pcarry_flag
- store i32 %add, i32* %padd
+ store i1 %carry_flag, ptr %pcarry_flag
+ store i32 %add, ptr %padd
ret void
}
; --- Segment: MSA vector integer add tests (P5600, mips32r5 + msa/fp64) ---
; NOTE(review): diff-style dump of the opaque-pointer migration (`-` old
; typed-pointer IR, `+` new `ptr` IR). Code left byte-identical; CHECK lines
; are autogenerated by update_llc_test_checks.py and position-sensitive.
; Pattern per function: load two vectors, add, store — expecting MSA
; ld.<sz>/addv/st.<sz> style lowering (only the ld line is checked here).
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @add_v16i8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%add = add <16 x i8> %1, %0
- store <16 x i8> %add, <16 x i8>* %c, align 16
+ store <16 x i8> %add, ptr %c, align 16
ret void
}
-define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @add_v8i16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%add = add <8 x i16> %1, %0
- store <8 x i16> %add, <8 x i16>* %c, align 16
+ store <8 x i16> %add, ptr %c, align 16
ret void
}
-define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @add_v4i32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%add = add <4 x i32> %1, %0
- store <4 x i32> %add, <4 x i32>* %c, align 16
+ store <4 x i32> %add, ptr %c, align 16
ret void
}
-define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @add_v2i64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%add = add <2 x i64> %1, %0
- store <2 x i64> %add, <2 x i64>* %c, align 16
+ store <2 x i64> %add, ptr %c, align 16
ret void
}
; --- Segment: MSA addv.* / addvi.* builtin tests (same RUN configuration) ---
; addvi variants take an immediate operand (immarg) instead of a second vector.
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
-define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @add_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v16i8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
-define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @add_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v8i16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
-define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @add_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v4i32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
-define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @add_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: add_v2i64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
-define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
+define void @add_v16i8_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: add_v16i8_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
%1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 3)
- store <16 x i8> %1, <16 x i8>* %c, align 16
+ store <16 x i8> %1, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
-define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
+define void @add_v8i16_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: add_v8i16_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
%1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 18)
- store <8 x i16> %1, <8 x i16>* %c, align 16
+ store <8 x i16> %1, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
-define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
+define void @add_v4i32_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: add_v4i32_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
%1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 25)
- store <4 x i32> %1, <4 x i32>* %c, align 16
+ store <4 x i32> %1, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
-define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
+define void @add_v2i64_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: add_v2i64_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
%1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 31)
- store <2 x i64> %1, <2 x i64>* %c, align 16
+ store <2 x i64> %1, ptr %c, align 16
ret void
}
; --- Segment: complex {float,float}/{double,double} lowering, indirectbr,
; --- indirect/symbol calls, and dynamic-alloca memset tests (MIPS32) ---
; NOTE(review): diff-style dump of the opaque-pointer migration; several
; interior lines of these functions were elided when this chunk was produced
; (flagged inline below). Code left byte-identical.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define { float, float } @add_complex_float({ float, float }* %a, { float, float }* %b) {
+define { float, float } @add_complex_float(ptr %a, ptr %b) {
; MIPS32-LABEL: add_complex_float:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %.realp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 0
- %.real = load float, float* %.realp, align 4
- %.imagp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 1
- %.imag = load float, float* %.imagp, align 4
- %.realp1 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 0
- %.real2 = load float, float* %.realp1, align 4
- %.imagp3 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 1
- %.imag4 = load float, float* %.imagp3, align 4
+ %.realp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 0
+ %.real = load float, ptr %.realp, align 4
+ %.imagp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 1
+ %.imag = load float, ptr %.imagp, align 4
+ %.realp1 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 0
+ %.real2 = load float, ptr %.realp1, align 4
+ %.imagp3 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 1
+ %.imag4 = load float, ptr %.imagp3, align 4
%add.r = fadd float %.real, %.real2
%add.i = fadd float %.imag, %.imag4
%.fca.0.insert = insertvalue { float, float } undef, float %add.r, 0
; NOTE(review): %.fca.1.insert is used below but its defining insertvalue was
; elided from this dump — concatenation artifact; confirm against upstream.
ret { float, float } %.fca.1.insert
}
-define { double, double } @add_complex_double({ double, double }* %a, { double, double }* %b) {
+define { double, double } @add_complex_double(ptr %a, ptr %b) {
; MIPS32-LABEL: add_complex_double:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %.realp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 0
- %.real = load double, double* %.realp, align 8
- %.imagp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 1
- %.imag = load double, double* %.imagp, align 8
- %.realp1 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 0
- %.real2 = load double, double* %.realp1, align 8
- %.imagp3 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 1
- %.imag4 = load double, double* %.imagp3, align 8
+ %.realp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 0
+ %.real = load double, ptr %.realp, align 8
+ %.imagp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 1
+ %.imag = load double, ptr %.imagp, align 8
+ %.realp1 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 0
+ %.real2 = load double, ptr %.realp1, align 8
+ %.imagp3 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 1
+ %.imag4 = load double, ptr %.imagp3, align 8
%add.r = fadd double %.real, %.real2
%add.i = fadd double %.imag, %.imag4
%.fca.0.insert = insertvalue { double, double } undef, double %add.r, 0
; NOTE(review): function closes without a terminator here — the trailing
; insertvalue/ret lines were elided from this dump; concatenation artifact.
}
declare { float, float } @ret_complex_float()
-define void @call_ret_complex_float({ float, float }* %z) {
+define void @call_ret_complex_float(ptr %z) {
; MIPS32-LABEL: call_ret_complex_float:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -24
%call = call { float, float } @ret_complex_float()
%0 = extractvalue { float, float } %call, 0
%1 = extractvalue { float, float } %call, 1
- %.realp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 0
- %.imagp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 1
- store float %0, float* %.realp, align 4
- store float %1, float* %.imagp, align 4
+ %.realp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 0
+ %.imagp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 1
+ store float %0, ptr %.realp, align 4
+ store float %1, ptr %.imagp, align 4
ret void
}
declare { double, double } @ret_complex_double()
-define void @call_ret_complex_double({ double, double }* %z) {
+define void @call_ret_complex_double(ptr %z) {
; MIPS32-LABEL: call_ret_complex_double:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -24
%call = call { double, double } @ret_complex_double()
%0 = extractvalue { double, double } %call, 0
%1 = extractvalue { double, double } %call, 1
- %.realp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 0
- %.imagp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 1
- store double %0, double* %.realp, align 8
- store double %1, double* %.imagp, align 8
+ %.realp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 0
+ %.imagp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 1
+ store double %0, ptr %.realp, align 8
+ store double %1, ptr %.imagp, align 8
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @indirectbr(i8 *%addr) {
+define i32 @indirectbr(ptr %addr) {
; MIPS32-LABEL: indirectbr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -8
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- indirectbr i8* %addr, [label %L1, label %L2]
+ indirectbr ptr %addr, [label %L1, label %L2]
L1:
ret i32 0
; NOTE(review): %doublez is undefined here — the L2 block and its body were
; elided from this dump; concatenation artifact, confirm against upstream.
ret i32 %doublez
}
-define i32 @call_reg(i32 (i32, i32)* %f_ptr, i32 %x, i32 %y) {
+define i32 @call_reg(ptr %f_ptr, i32 %x, i32 %y) {
; MIPS32-LABEL: call_reg:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -24
; NOTE(review): the indirect-call body producing %call was elided from this
; dump; concatenation artifact.
ret i32 %call
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
-define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 signext %length) {
+define void @call_symbol(ptr nocapture readonly %src, ptr nocapture %dest, i32 signext %length) {
; MIPS32-LABEL: call_symbol:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -24
; MIPS32_PIC-NEXT: jr $ra
; MIPS32_PIC-NEXT: nop
entry:
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %length, i1 false)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-declare i32 @puts(i8*)
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+declare i32 @puts(ptr)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
; Print_c_N_times: dynamically-sized alloca filled via memset, NUL-terminated,
; then printed — exercises variable-size stack objects under GlobalISel.
define void @Print_c_N_times(i8 %c, i32 %N) {
; MIPS32-LABEL: Print_c_N_times:
entry:
%add = add i32 %N, 1
%vla = alloca i8, i32 %add, align 1
- call void @llvm.memset.p0i8.i32(i8* align 1 %vla, i8 %c, i32 %N, i1 false)
- %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %N
- store i8 0, i8* %arrayidx, align 1
- %call = call i32 @puts(i8* %vla)
+ call void @llvm.memset.p0.i32(ptr align 1 %vla, i8 %c, i32 %N, i1 false)
+ %arrayidx = getelementptr inbounds i8, ptr %vla, i32 %N
+ store i8 0, ptr %arrayidx, align 1
+ %call = call i32 @puts(ptr %vla)
ret void
}
; --- Segment: MSA fabs (generic intrinsic + MIPS fmax.a builtin) and an
; --- acquire atomic load test ---
; NOTE(review): diff-style dump of the opaque-pointer migration; code left
; byte-identical. fmax.a with identical operands is the builtin route to |x|
; per the test names — TODO confirm semantics against the MSA intrinsic docs.
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <4 x float> @llvm.fabs.v4f32(<4 x float> %Val)
-define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) {
+define void @fabs_v4f32(ptr %a, ptr %c) {
; P5600-LABEL: fabs_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %a, align 16
%fabs = call <4 x float> @llvm.fabs.v4f32 (<4 x float> %0)
- store <4 x float> %fabs, <4 x float>* %c, align 16
+ store <4 x float> %fabs, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.fabs.v2f64(<2 x double> %Val)
-define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) {
+define void @fabs_v2f64(ptr %a, ptr %c) {
; P5600-LABEL: fabs_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
+ %0 = load <2 x double>, ptr %a, align 16
%fabs = call <2 x double> @llvm.fabs.v2f64 (<2 x double> %0)
- store <2 x double> %fabs, <2 x double>* %c, align 16
+ store <2 x double> %fabs, ptr %c, align 16
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>)
-define void @fabs_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
+define void @fabs_v4f32_builtin(ptr %a, ptr %c) {
; P5600-LABEL: fabs_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %a, align 16
%1 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %0)
- store <4 x float> %1, <4 x float>* %c, align 16
+ store <4 x float> %1, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>)
-define void @fabs_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
+define void @fabs_v2f64_builtin(ptr %a, ptr %c) {
; P5600-LABEL: fabs_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
+ %0 = load <2 x double>, ptr %a, align 16
%1 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %0)
- store <2 x double> %1, <2 x double>* %c, align 16
+ store <2 x double> %1, ptr %c, align 16
ret void
}
; atomic_load_i32: acquire load lowers to lw followed by a sync barrier.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @atomic_load_i32(i32* %ptr) {
+define i32 @atomic_load_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %val = load atomic i32, i32* %ptr acquire, align 4
+ %val = load atomic i32, ptr %ptr acquire, align 4
ret i32 %val
}
; --- Segment: MSA vector FP arithmetic (fadd/fsub/fmul/fdiv, v4f32 & v2f64) ---
; NOTE(review): diff-style dump of the opaque-pointer migration; code left
; byte-identical. Same load/op/store pattern for each of the eight functions.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fadd_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%add = fadd <4 x float> %0, %1
- store <4 x float> %add, <4 x float>* %c, align 16
+ store <4 x float> %add, ptr %c, align 16
ret void
}
-define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fadd_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%add = fadd <2 x double> %0, %1
- store <2 x double> %add, <2 x double>* %c, align 16
+ store <2 x double> %add, ptr %c, align 16
ret void
}
-define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fsub_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%sub = fsub <4 x float> %0, %1
- store <4 x float> %sub, <4 x float>* %c, align 16
+ store <4 x float> %sub, ptr %c, align 16
ret void
}
-define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fsub_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%sub = fsub <2 x double> %0, %1
- store <2 x double> %sub, <2 x double>* %c, align 16
+ store <2 x double> %sub, ptr %c, align 16
ret void
}
-define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fmul_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%mul = fmul <4 x float> %0, %1
- store <4 x float> %mul, <4 x float>* %c, align 16
+ store <4 x float> %mul, ptr %c, align 16
ret void
}
-define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fmul_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%mul = fmul <2 x double> %0, %1
- store <2 x double> %mul, <2 x double>* %c, align 16
+ store <2 x double> %mul, ptr %c, align 16
ret void
}
-define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fdiv_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%div = fdiv <4 x float> %0, %1
- store <4 x float> %div, <4 x float>* %c, align 16
+ store <4 x float> %div, ptr %c, align 16
ret void
}
-define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fdiv_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%div = fdiv <2 x double> %0, %1
- store <2 x double> %div, <2 x double>* %c, align 16
+ store <2 x double> %div, ptr %c, align 16
ret void
}
; --- Segment: MSA FP-arith builtins (llvm.mips.f{add,sub,mul,div}.{w,d}),
; --- generic llvm.sqrt, and llvm.mips.fsqrt builtins ---
; NOTE(review): diff-style dump of the opaque-pointer migration; code left
; byte-identical. Each function is the standard load/intrinsic/store pattern.
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
-define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fadd_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fadd_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* %c, align 16
+ store <4 x float> %2, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>)
-define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fadd_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fadd_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* %c, align 16
+ store <2 x double> %2, ptr %c, align 16
ret void
}
declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
-define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fsub_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fsub_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* %c, align 16
+ store <4 x float> %2, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>)
-define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fsub_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fsub_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* %c, align 16
+ store <2 x double> %2, ptr %c, align 16
ret void
}
declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
-define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fmul_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fmul_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* %c, align 16
+ store <4 x float> %2, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>)
-define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fmul_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fmul_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* %c, align 16
+ store <2 x double> %2, ptr %c, align 16
ret void
}
declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>)
-define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fdiv_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fdiv_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* %c, align 16
+ store <4 x float> %2, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>)
-define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fdiv_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: fdiv_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
- %1 = load <2 x double>, <2 x double>* %b, align 16
+ %0 = load <2 x double>, ptr %a, align 16
+ %1 = load <2 x double>, ptr %b, align 16
%2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* %c, align 16
+ store <2 x double> %2, ptr %c, align 16
ret void
}
; Generic llvm.sqrt intrinsic on MSA vectors.
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %Val)
-define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) {
+define void @sqrt_v4f32(ptr %a, ptr %c) {
; P5600-LABEL: sqrt_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %a, align 16
%sqrt = call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %0)
- store <4 x float> %sqrt, <4 x float>* %c, align 16
+ store <4 x float> %sqrt, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %Val)
-define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) {
+define void @sqrt_v2f64(ptr %a, ptr %c) {
; P5600-LABEL: sqrt_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
+ %0 = load <2 x double>, ptr %a, align 16
%sqrt = call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %0)
- store <2 x double> %sqrt, <2 x double>* %c, align 16
+ store <2 x double> %sqrt, ptr %c, align 16
ret void
}
; MIPS-specific fsqrt builtins (same pattern via target intrinsics).
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>)
-define void @fsqrt_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
+define void @fsqrt_v4f32_builtin(ptr %a, ptr %c) {
; P5600-LABEL: fsqrt_v4f32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %a, align 16
%1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* %c, align 16
+ store <4 x float> %1, ptr %c, align 16
ret void
}
declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>)
-define void @fsqrt_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
+define void @fsqrt_v2f64_builtin(ptr %a, ptr %c) {
; P5600-LABEL: fsqrt_v2f64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %a, align 16
+ %0 = load <2 x double>, ptr %a, align 16
%1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* %c, align 16
+ store <2 x double> %1, ptr %c, align 16
ret void
}
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 signext 1234567890)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 signext 1234567890)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; MIPS32_PIC-NEXT: jr $ra
; MIPS32_PIC-NEXT: nop
entry:
- %0 = load i32, i32* @val
+ %0 = load i32, ptr @val
ret i32 %0
}
; MIPS32_PIC-NEXT: jr $ra
; MIPS32_PIC-NEXT: nop
entry:
- %0 = load i32, i32* @val_with_local_linkage
+ %0 = load i32, ptr @val_with_local_linkage
ret i32 %0
}
ret i1 %cmp
}
-define i1 @eq_ptr(i32* %a, i32* %b){
+define i1 @eq_ptr(ptr %a, ptr %b){
; MIPS32-LABEL: eq_ptr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: xor $1, $4, $5
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %cmp = icmp eq i32* %a, %b
+ %cmp = icmp eq ptr %a, %b
ret i1 %cmp
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32* @inttoptr(i32 %a) {
+define ptr @inttoptr(i32 %a) {
; MIPS32-LABEL: inttoptr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: move $2, $4
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = inttoptr i32 %a to i32*
- ret i32* %0
+ %0 = inttoptr i32 %a to ptr
+ ret ptr %0
}
-define i32 @ptrtoint(i32* %a) {
+define i32 @ptrtoint(ptr %a) {
; MIPS32-LABEL: ptrtoint:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: move $2, $4
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = ptrtoint i32* %a to i32
+ %0 = ptrtoint ptr %a to i32
ret i32 %0
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @load_i32(i32* %ptr) {
+define i32 @load_i32(ptr %ptr) {
; MIPS32-LABEL: load_i32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %ptr
+ %0 = load i32, ptr %ptr
ret i32 %0
}
-define i64 @load_i64(i64* %ptr) {
+define i64 @load_i64(ptr %ptr) {
; MIPS32-LABEL: load_i64:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i64, i64* %ptr
+ %0 = load i64, ptr %ptr
ret i64 %0
}
-define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {
+define void @load_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b) {
; MIPS32-LABEL: load_ambiguous_i64_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i64, i64* %i64_ptr_a
- store i64 %0, i64* %i64_ptr_b
+ %0 = load i64, ptr %i64_ptr_a
+ store i64 %0, ptr %i64_ptr_b
ret void
}
-define float @load_float(float* %ptr) {
+define float @load_float(ptr %ptr) {
; MIPS32-LABEL: load_float:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %ptr
+ %0 = load float, ptr %ptr
ret float %0
}
-define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {
+define void @load_ambiguous_float_in_gpr(ptr %float_ptr_a, ptr %float_ptr_b) {
; MIPS32-LABEL: load_ambiguous_float_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %float_ptr_a
- store float %0, float* %float_ptr_b
+ %0 = load float, ptr %float_ptr_a
+ store float %0, ptr %float_ptr_b
ret void
}
-define double @load_double(double* %ptr) {
+define double @load_double(ptr %ptr) {
; MIPS32-LABEL: load_double:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load double, double* %ptr
+ %0 = load double, ptr %ptr
ret double %0
}
; MIPS32R6-NEXT: lwc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load float, float* @float_align1, align 1
+ %0 = load float, ptr @float_align1, align 1
ret float %0
}
; MIPS32R6-NEXT: lwc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load float, float* @float_align2, align 2
+ %0 = load float, ptr @float_align2, align 2
ret float %0
}
; MIPS32R6-NEXT: lwc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load float, float* @float_align4, align 4
+ %0 = load float, ptr @float_align4, align 4
ret float %0
}
; MIPS32R6-NEXT: lwc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load float, float* @float_align8, align 8
+ %0 = load float, ptr @float_align8, align 8
ret float %0
}
; MIPS32R6-NEXT: lw $2, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i32, i32* @i32_align1, align 1
+ %0 = load i32, ptr @i32_align1, align 1
ret i32 %0
}
; MIPS32R6-NEXT: lw $2, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i32, i32* @i32_align2, align 2
+ %0 = load i32, ptr @i32_align2, align 2
ret i32 %0
}
; MIPS32R6-NEXT: lw $2, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i32, i32* @i32_align4, align 4
+ %0 = load i32, ptr @i32_align4, align 4
ret i32 %0
}
; MIPS32R6-NEXT: lw $2, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i32, i32* @i32_align8, align 8
+ %0 = load i32, ptr @i32_align8, align 8
ret i32 %0
}
; unordered
; --------------------------------------------------------------------
-define i8 @atomic_load_unordered_i8(i8* %ptr) {
+define i8 @atomic_load_unordered_i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
ret i8 %load
}
-define i32 @atomic_load_unordered_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_unordered_i8_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%sext = sext i8 %load to i32
ret i32 %sext
}
-define i16 @atomic_load_unordered_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_unordered_i8_sext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_sext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%sext = sext i8 %load to i16
ret i16 %sext
}
-define i64 @atomic_load_unordered_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_unordered_i8_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%sext = sext i8 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_unordered_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_unordered_i8_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%zext = zext i8 %load to i32
ret i32 %zext
}
-define i16 @atomic_load_unordered_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_unordered_i8_zext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_zext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%zext = zext i8 %load to i16
ret i16 %zext
}
-define i64 @atomic_load_unordered_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_unordered_i8_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i8_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 255
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i8, i8* %ptr unordered, align 1
+ %load = load atomic i8, ptr %ptr unordered, align 1
%zext = zext i8 %load to i64
ret i64 %zext
}
-define i16 @atomic_load_unordered_i16(i16* %ptr) {
+define i16 @atomic_load_unordered_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i16, i16* %ptr unordered, align 2
+ %load = load atomic i16, ptr %ptr unordered, align 2
ret i16 %load
}
-define i32 @atomic_load_unordered_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_unordered_i16_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i16_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 16
- %load = load atomic i16, i16* %ptr unordered, align 2
+ %load = load atomic i16, ptr %ptr unordered, align 2
%sext = sext i16 %load to i32
ret i32 %sext
}
-define i64 @atomic_load_unordered_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_unordered_i16_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i16_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i16, i16* %ptr unordered, align 2
+ %load = load atomic i16, ptr %ptr unordered, align 2
%sext = sext i16 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_unordered_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_unordered_i16_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i16_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 65535
- %load = load atomic i16, i16* %ptr unordered, align 2
+ %load = load atomic i16, ptr %ptr unordered, align 2
%zext = zext i16 %load to i32
ret i32 %zext
}
-define i64 @atomic_load_unordered_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_unordered_i16_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i16_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 65535
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i16, i16* %ptr unordered, align 2
+ %load = load atomic i16, ptr %ptr unordered, align 2
%zext = zext i16 %load to i64
ret i64 %zext
}
-define i32 @atomic_load_unordered_i32(i32* %ptr) {
+define i32 @atomic_load_unordered_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i32, i32* %ptr unordered, align 4
+ %load = load atomic i32, ptr %ptr unordered, align 4
ret i32 %load
}
-define i64 @atomic_load_unordered_i64(i64* %ptr) {
+define i64 @atomic_load_unordered_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: mfc1 $2, $f0
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: mfc1 $3, $f1
- %load = load atomic i64, i64* %ptr unordered, align 8
+ %load = load atomic i64, ptr %ptr unordered, align 8
ret i64 %load
}
-define float @atomic_load_unordered_f32(float* %ptr) {
+define float @atomic_load_unordered_f32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_f32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lwc1 $f0, 64($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds float, float* %ptr, i32 16
- %load = load atomic float, float* %gep unordered, align 4
+ %gep = getelementptr inbounds float, ptr %ptr, i32 16
+ %load = load atomic float, ptr %gep unordered, align 4
ret float %load
}
-define double @atomic_load_unordered_f64(double* %ptr) {
+define double @atomic_load_unordered_f64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_f64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 128($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds double, double* %ptr, i32 16
- %load = load atomic double, double* %gep unordered, align 8
+ %gep = getelementptr inbounds double, ptr %ptr, i32 16
+ %load = load atomic double, ptr %gep unordered, align 8
ret double %load
}
-define i8* @atomic_load_unordered_p0i8(i8** %ptr) {
+define ptr @atomic_load_unordered_p0i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_unordered_p0i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 64($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
- %load = load atomic i8*, i8** %gep unordered, align 4
- ret i8* %load
+ %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+ %load = load atomic ptr, ptr %gep unordered, align 4
+ ret ptr %load
}
; --------------------------------------------------------------------
; monotonic
; --------------------------------------------------------------------
-define i8 @atomic_load_monotonic_i8(i8* %ptr) {
+define i8 @atomic_load_monotonic_i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
ret i8 %load
}
-define i32 @atomic_load_monotonic_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_monotonic_i8_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%sext = sext i8 %load to i32
ret i32 %sext
}
-define i16 @atomic_load_monotonic_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_monotonic_i8_sext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%sext = sext i8 %load to i16
ret i16 %sext
}
-define i64 @atomic_load_monotonic_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_monotonic_i8_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%sext = sext i8 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_monotonic_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_monotonic_i8_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%zext = zext i8 %load to i32
ret i32 %zext
}
-define i16 @atomic_load_monotonic_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_monotonic_i8_zext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%zext = zext i8 %load to i16
ret i16 %zext
}
-define i64 @atomic_load_monotonic_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_monotonic_i8_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 255
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i8, i8* %ptr monotonic, align 1
+ %load = load atomic i8, ptr %ptr monotonic, align 1
%zext = zext i8 %load to i64
ret i64 %zext
}
-define i16 @atomic_load_monotonic_i16(i16* %ptr) {
+define i16 @atomic_load_monotonic_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i16, i16* %ptr monotonic, align 2
+ %load = load atomic i16, ptr %ptr monotonic, align 2
ret i16 %load
}
-define i32 @atomic_load_monotonic_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_monotonic_i16_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 16
- %load = load atomic i16, i16* %ptr monotonic, align 2
+ %load = load atomic i16, ptr %ptr monotonic, align 2
%sext = sext i16 %load to i32
ret i32 %sext
}
-define i64 @atomic_load_monotonic_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_monotonic_i16_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i16, i16* %ptr monotonic, align 2
+ %load = load atomic i16, ptr %ptr monotonic, align 2
%sext = sext i16 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_monotonic_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_monotonic_i16_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 65535
- %load = load atomic i16, i16* %ptr monotonic, align 2
+ %load = load atomic i16, ptr %ptr monotonic, align 2
%zext = zext i16 %load to i32
ret i32 %zext
}
-define i64 @atomic_load_monotonic_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_monotonic_i16_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 65535
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i16, i16* %ptr monotonic, align 2
+ %load = load atomic i16, ptr %ptr monotonic, align 2
%zext = zext i16 %load to i64
ret i64 %zext
}
-define i32 @atomic_load_monotonic_i32(i32* %ptr) {
+define i32 @atomic_load_monotonic_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i32, i32* %ptr monotonic, align 4
+ %load = load atomic i32, ptr %ptr monotonic, align 4
ret i32 %load
}
-define i64 @atomic_load_monotonic_i64(i64* %ptr) {
+define i64 @atomic_load_monotonic_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: mfc1 $2, $f0
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: mfc1 $3, $f1
- %load = load atomic i64, i64* %ptr monotonic, align 8
+ %load = load atomic i64, ptr %ptr monotonic, align 8
ret i64 %load
}
-define float @atomic_load_monotonic_f32(float* %ptr) {
+define float @atomic_load_monotonic_f32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_f32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lwc1 $f0, 64($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds float, float* %ptr, i32 16
- %load = load atomic float, float* %gep monotonic, align 4
+ %gep = getelementptr inbounds float, ptr %ptr, i32 16
+ %load = load atomic float, ptr %gep monotonic, align 4
ret float %load
}
-define double @atomic_load_monotonic_f64(double* %ptr) {
+define double @atomic_load_monotonic_f64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_f64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 128($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds double, double* %ptr, i32 16
- %load = load atomic double, double* %gep monotonic, align 8
+ %gep = getelementptr inbounds double, ptr %ptr, i32 16
+ %load = load atomic double, ptr %gep monotonic, align 8
ret double %load
}
-define i8* @atomic_load_monotonic_p0i8(i8** %ptr) {
+define ptr @atomic_load_monotonic_p0i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_monotonic_p0i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 64($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
- %load = load atomic i8*, i8** %gep monotonic, align 4
- ret i8* %load
+ %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+ %load = load atomic ptr, ptr %gep monotonic, align 4
+ ret ptr %load
}
; --------------------------------------------------------------------
; acquire
; --------------------------------------------------------------------
-define i8 @atomic_load_acquire_i8(i8* %ptr) {
+define i8 @atomic_load_acquire_i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
ret i8 %load
}
-define i32 @atomic_load_acquire_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_acquire_i8_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%sext = sext i8 %load to i32
ret i32 %sext
}
-define i16 @atomic_load_acquire_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_acquire_i8_sext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_sext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%sext = sext i8 %load to i16
ret i16 %sext
}
-define i64 @atomic_load_acquire_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_acquire_i8_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%sext = sext i8 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_acquire_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_acquire_i8_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%zext = zext i8 %load to i32
ret i32 %zext
}
-define i16 @atomic_load_acquire_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_acquire_i8_zext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_zext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%zext = zext i8 %load to i16
ret i16 %zext
}
-define i64 @atomic_load_acquire_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_acquire_i8_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i8_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 255
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i8, i8* %ptr acquire, align 1
+ %load = load atomic i8, ptr %ptr acquire, align 1
%zext = zext i8 %load to i64
ret i64 %zext
}
-define i16 @atomic_load_acquire_i16(i16* %ptr) {
+define i16 @atomic_load_acquire_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i16, i16* %ptr acquire, align 2
+ %load = load atomic i16, ptr %ptr acquire, align 2
ret i16 %load
}
-define i32 @atomic_load_acquire_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_acquire_i16_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i16_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 16
- %load = load atomic i16, i16* %ptr acquire, align 2
+ %load = load atomic i16, ptr %ptr acquire, align 2
%sext = sext i16 %load to i32
ret i32 %sext
}
-define i64 @atomic_load_acquire_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_acquire_i16_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i16_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i16, i16* %ptr acquire, align 2
+ %load = load atomic i16, ptr %ptr acquire, align 2
%sext = sext i16 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_acquire_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_acquire_i16_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i16_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 65535
- %load = load atomic i16, i16* %ptr acquire, align 2
+ %load = load atomic i16, ptr %ptr acquire, align 2
%zext = zext i16 %load to i32
ret i32 %zext
}
-define i64 @atomic_load_acquire_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_acquire_i16_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i16_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 65535
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i16, i16* %ptr acquire, align 2
+ %load = load atomic i16, ptr %ptr acquire, align 2
%zext = zext i16 %load to i64
ret i64 %zext
}
-define i32 @atomic_load_acquire_i32(i32* %ptr) {
+define i32 @atomic_load_acquire_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i32, i32* %ptr acquire, align 4
+ %load = load atomic i32, ptr %ptr acquire, align 4
ret i32 %load
}
-define i64 @atomic_load_acquire_i64(i64* %ptr) {
+define i64 @atomic_load_acquire_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: mfc1 $2, $f0
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: mfc1 $3, $f1
- %load = load atomic i64, i64* %ptr acquire, align 8
+ %load = load atomic i64, ptr %ptr acquire, align 8
ret i64 %load
}
-define float @atomic_load_acquire_f32(float* %ptr) {
+define float @atomic_load_acquire_f32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_f32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lwc1 $f0, 64($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds float, float* %ptr, i32 16
- %load = load atomic float, float* %gep acquire, align 4
+ %gep = getelementptr inbounds float, ptr %ptr, i32 16
+ %load = load atomic float, ptr %gep acquire, align 4
ret float %load
}
-define double @atomic_load_acquire_f64(double* %ptr) {
+define double @atomic_load_acquire_f64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_f64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 128($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds double, double* %ptr, i32 16
- %load = load atomic double, double* %gep acquire, align 8
+ %gep = getelementptr inbounds double, ptr %ptr, i32 16
+ %load = load atomic double, ptr %gep acquire, align 8
ret double %load
}
-define i8* @atomic_load_acquire_p0i8(i8** %ptr) {
+define ptr @atomic_load_acquire_p0i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_acquire_p0i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 64($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
- %load = load atomic i8*, i8** %gep acquire, align 4
- ret i8* %load
+ %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+ %load = load atomic ptr, ptr %gep acquire, align 4
+ ret ptr %load
}
; --------------------------------------------------------------------
; seq_cst
; --------------------------------------------------------------------
-define i8 @atomic_load_seq_cst_i8(i8* %ptr) {
+define i8 @atomic_load_seq_cst_i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
ret i8 %load
}
-define i32 @atomic_load_seq_cst_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_seq_cst_i8_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%sext = sext i8 %load to i32
ret i32 %sext
}
-define i16 @atomic_load_seq_cst_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_seq_cst_i8_sext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 24
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%sext = sext i8 %load to i16
ret i16 %sext
}
-define i64 @atomic_load_seq_cst_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_seq_cst_i8_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%sext = sext i8 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_seq_cst_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_seq_cst_i8_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%zext = zext i8 %load to i32
ret i32 %zext
}
-define i16 @atomic_load_seq_cst_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_seq_cst_i8_zext_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 255
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%zext = zext i8 %load to i16
ret i16 %zext
}
-define i64 @atomic_load_seq_cst_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_seq_cst_i8_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lbu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 255
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i8, i8* %ptr seq_cst, align 1
+ %load = load atomic i8, ptr %ptr seq_cst, align 1
%zext = zext i8 %load to i64
ret i64 %zext
}
-define i16 @atomic_load_seq_cst_i16(i16* %ptr) {
+define i16 @atomic_load_seq_cst_i16(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i16:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i16, i16* %ptr seq_cst, align 2
+ %load = load atomic i16, ptr %ptr seq_cst, align 2
ret i16 %load
}
-define i32 @atomic_load_seq_cst_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_seq_cst_i16_sext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i16_sext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sll $1, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $2, $1, 16
- %load = load atomic i16, i16* %ptr seq_cst, align 2
+ %load = load atomic i16, ptr %ptr seq_cst, align 2
%sext = sext i16 %load to i32
ret i32 %sext
}
-define i64 @atomic_load_seq_cst_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_seq_cst_i16_sext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i16_sext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sra $2, $1, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: sra $3, $1, 31
- %load = load atomic i16, i16* %ptr seq_cst, align 2
+ %load = load atomic i16, ptr %ptr seq_cst, align 2
%sext = sext i16 %load to i64
ret i64 %sext
}
-define i32 @atomic_load_seq_cst_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_seq_cst_i16_zext_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $2, $1, 65535
- %load = load atomic i16, i16* %ptr seq_cst, align 2
+ %load = load atomic i16, ptr %ptr seq_cst, align 2
%zext = zext i16 %load to i32
ret i32 %zext
}
-define i64 @atomic_load_seq_cst_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_seq_cst_i16_zext_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lhu $1, 0($4)
; MIPS32-NEXT: andi $2, $1, 65535
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: andi $3, $1, 0
- %load = load atomic i16, i16* %ptr seq_cst, align 2
+ %load = load atomic i16, ptr %ptr seq_cst, align 2
%zext = zext i16 %load to i64
ret i64 %zext
}
-define i32 @atomic_load_seq_cst_i32(i32* %ptr) {
+define i32 @atomic_load_seq_cst_i32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %load = load atomic i32, i32* %ptr seq_cst, align 4
+ %load = load atomic i32, ptr %ptr seq_cst, align 4
ret i32 %load
}
-define i64 @atomic_load_seq_cst_i64(i64* %ptr) {
+define i64 @atomic_load_seq_cst_i64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_i64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 0($4)
; MIPS32-NEXT: mfc1 $2, $f0
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: mfc1 $3, $f1
- %load = load atomic i64, i64* %ptr seq_cst, align 8
+ %load = load atomic i64, ptr %ptr seq_cst, align 8
ret i64 %load
}
-define float @atomic_load_seq_cst_f32(float* %ptr) {
+define float @atomic_load_seq_cst_f32(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_f32:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lwc1 $f0, 64($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds float, float* %ptr, i32 16
- %load = load atomic float, float* %gep seq_cst, align 4
+ %gep = getelementptr inbounds float, ptr %ptr, i32 16
+ %load = load atomic float, ptr %gep seq_cst, align 4
ret float %load
}
-define double @atomic_load_seq_cst_f64(double* %ptr) {
+define double @atomic_load_seq_cst_f64(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_f64:
; MIPS32: # %bb.0:
; MIPS32-NEXT: ldc1 $f0, 128($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds double, double* %ptr, i32 16
- %load = load atomic double, double* %gep seq_cst, align 8
+ %gep = getelementptr inbounds double, ptr %ptr, i32 16
+ %load = load atomic double, ptr %gep seq_cst, align 8
ret double %load
}
-define i8* @atomic_load_seq_cst_p0i8(i8** %ptr) {
+define ptr @atomic_load_seq_cst_p0i8(ptr %ptr) {
; MIPS32-LABEL: atomic_load_seq_cst_p0i8:
; MIPS32: # %bb.0:
; MIPS32-NEXT: lw $2, 64($4)
; MIPS32-NEXT: sync
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
- %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
- %load = load atomic i8*, i8** %gep seq_cst, align 4
- ret i8* %load
+ %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+ %load = load atomic ptr, ptr %gep seq_cst, align 4
+ ret ptr %load
}
@i64_align4 = common global i64 0, align 4
@i64_align8 = common global i64 0, align 8
-define i32 @load3align1(%struct.MemSize3_Align1* %S) {
+define i32 @load3align1(ptr %S) {
; MIPS32-LABEL: load3align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $at
; MIPS32R6-NEXT: and $2, $1, $2
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align1* %S to i24*
- %bf.load = load i24, i24* %0, align 1
+ %bf.load = load i24, ptr %S, align 1
%bf.cast = zext i24 %bf.load to i32
ret i32 %bf.cast
}
-define i32 @load3align2(%struct.MemSize3_Align2* %S) {
+define i32 @load3align2(ptr %S) {
; MIPS32-LABEL: load3align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $at
; MIPS32R6-NEXT: and $2, $1, $2
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align2* %S to i24*
- %bf.load = load i24, i24* %0, align 2
+ %bf.load = load i24, ptr %S, align 2
%bf.cast = zext i24 %bf.load to i32
ret i32 %bf.cast
}
-define i32 @load3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+define i32 @load3align4(ptr %S, i32 signext %a) {
; MIPS32-LABEL: load3align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($4)
; MIPS32R6-NEXT: and $2, $1, $2
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align4* %S to i24*
- %bf.load = load i24, i24* %0, align 4
+ %bf.load = load i24, ptr %S, align 4
%bf.cast = zext i24 %bf.load to i32
ret i32 %bf.cast
}
-define i32 @load3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+define i32 @load3align8(ptr %S, i32 signext %a) {
; MIPS32-LABEL: load3align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($4)
; MIPS32R6-NEXT: and $2, $1, $2
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align8* %S to i24*
- %bf.load = load i24, i24* %0, align 8
+ %bf.load = load i24, ptr %S, align 8
%bf.cast = zext i24 %bf.load to i32
ret i32 %bf.cast
}
-define i64 @load5align1(%struct.MemSize5_Align1* %S) {
+define i64 @load5align1(ptr %S) {
; MIPS32-LABEL: load5align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: andi $3, $1, 255
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align1* %S to i40*
- %bf.load = load i40, i40* %0, align 1
+ %bf.load = load i40, ptr %S, align 1
%bf.cast = zext i40 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load5align2(%struct.MemSize5_Align2* %S) {
+define i64 @load5align2(ptr %S) {
; MIPS32-LABEL: load5align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: andi $3, $1, 255
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align2* %S to i40*
- %bf.load = load i40, i40* %0, align 2
+ %bf.load = load i40, ptr %S, align 2
%bf.cast = zext i40 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load5align4(%struct.MemSize5_Align4* %S) {
+define i64 @load5align4(ptr %S) {
; MIPS32-LABEL: load5align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: andi $3, $1, 255
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align4* %S to i40*
- %bf.load = load i40, i40* %0, align 4
+ %bf.load = load i40, ptr %S, align 4
%bf.cast = zext i40 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load5align8(%struct.MemSize5_Align8* %S) {
+define i64 @load5align8(ptr %S) {
; MIPS32-LABEL: load5align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: andi $3, $1, 255
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align8* %S to i40*
- %bf.load = load i40, i40* %0, align 8
+ %bf.load = load i40, ptr %S, align 8
%bf.cast = zext i40 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load6align1(%struct.MemSize6_Align1* %S) {
+define i64 @load6align1(ptr %S) {
; MIPS32-LABEL: load6align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: andi $3, $1, 65535
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align1* %S to i48*
- %bf.load = load i48, i48* %0, align 1
+ %bf.load = load i48, ptr %S, align 1
%bf.cast = zext i48 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load6align2(%struct.MemSize6_Align2* %S) {
+define i64 @load6align2(ptr %S) {
; MIPS32-LABEL: load6align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: andi $3, $1, 65535
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align2* %S to i48*
- %bf.load = load i48, i48* %0, align 2
+ %bf.load = load i48, ptr %S, align 2
%bf.cast = zext i48 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load6align4(%struct.MemSize6_Align4* %S) {
+define i64 @load6align4(ptr %S) {
; MIPS32-LABEL: load6align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: andi $3, $1, 65535
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align4* %S to i48*
- %bf.load = load i48, i48* %0, align 4
+ %bf.load = load i48, ptr %S, align 4
%bf.cast = zext i48 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load6align8(%struct.MemSize6_Align8* %S) {
+define i64 @load6align8(ptr %S) {
; MIPS32-LABEL: load6align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: andi $3, $1, 65535
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align8* %S to i48*
- %bf.load = load i48, i48* %0, align 8
+ %bf.load = load i48, ptr %S, align 8
%bf.cast = zext i48 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load7align1(%struct.MemSize7_Align1* %S) {
+define i64 @load7align1(ptr %S) {
; MIPS32-LABEL: load7align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: and $3, $1, $3
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align1* %S to i56*
- %bf.load = load i56, i56* %0, align 1
+ %bf.load = load i56, ptr %S, align 1
%bf.cast = zext i56 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load7align2(%struct.MemSize7_Align2* %S) {
+define i64 @load7align2(ptr %S) {
; MIPS32-LABEL: load7align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: # implicit-def: $v0
; MIPS32R6-NEXT: and $3, $1, $3
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align2* %S to i56*
- %bf.load = load i56, i56* %0, align 2
+ %bf.load = load i56, ptr %S, align 2
%bf.cast = zext i56 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load7align4(%struct.MemSize7_Align4* %S) {
+define i64 @load7align4(ptr %S) {
; MIPS32-LABEL: load7align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: and $3, $1, $3
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align4* %S to i56*
- %bf.load = load i56, i56* %0, align 4
+ %bf.load = load i56, ptr %S, align 4
%bf.cast = zext i56 %bf.load to i64
ret i64 %bf.cast
}
-define i64 @load7align8(%struct.MemSize7_Align8* %S) {
+define i64 @load7align8(ptr %S) {
; MIPS32-LABEL: load7align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32R6-NEXT: and $3, $1, $3
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align8* %S to i56*
- %bf.load = load i56, i56* %0, align 8
+ %bf.load = load i56, ptr %S, align 8
%bf.cast = zext i56 %bf.load to i64
ret i64 %bf.cast
}
; MIPS32R6-NEXT: ldc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load double, double* @double_align1, align 1
+ %0 = load double, ptr @double_align1, align 1
ret double %0
}
; MIPS32R6-NEXT: ldc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load double, double* @double_align2, align 2
+ %0 = load double, ptr @double_align2, align 2
ret double %0
}
; MIPS32R6-NEXT: ldc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load double, double* @double_align4, align 4
+ %0 = load double, ptr @double_align4, align 4
ret double %0
}
; MIPS32R6-NEXT: ldc1 $f0, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load double, double* @double_align8, align 8
+ %0 = load double, ptr @double_align8, align 8
ret double %0
}
; MIPS32R6-NEXT: lw $3, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i64, i64* @i64_align1, align 1
+ %0 = load i64, ptr @i64_align1, align 1
ret i64 %0
}
; MIPS32R6-NEXT: lw $3, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i64, i64* @i64_align2, align 2
+ %0 = load i64, ptr @i64_align2, align 2
ret i64 %0
}
; MIPS32R6-NEXT: lw $3, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i64, i64* @i64_align4, align 4
+ %0 = load i64, ptr @i64_align4, align 4
ret i64 %0
}
; MIPS32R6-NEXT: lw $3, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = load i64, i64* @i64_align8, align 8
+ %0 = load i64, ptr @i64_align8, align 8
ret i64 %0
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define zeroext i8 @_16_bit_positive_offset(i8* %a) {
+define zeroext i8 @_16_bit_positive_offset(ptr %a) {
; MIPS32-LABEL: _16_bit_positive_offset:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $2, 32767($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i8, i8* %a, i32 32767
- %0 = load i8, i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %a, i32 32767
+ %0 = load i8, ptr %arrayidx
ret i8 %0
}
-define void @_16_bit_negative_offset(i8 %val, i8* %a) {
+define void @_16_bit_negative_offset(i8 %val, ptr %a) {
; MIPS32-LABEL: _16_bit_negative_offset:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sb $4, -32768($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i8, i8* %a, i32 -32768
- store i8 %val, i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %a, i32 -32768
+ store i8 %val, ptr %arrayidx
ret void
}
-define void @_large_positive_offset(i8 %val, i8* %a) {
+define void @_large_positive_offset(i8 %val, ptr %a) {
; MIPS32-LABEL: _large_positive_offset:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 32768
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i8, i8* %a, i32 32768
- store i8 %val, i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %a, i32 32768
+ store i8 %val, ptr %arrayidx
ret void
}
-define signext i8 @_large_negative_offset(i8* %a) {
+define signext i8 @_large_negative_offset(ptr %a) {
; MIPS32-LABEL: _large_negative_offset:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lui $1, 65535
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i8, i8* %a, i32 -32769
- %0 = load i8, i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %a, i32 -32769
+ %0 = load i8, ptr %arrayidx
ret i8 %0
}
-define float @fold_f32_load(float* %a) {
+define float @fold_f32_load(ptr %a) {
; MIPS32-LABEL: fold_f32_load:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f0, 40($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds float, float* %a, i32 10
- %0 = load float, float* %arrayidx
+ %arrayidx = getelementptr inbounds float, ptr %a, i32 10
+ %0 = load float, ptr %arrayidx
ret float %0
}
-define void @fold_f64_store(double %val, double* %a) {
+define void @fold_f64_store(double %val, ptr %a) {
; MIPS32-LABEL: fold_f64_store:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sdc1 $f12, -80($6)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds double, double* %a, i32 -10
- store double %val, double* %arrayidx
+ %arrayidx = getelementptr inbounds double, ptr %a, i32 -10
+ store double %val, ptr %arrayidx
ret void
}
-define i16 @fold_i16_load(i16* %a) {
+define i16 @fold_i16_load(ptr %a) {
; MIPS32-LABEL: fold_i16_load:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lhu $2, -20($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i16, i16* %a, i32 -10
- %0 = load i16, i16* %arrayidx
+ %arrayidx = getelementptr inbounds i16, ptr %a, i32 -10
+ %0 = load i16, ptr %arrayidx
ret i16 %0
}
-define void @fold_i32_store(i32 %val, i32* %a) {
+define void @fold_i32_store(i32 %val, ptr %a) {
; MIPS32-LABEL: fold_i32_store:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $4, 40($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %arrayidx = getelementptr inbounds i32, i32* %a, i32 10
- store i32 %val, i32* %arrayidx
+ %arrayidx = getelementptr inbounds i32, ptr %a, i32 10
+ store i32 %val, ptr %arrayidx
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) {
+define void @load_store_v16i8(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %b, align 16
- store <16 x i8> %0, <16 x i8>* %a, align 16
+ %0 = load <16 x i8>, ptr %b, align 16
+ store <16 x i8> %0, ptr %a, align 16
ret void
}
-define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
+define void @load_store_v8i16(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %b, align 16
- store <8 x i16> %0, <8 x i16>* %a, align 16
+ %0 = load <8 x i16>, ptr %b, align 16
+ store <8 x i16> %0, ptr %a, align 16
ret void
}
-define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) {
+define void @load_store_v4i32(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %b, align 16
- store <4 x i32> %0, <4 x i32>* %a, align 16
+ %0 = load <4 x i32>, ptr %b, align 16
+ store <4 x i32> %0, ptr %a, align 16
ret void
}
-define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @load_store_v2i64(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %b, align 16
- store <2 x i64> %0, <2 x i64>* %a, align 16
+ %0 = load <2 x i64>, ptr %b, align 16
+ store <2 x i64> %0, ptr %a, align 16
ret void
}
-define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) {
+define void @load_store_v4f32(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v4f32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float>* %b, align 16
- store <4 x float> %0, <4 x float>* %a, align 16
+ %0 = load <4 x float>, ptr %b, align 16
+ store <4 x float> %0, ptr %a, align 16
ret void
}
-define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) {
+define void @load_store_v2f64(ptr %a, ptr %b) {
; P5600-LABEL: load_store_v2f64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($5)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double>* %b, align 16
- store <2 x double> %0, <2 x double>* %a, align 16
+ %0 = load <2 x double>, ptr %b, align 16
+ store <2 x double> %0, ptr %a, align 16
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define void @long_chain_ambiguous_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+define void @long_chain_ambiguous_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_ambiguous_i32_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -48
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load i32, i32* %a
+ %phi1.0 = load i32, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load i32, i32* %b
+ %phi1.1 = load i32, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load i32, i32* %c
+ %phi1.2 = load i32, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store i32 %phi1, i32* %result
+ store i32 %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load i32, i32* %a
+ %phi2.0 = load i32, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load i32, i32* %b
+ %phi2.1 = load i32, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store i32 %phi2, i32* %result
+ store i32 %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi i32 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
%sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
- store i32 %sel_3_1.2, i32* %result
- store i32 %phi3, i32* %result
+ store i32 %sel_3_1.2, ptr %result
+ store i32 %phi3, ptr %result
ret void
}
-define void @long_chain_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+define void @long_chain_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_i32_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -56
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load i32, i32* %a
+ %phi1.0 = load i32, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load i32, i32* %b
+ %phi1.1 = load i32, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load i32, i32* %c
+ %phi1.2 = load i32, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store i32 %phi1, i32* %result
+ store i32 %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load i32, i32* %a
+ %phi2.0 = load i32, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load i32, i32* %b
+ %phi2.1 = load i32, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store i32 %phi2, i32* %result
+ store i32 %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi i32 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
%sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
- store i32 %sel_3_1.2, i32* %result
- store i32 %phi3, i32* %result
+ store i32 %sel_3_1.2, ptr %result
+ store i32 %phi3, ptr %result
ret void
}
-define void @long_chain_ambiguous_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+define void @long_chain_ambiguous_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_ambiguous_float_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -48
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load float, float* %a
+ %phi1.0 = load float, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load float, float* %b
+ %phi1.1 = load float, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load float, float* %c
+ %phi1.2 = load float, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store float %phi1, float* %result
+ store float %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load float, float* %a
+ %phi2.0 = load float, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load float, float* %b
+ %phi2.1 = load float, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store float %phi2, float* %result
+ store float %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi float [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
%sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
- store float %sel_3_1.2, float* %result
- store float %phi3, float* %result
+ store float %sel_3_1.2, ptr %result
+ store float %phi3, ptr %result
ret void
}
-define void @long_chain_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+define void @long_chain_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_float_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -56
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load float, float* %a
+ %phi1.0 = load float, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load float, float* %b
+ %phi1.1 = load float, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load float, float* %c
+ %phi1.2 = load float, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store float %phi1, float* %result
+ store float %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load float, float* %a
+ %phi2.0 = load float, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load float, float* %b
+ %phi2.1 = load float, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store float %phi2, float* %result
+ store float %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi float [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
%sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
- store float %sel_3_1.2, float* %result
- store float %phi3, float* %result
+ store float %sel_3_1.2, ptr %result
+ store float %phi3, ptr %result
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_ambiguous_i64_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -72
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load i64, i64* %a
+ %phi1.0 = load i64, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load i64, i64* %b
+ %phi1.1 = load i64, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load i64, i64* %c
+ %phi1.2 = load i64, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store i64 %phi1, i64* %result
+ store i64 %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load i64, i64* %a
+ %phi2.0 = load i64, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load i64, i64* %b
+ %phi2.1 = load i64, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store i64 %phi2, i64* %result
+ store i64 %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi i64 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
%sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
- store i64 %sel_3_1.2, i64* %result
- store i64 %phi3, i64* %result
+ store i64 %sel_3_1.2, ptr %result
+ store i64 %phi3, ptr %result
ret void
}
-define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_i64_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -80
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load i64, i64* %a
+ %phi1.0 = load i64, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load i64, i64* %b
+ %phi1.1 = load i64, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load i64, i64* %c
+ %phi1.2 = load i64, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store i64 %phi1, i64* %result
+ store i64 %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load i64, i64* %a
+ %phi2.0 = load i64, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load i64, i64* %b
+ %phi2.1 = load i64, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store i64 %phi2, i64* %result
+ store i64 %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi i64 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
%sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
- store i64 %sel_3_1.2, i64* %result
- store i64 %phi3, i64* %result
+ store i64 %sel_3_1.2, ptr %result
+ store i64 %phi3, ptr %result
ret void
}
-define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_ambiguous_double_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -72
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load double, double* %a
+ %phi1.0 = load double, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load double, double* %b
+ %phi1.1 = load double, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load double, double* %c
+ %phi1.2 = load double, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store double %phi1, double* %result
+ store double %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load double, double* %a
+ %phi2.0 = load double, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load double, double* %b
+ %phi2.1 = load double, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store double %phi2, double* %result
+ store double %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi double [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
%sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
- store double %sel_3_1.2, double* %result
- store double %phi3, double* %result
+ store double %sel_3_1.2, ptr %result
+ store double %phi3, ptr %result
ret void
}
-define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
; MIPS32-LABEL: long_chain_double_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -88
br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
b.PHI.1.0:
- %phi1.0 = load double, double* %a
+ %phi1.0 = load double, ptr %a
br label %b.PHI.1
b.PHI.1.1:
- %phi1.1 = load double, double* %b
+ %phi1.1 = load double, ptr %b
br label %b.PHI.1
b.PHI.1.2:
- %phi1.2 = load double, double* %c
+ %phi1.2 = load double, ptr %c
br label %b.PHI.1
b.PHI.1:
br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
b.PHI.1.end:
- store double %phi1, double* %result
+ store double %phi1, ptr %result
ret void
pre.PHI.2:
br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
b.PHI.2.0:
- %phi2.0 = load double, double* %a
+ %phi2.0 = load double, ptr %a
br label %b.PHI.2
b.PHI.2.1:
- %phi2.1 = load double, double* %b
+ %phi2.1 = load double, ptr %b
br label %b.PHI.2
b.PHI.2:
br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
b.PHI.2.end:
- store double %phi2, double* %result
+ store double %phi2, ptr %result
ret void
b.PHI.3:
%phi4 = phi double [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
%sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
%sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
- store double %sel_3_1.2, double* %result
- store double %phi3, double* %result
+ store double %sel_3_1.2, ptr %result
+ store double %phi3, ptr %result
ret void
}
}
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
-define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) {
+define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) {
; MIPS32-LABEL: umul_with_overflow:
; MIPS32: # %bb.0:
; MIPS32-NEXT: multu $4, $5
%res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
%carry_flag = extractvalue { i32, i1 } %res, 1
%mul = extractvalue { i32, i1 } %res, 0
- store i1 %carry_flag, i1* %pcarry_flag
- store i32 %mul, i32* %pmul
+ store i1 %carry_flag, ptr %pcarry_flag
+ store i32 %mul, ptr %pmul
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @mul_v16i8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%mul = mul <16 x i8> %1, %0
- store <16 x i8> %mul, <16 x i8>* %c, align 16
+ store <16 x i8> %mul, ptr %c, align 16
ret void
}
-define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @mul_v8i16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%mul = mul <8 x i16> %1, %0
- store <8 x i16> %mul, <8 x i16>* %c, align 16
+ store <8 x i16> %mul, ptr %c, align 16
ret void
}
-define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @mul_v4i32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%mul = mul <4 x i32> %1, %0
- store <4 x i32> %mul, <4 x i32>* %c, align 16
+ store <4 x i32> %mul, ptr %c, align 16
ret void
}
-define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @mul_v2i64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%mul = mul <2 x i64> %1, %0
- store <2 x i64> %mul, <2 x i64>* %c, align 16
+ store <2 x i64> %mul, ptr %c, align 16
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>)
-define void @mul_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @mul_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v16i8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>)
-define void @mul_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @mul_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v8i16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>)
-define void @mul_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @mul_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v4i32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>)
-define void @mul_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @mul_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: mul_v2i64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
ret i64 %cond
}
-define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
; MIPS32-LABEL: phi_ambiguous_i64_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -32
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i64, i64* %i64_ptr_a, align 8
- %1 = load i64, i64* %i64_ptr_b, align 8
+ %0 = load i64, ptr %i64_ptr_a, align 8
+ %1 = load i64, ptr %i64_ptr_b, align 8
br i1 %cnd, label %cond.true, label %cond.false
cond.true:
cond.end:
%cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
- store i64 %cond, i64* %i64_ptr_c, align 8
+ store i64 %cond, ptr %i64_ptr_c, align 8
ret void
}
ret float %cond
}
-define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
; MIPS32-LABEL: phi_ambiguous_float_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %f32_ptr_a, align 4
- %1 = load float, float* %f32_ptr_b, align 4
+ %0 = load float, ptr %f32_ptr_a, align 4
+ %1 = load float, ptr %f32_ptr_b, align 4
br i1 %cnd, label %cond.true, label %cond.false
cond.true:
cond.end:
%cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
- store float %cond, float* %f32_ptr_c, align 4
+ store float %cond, ptr %f32_ptr_c, align 4
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @ptr_arg_in_regs(i32* %p) {
+define i32 @ptr_arg_in_regs(ptr %p) {
; MIPS32-LABEL: ptr_arg_in_regs:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %p
+ %0 = load i32, ptr %p
ret i32 %0
}
-define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {
; MIPS32-LABEL: ptr_arg_on_stack:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $1, $sp, 16
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %p
+ %0 = load i32, ptr %p
ret i32 %0
}
-define i8* @ret_ptr(i8* %p) {
+define ptr @ret_ptr(ptr %p) {
; MIPS32-LABEL: ret_ptr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: move $2, $4
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- ret i8* %p
+ ret ptr %p
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%div = sdiv <16 x i8> %0, %1
- store <16 x i8> %div, <16 x i8>* %c, align 16
+ store <16 x i8> %div, ptr %c, align 16
ret void
}
-define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%div = sdiv <8 x i16> %0, %1
- store <8 x i16> %div, <8 x i16>* %c, align 16
+ store <8 x i16> %div, ptr %c, align 16
ret void
}
-define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%div = sdiv <4 x i32> %0, %1
- store <4 x i32> %div, <4 x i32>* %c, align 16
+ store <4 x i32> %div, ptr %c, align 16
ret void
}
-define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%div = sdiv <2 x i64> %0, %1
- store <2 x i64> %div, <2 x i64>* %c, align 16
+ store <2 x i64> %div, ptr %c, align 16
ret void
}
-define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @srem_v16i8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: srem_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%rem = srem <16 x i8> %0, %1
- store <16 x i8> %rem, <16 x i8>* %c, align 16
+ store <16 x i8> %rem, ptr %c, align 16
ret void
}
-define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @srem_v8i16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: srem_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%rem = srem <8 x i16> %0, %1
- store <8 x i16> %rem, <8 x i16>* %c, align 16
+ store <8 x i16> %rem, ptr %c, align 16
ret void
}
-define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @srem_v4i32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: srem_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%rem = srem <4 x i32> %0, %1
- store <4 x i32> %rem, <4 x i32>* %c, align 16
+ store <4 x i32> %rem, ptr %c, align 16
ret void
}
-define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @srem_v2i64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: srem_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%rem = srem <2 x i64> %0, %1
- store <2 x i64> %rem, <2 x i64>* %c, align 16
+ store <2 x i64> %rem, ptr %c, align 16
ret void
}
-define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v16u8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%div = udiv <16 x i8> %0, %1
- store <16 x i8> %div, <16 x i8>* %c, align 16
+ store <16 x i8> %div, ptr %c, align 16
ret void
}
-define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v8u16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%div = udiv <8 x i16> %0, %1
- store <8 x i16> %div, <8 x i16>* %c, align 16
+ store <8 x i16> %div, ptr %c, align 16
ret void
}
-define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v4u32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%div = udiv <4 x i32> %0, %1
- store <4 x i32> %div, <4 x i32>* %c, align 16
+ store <4 x i32> %div, ptr %c, align 16
ret void
}
-define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v2u64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%div = udiv <2 x i64> %0, %1
- store <2 x i64> %div, <2 x i64>* %c, align 16
+ store <2 x i64> %div, ptr %c, align 16
ret void
}
-define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @urem_v16u8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: urem_v16u8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%rem = urem <16 x i8> %0, %1
- store <16 x i8> %rem, <16 x i8>* %c, align 16
+ store <16 x i8> %rem, ptr %c, align 16
ret void
}
-define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @urem_v8u16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: urem_v8u16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%rem = urem <8 x i16> %0, %1
- store <8 x i16> %rem, <8 x i16>* %c, align 16
+ store <8 x i16> %rem, ptr %c, align 16
ret void
}
-define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @urem_v4u32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: urem_v4u32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%rem = urem <4 x i32> %0, %1
- store <4 x i32> %rem, <4 x i32>* %c, align 16
+ store <4 x i32> %rem, ptr %c, align 16
ret void
}
-define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @urem_v2u64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: urem_v2u64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%rem = urem <2 x i64> %0, %1
- store <2 x i64> %rem, <2 x i64>* %c, align 16
+ store <2 x i64> %rem, ptr %c, align 16
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>)
-define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sdiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v16i8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>)
-define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sdiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v8i16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>)
-define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sdiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v4i32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>)
-define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sdiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sdiv_v2i64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>)
-define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @smod_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: smod_v16i8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>)
-define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @smod_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: smod_v8i16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>)
-define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @smod_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: smod_v4i32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>)
-define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @smod_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: smod_v2i64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>)
-define void @udiv_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @udiv_v16u8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v16u8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>)
-define void @udiv_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @udiv_v8u16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v8u16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>)
-define void @udiv_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @udiv_v4u32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v4u32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>)
-define void @udiv_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @udiv_v2u64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: udiv_v2u64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>)
-define void @umod_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @umod_v16u8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: umod_v16u8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>)
-define void @umod_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @umod_v8u16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: umod_v8u16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>)
-define void @umod_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @umod_v4u32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: umod_v4u32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>)
-define void @umod_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @umod_v2u64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: umod_v2u64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
ret i32 %cond
}
-define i32* @select_ptr(i1 %test, i32* %a, i32* %b) {
+define ptr @select_ptr(i1 %test, ptr %a, ptr %b) {
; MIPS32-LABEL: select_ptr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: move $2, $6
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %cond = select i1 %test, i32* %a, i32* %b
- ret i32* %cond
+ %cond = select i1 %test, ptr %a, ptr %b
+ ret ptr %cond
}
define i32 @select_with_negation(i32 %a, i32 %b, i32 %x, i32 %y) {
ret i64 %cond
}
-define void @select_ambiguous_i64_in_fpr(i1 %test, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+define void @select_ambiguous_i64_in_fpr(i1 %test, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
; MIPS32-LABEL: select_ambiguous_i64_in_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ldc1 $f2, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i64, i64* %i64_ptr_a, align 8
- %1 = load i64, i64* %i64_ptr_b, align 8
+ %0 = load i64, ptr %i64_ptr_a, align 8
+ %1 = load i64, ptr %i64_ptr_b, align 8
%cond = select i1 %test, i64 %0, i64 %1
- store i64 %cond, i64* %i64_ptr_c, align 8
+ store i64 %cond, ptr %i64_ptr_c, align 8
ret void
}
ret float %cond
}
-define void @select_ambiguous_float_in_gpr(i1 %test, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+define void @select_ambiguous_float_in_gpr(i1 %test, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
; MIPS32-LABEL: select_ambiguous_float_in_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %f32_ptr_a, align 4
- %1 = load float, float* %f32_ptr_b, align 4
+ %0 = load float, ptr %f32_ptr_a, align 4
+ %1 = load float, ptr %f32_ptr_b, align 4
%cond = select i1 %test, float %0, float %1
- store float %cond, float* %f32_ptr_c, align 4
+ store float %cond, ptr %f32_ptr_c, align 4
ret void
}
%struct.S = type { i32, i32 }
-define void @ZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @ZeroInit(ptr noalias sret(%struct.S) %agg.result) {
; MIPS32-LABEL: ZeroInit:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 0
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %x = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 0
- store i32 0, i32* %x, align 4
- %y = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
- store i32 0, i32* %y, align 4
+ store i32 0, ptr %agg.result, align 4
+ %y = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 1
+ store i32 0, ptr %y, align 4
ret void
}
-define void @CallZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @CallZeroInit(ptr noalias sret(%struct.S) %agg.result) {
; MIPS32-LABEL: CallZeroInit:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -24
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- call void @ZeroInit(%struct.S* sret(%struct.S) %agg.result)
+ call void @ZeroInit(ptr sret(%struct.S) %agg.result)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define void @store_i32(i32 %val, i32* %ptr) {
+define void @store_i32(i32 %val, ptr %ptr) {
; MIPS32-LABEL: store_i32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $4, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- store i32 %val, i32* %ptr
+ store i32 %val, ptr %ptr
ret void
}
-define void @store_i64(i64 %val, i64* %ptr) {
+define void @store_i64(i64 %val, ptr %ptr) {
; MIPS32-LABEL: store_i64:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $4, 0($6)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- store i64 %val, i64* %ptr
+ store i64 %val, ptr %ptr
ret void
}
-define void @store_float(float %val, float* %ptr) {
+define void @store_float(float %val, ptr %ptr) {
; MIPS32-LABEL: store_float:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: swc1 $f12, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- store float %val, float* %ptr
+ store float %val, ptr %ptr
ret void
}
-define void @store_double(double %val, double* %ptr) {
+define void @store_double(double %val, ptr %ptr) {
; MIPS32-LABEL: store_double:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sdc1 $f12, 0($6)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- store double %val, double* %ptr
+ store double %val, ptr %ptr
ret void
}
; MIPS32R6-NEXT: swc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store float %a, float* @float_align1, align 1
+ store float %a, ptr @float_align1, align 1
ret void
}
; MIPS32R6-NEXT: swc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store float %a, float* @float_align2, align 2
+ store float %a, ptr @float_align2, align 2
ret void
}
; MIPS32R6-NEXT: swc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store float %a, float* @float_align4, align 4
+ store float %a, ptr @float_align4, align 4
ret void
}
; MIPS32R6-NEXT: swc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store float %a, float* @float_align8, align 8
+ store float %a, ptr @float_align8, align 8
ret void
}
; MIPS32R6-NEXT: sw $4, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i32 %a, i32* @i32_align1, align 1
+ store i32 %a, ptr @i32_align1, align 1
ret void
}
; MIPS32R6-NEXT: sw $4, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i32 %a, i32* @i32_align2, align 2
+ store i32 %a, ptr @i32_align2, align 2
ret void
}
; MIPS32R6-NEXT: sw $4, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i32 %a, i32* @i32_align4, align 4
+ store i32 %a, ptr @i32_align4, align 4
ret void
}
; MIPS32R6-NEXT: sw $4, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i32 %a, i32* @i32_align8, align 8
+ store i32 %a, ptr @i32_align8, align 8
ret void
}
@i64_align4 = common global i64 0, align 4
@i64_align8 = common global i64 0, align 8
-define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
+define void @store3align1(ptr %S, i32 signext %a) {
; MIPS32-LABEL: store3align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sb $5, 0($4)
; MIPS32R6-NEXT: sb $1, 2($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align1* %S to i24*
- %1 = trunc i32 %a to i24
- store i24 %1, i24* %0, align 1
+ %0 = trunc i32 %a to i24
+ store i24 %0, ptr %S, align 1
ret void
}
-define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
+define void @store3align2(ptr %S, i32 signext %a) {
; MIPS32-LABEL: store3align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sh $5, 0($4)
; MIPS32R6-NEXT: sb $1, 2($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align2* %S to i24*
- %1 = trunc i32 %a to i24
- store i24 %1, i24* %0, align 2
+ %0 = trunc i32 %a to i24
+ store i24 %0, ptr %S, align 2
ret void
}
-define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+define void @store3align4(ptr %S, i32 signext %a) {
; MIPS32-LABEL: store3align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sh $5, 0($4)
; MIPS32R6-NEXT: sb $1, 2($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align4* %S to i24*
- %1 = trunc i32 %a to i24
- store i24 %1, i24* %0, align 4
+ %0 = trunc i32 %a to i24
+ store i24 %0, ptr %S, align 4
ret void
}
-define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+define void @store3align8(ptr %S, i32 signext %a) {
; MIPS32-LABEL: store3align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sh $5, 0($4)
; MIPS32R6-NEXT: sb $1, 2($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize3_Align8* %S to i24*
- %1 = trunc i32 %a to i24
- store i24 %1, i24* %0, align 8
+ %0 = trunc i32 %a to i24
+ store i24 %0, ptr %S, align 8
ret void
}
-define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
+define void @store5align1(ptr %S, i64 %a) {
; MIPS32-LABEL: store5align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: swl $6, 3($4)
; MIPS32R6-NEXT: sb $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align1* %S to i40*
- %1 = trunc i64 %a to i40
- store i40 %1, i40* %0, align 1
+ %0 = trunc i64 %a to i40
+ store i40 %0, ptr %S, align 1
ret void
}
-define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
+define void @store5align2(ptr %S, i64 %a) {
; MIPS32-LABEL: store5align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: swl $6, 3($4)
; MIPS32R6-NEXT: sb $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align2* %S to i40*
- %1 = trunc i64 %a to i40
- store i40 %1, i40* %0, align 2
+ %0 = trunc i64 %a to i40
+ store i40 %0, ptr %S, align 2
ret void
}
-define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
+define void @store5align4(ptr %S, i64 %a) {
; MIPS32-LABEL: store5align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $6, 0($4)
; MIPS32R6-NEXT: sb $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align4* %S to i40*
- %1 = trunc i64 %a to i40
- store i40 %1, i40* %0, align 4
+ %0 = trunc i64 %a to i40
+ store i40 %0, ptr %S, align 4
ret void
}
-define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
+define void @store5align8(ptr %S, i64 %a) {
; MIPS32-LABEL: store5align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $6, 0($4)
; MIPS32R6-NEXT: sb $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize5_Align8* %S to i40*
- %1 = trunc i64 %a to i40
- store i40 %1, i40* %0, align 8
+ %0 = trunc i64 %a to i40
+ store i40 %0, ptr %S, align 8
ret void
}
-define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
+define void @store6align1(ptr %S, i64 %a) {
; MIPS32-LABEL: store6align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 4
; MIPS32R6-NEXT: sh $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align1* %S to i48*
- %1 = trunc i64 %a to i48
- store i48 %1, i48* %0, align 1
+ %0 = trunc i64 %a to i48
+ store i48 %0, ptr %S, align 1
ret void
}
-define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
+define void @store6align2(ptr %S, i64 %a) {
; MIPS32-LABEL: store6align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: swl $6, 3($4)
; MIPS32R6-NEXT: sh $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align2* %S to i48*
- %1 = trunc i64 %a to i48
- store i48 %1, i48* %0, align 2
+ %0 = trunc i64 %a to i48
+ store i48 %0, ptr %S, align 2
ret void
}
-define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
+define void @store6align4(ptr %S, i64 %a) {
; MIPS32-LABEL: store6align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $6, 0($4)
; MIPS32R6-NEXT: sh $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align4* %S to i48*
- %1 = trunc i64 %a to i48
- store i48 %1, i48* %0, align 4
+ %0 = trunc i64 %a to i48
+ store i48 %0, ptr %S, align 4
ret void
}
-define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
+define void @store6align8(ptr %S, i64 %a) {
; MIPS32-LABEL: store6align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: sw $6, 0($4)
; MIPS32R6-NEXT: sh $7, 4($4)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize6_Align8* %S to i48*
- %1 = trunc i64 %a to i48
- store i48 %1, i48* %0, align 8
+ %0 = trunc i64 %a to i48
+ store i48 %0, ptr %S, align 8
ret void
}
-define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
+define void @store7align1(ptr %S, i64 %a) {
; MIPS32-LABEL: store7align1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 4
; MIPS32R6-NEXT: sb $1, 2($2)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align1* %S to i56*
- %1 = trunc i64 %a to i56
- store i56 %1, i56* %0, align 1
+ %0 = trunc i64 %a to i56
+ store i56 %0, ptr %S, align 1
ret void
}
-define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
+define void @store7align2(ptr %S, i64 %a) {
; MIPS32-LABEL: store7align2:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 4
; MIPS32R6-NEXT: sb $1, 2($2)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align2* %S to i56*
- %1 = trunc i64 %a to i56
- store i56 %1, i56* %0, align 2
+ %0 = trunc i64 %a to i56
+ store i56 %0, ptr %S, align 2
ret void
}
-define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
+define void @store7align4(ptr %S, i64 %a) {
; MIPS32-LABEL: store7align4:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 4
; MIPS32R6-NEXT: sb $1, 2($2)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align4* %S to i56*
- %1 = trunc i64 %a to i56
- store i56 %1, i56* %0, align 4
+ %0 = trunc i64 %a to i56
+ store i56 %0, ptr %S, align 4
ret void
}
-define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
+define void @store7align8(ptr %S, i64 %a) {
; MIPS32-LABEL: store7align8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 4
; MIPS32R6-NEXT: sb $1, 2($2)
; MIPS32R6-NEXT: jrc $ra
entry:
- %0 = bitcast %struct.MemSize7_Align8* %S to i56*
- %1 = trunc i64 %a to i56
- store i56 %1, i56* %0, align 8
+ %0 = trunc i64 %a to i56
+ store i56 %0, ptr %S, align 8
ret void
}
; MIPS32R6-NEXT: sdc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store double %a, double* @double_align1, align 1
+ store double %a, ptr @double_align1, align 1
ret void
}
; MIPS32R6-NEXT: sdc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store double %a, double* @double_align2, align 2
+ store double %a, ptr @double_align2, align 2
ret void
}
; MIPS32R6-NEXT: sdc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store double %a, double* @double_align4, align 4
+ store double %a, ptr @double_align4, align 4
ret void
}
; MIPS32R6-NEXT: sdc1 $f12, 0($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store double %a, double* @double_align8, align 8
+ store double %a, ptr @double_align8, align 8
ret void
}
; MIPS32R6-NEXT: sw $5, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i64 %a, i64* @i64_align1, align 1
+ store i64 %a, ptr @i64_align1, align 1
ret void
}
; MIPS32R6-NEXT: sw $5, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i64 %a, i64* @i64_align2, align 2
+ store i64 %a, ptr @i64_align2, align 2
ret void
}
; MIPS32R6-NEXT: sw $5, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i64 %a, i64* @i64_align4, align 4
+ store i64 %a, ptr @i64_align4, align 4
ret void
}
; MIPS32R6-NEXT: sw $5, 4($1)
; MIPS32R6-NEXT: jrc $ra
entry:
- store i64 %a, i64* @i64_align8, align 8
+ store i64 %a, ptr @i64_align8, align 8
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
-define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sub_v16i8(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v16i8:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%sub = sub <16 x i8> %1, %0
- store <16 x i8> %sub, <16 x i8>* %c, align 16
+ store <16 x i8> %sub, ptr %c, align 16
ret void
}
-define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sub_v8i16(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v8i16:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%sub = sub <8 x i16> %1, %0
- store <8 x i16> %sub, <8 x i16>* %c, align 16
+ store <8 x i16> %sub, ptr %c, align 16
ret void
}
-define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sub_v4i32(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v4i32:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%sub = sub <4 x i32> %1, %0
- store <4 x i32> %sub, <4 x i32>* %c, align 16
+ store <4 x i32> %sub, ptr %c, align 16
ret void
}
-define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sub_v2i64(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v2i64:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w1, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%sub = sub <2 x i64> %1, %0
- store <2 x i64> %sub, <2 x i64>* %c, align 16
+ store <2 x i64> %sub, ptr %c, align 16
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>)
-define void @sub_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sub_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v16i8_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
- %1 = load <16 x i8>, <16 x i8>* %b, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
+ %1 = load <16 x i8>, ptr %b, align 16
%2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c, align 16
+ store <16 x i8> %2, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>)
-define void @sub_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sub_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v8i16_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
- %1 = load <8 x i16>, <8 x i16>* %b, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
+ %1 = load <8 x i16>, ptr %b, align 16
%2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c, align 16
+ store <8 x i16> %2, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>)
-define void @sub_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sub_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v4i32_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
- %1 = load <4 x i32>, <4 x i32>* %b, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
+ %1 = load <4 x i32>, ptr %b, align 16
%2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c, align 16
+ store <4 x i32> %2, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>)
-define void @sub_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sub_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
; P5600-LABEL: sub_v2i64_builtin:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
- %1 = load <2 x i64>, <2 x i64>* %b, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
+ %1 = load <2 x i64>, ptr %b, align 16
%2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c, align 16
+ store <2 x i64> %2, ptr %c, align 16
ret void
}
declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32 immarg)
-define void @sub_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
+define void @sub_v16i8_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: sub_v16i8_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.b $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <16 x i8>, <16 x i8>* %a, align 16
+ %0 = load <16 x i8>, ptr %a, align 16
%1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 3)
- store <16 x i8> %1, <16 x i8>* %c, align 16
+ store <16 x i8> %1, ptr %c, align 16
ret void
}
declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32 immarg)
-define void @sub_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
+define void @sub_v8i16_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: sub_v8i16_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.h $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <8 x i16>, <8 x i16>* %a, align 16
+ %0 = load <8 x i16>, ptr %a, align 16
%1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 18)
- store <8 x i16> %1, <8 x i16>* %c, align 16
+ store <8 x i16> %1, ptr %c, align 16
ret void
}
declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32 immarg)
-define void @sub_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
+define void @sub_v4i32_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: sub_v4i32_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.w $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <4 x i32>, <4 x i32>* %a, align 16
+ %0 = load <4 x i32>, ptr %a, align 16
%1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 25)
- store <4 x i32> %1, <4 x i32>* %c, align 16
+ store <4 x i32> %1, ptr %c, align 16
ret void
}
declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32 immarg)
-define void @sub_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
+define void @sub_v2i64_builtin_imm(ptr %a, ptr %c) {
; P5600-LABEL: sub_v2i64_builtin_imm:
; P5600: # %bb.0: # %entry
; P5600-NEXT: ld.d $w0, 0($4)
; P5600-NEXT: jr $ra
; P5600-NEXT: nop
entry:
- %0 = load <2 x i64>, <2 x i64>* %a, align 16
+ %0 = load <2 x i64>, ptr %a, align 16
%1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 31)
- store <2 x i64> %1, <2 x i64>* %c, align 16
+ store <2 x i64> %1, ptr %c, align 16
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @outgoing_gpr(i32* %i32_ptr) {
+define i32 @outgoing_gpr(ptr %i32_ptr) {
; MIPS32-LABEL: outgoing_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %i32_ptr
+ %0 = load i32, ptr %i32_ptr
ret i32 %0
}
-define float @outgoing_fpr(float* %float_ptr) {
+define float @outgoing_fpr(ptr %float_ptr) {
; MIPS32-LABEL: outgoing_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %float_ptr
+ %0 = load float, ptr %float_ptr
ret float %0
}
-define i32 @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {
+define i32 @outgoing_gpr_instr(ptr %i32_ptr1, ptr %i32_ptr2) {
; MIPS32-LABEL: outgoing_gpr_instr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %i32_ptr1
- %1 = load i32, i32* %i32_ptr2
+ %0 = load i32, ptr %i32_ptr1
+ %1 = load i32, ptr %i32_ptr2
%outgoing_instr = add i32 %1, %0
ret i32 %outgoing_instr
}
-define float @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {
+define float @outgoing_fpr_instr(ptr %float_ptr1, ptr %float_ptr2) {
; MIPS32-LABEL: outgoing_fpr_instr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f0, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %float_ptr1
- %1 = load float, float* %float_ptr2
+ %0 = load float, ptr %float_ptr1
+ %1 = load float, ptr %float_ptr2
%outgoing_instr = fadd float %0, %1
ret float %outgoing_instr
}
-define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, i32* %a) {
+define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, ptr %a) {
; MIPS32-LABEL: incoming_gpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: move $2, $4
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %a
+ %0 = load i32, ptr %a
%cond = select i1 %test, i32 %0, i32 %incoming_phys_reg
ret i32 %cond
}
-define float @incoming_fpr(float %incoming_phys_reg, i1 %test, float* %a) {
+define float @incoming_fpr(float %incoming_phys_reg, i1 %test, ptr %a) {
; MIPS32-LABEL: incoming_fpr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: mov.s $f0, $f12
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %a
+ %0 = load float, ptr %a
%cond = select i1 %test, float %0, float %incoming_phys_reg
ret float %cond
}
-define i32 @incoming_i32_instr(i32 %val1, i32 %val2, i32* %i32_ptr, i1 %test) {
+define i32 @incoming_i32_instr(i32 %val1, i32 %val2, ptr %i32_ptr, i1 %test) {
; MIPS32-LABEL: incoming_i32_instr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($6)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %i32_ptr
+ %0 = load i32, ptr %i32_ptr
%incoming_instr = add i32 %val2, %val1
%cond = select i1 %test, i32 %0, i32 %incoming_instr
ret i32 %cond
}
-define float @incoming_float_instr(float %val1, float %val2, float* %float_ptr, i1 %test) {
+define float @incoming_float_instr(float %val1, float %val2, ptr %float_ptr, i1 %test) {
; MIPS32-LABEL: incoming_float_instr:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lwc1 $f1, 0($6)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load float, float* %float_ptr
+ %0 = load float, ptr %float_ptr
%incoming_instr = fadd float %val2, %val1
%cond = select i1 %test, float %0, float %incoming_instr
ret float %cond
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i8 @load1_s8_to_load1_s32(i8* %px) {
+define i8 @load1_s8_to_load1_s32(ptr %px) {
; MIPS32-LABEL: load1_s8_to_load1_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
ret i8 %0
}
-define i16 @load2_s16_to_load2_s32(i16* %px) {
+define i16 @load2_s16_to_load2_s32(ptr %px) {
; MIPS32-LABEL: load2_s16_to_load2_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i16, i16* %px
+ %0 = load i16, ptr %px
ret i16 %0
}
-define void @load_store_i1(i1* %px, i1* %py) {
+define void @load_store_i1(ptr %px, ptr %py) {
; MIPS32-LABEL: load_store_i1:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $1, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i1, i1* %py
- store i1 %0, i1* %px
+ %0 = load i1, ptr %py
+ store i1 %0, ptr %px
ret void
}
-define void @load_store_i8(i8* %px, i8* %py) {
+define void @load_store_i8(ptr %px, ptr %py) {
; MIPS32-LABEL: load_store_i8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $1, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %py
- store i8 %0, i8* %px
+ %0 = load i8, ptr %py
+ store i8 %0, ptr %px
ret void
}
-define void @load_store_i16(i16* %px, i16* %py) {
+define void @load_store_i16(ptr %px, ptr %py) {
; MIPS32-LABEL: load_store_i16:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lhu $1, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i16, i16* %py
- store i16 %0, i16* %px
+ %0 = load i16, ptr %py
+ store i16 %0, ptr %px
ret void
}
-define void @load_store_i32(i32* %px, i32* %py) {
+define void @load_store_i32(ptr %px, ptr %py) {
; MIPS32-LABEL: load_store_i32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($5)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %py
- store i32 %0, i32* %px
+ %0 = load i32, ptr %py
+ store i32 %0, ptr %px
ret void
}
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
@.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare i32 @printf(i8*, ...)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare i32 @printf(ptr, ...)
-define void @testVaCopyArg(i8* %fmt, ...) {
+define void @testVaCopyArg(ptr %fmt, ...) {
; MIPS32-LABEL: testVaCopyArg:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -40
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %fmt.addr = alloca i8*, align 4
- %ap = alloca i8*, align 4
- %aq = alloca i8*, align 4
- %s = alloca i8*, align 4
- store i8* %fmt, i8** %fmt.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = bitcast i8** %aq to i8*
- %1 = bitcast i8** %ap to i8*
- call void @llvm.va_copy(i8* %0, i8* %1)
- %argp.cur = load i8*, i8** %aq, align 4
- %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
- store i8* %argp.next, i8** %aq, align 4
- %2 = bitcast i8* %argp.cur to i8**
- %3 = load i8*, i8** %2, align 4
- store i8* %3, i8** %s, align 4
- %4 = load i8*, i8** %s, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
+ %fmt.addr = alloca ptr, align 4
+ %ap = alloca ptr, align 4
+ %aq = alloca ptr, align 4
+ %s = alloca ptr, align 4
+ store ptr %fmt, ptr %fmt.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ call void @llvm.va_copy(ptr %aq, ptr %ap)
+ %argp.cur = load ptr, ptr %aq, align 4
+ %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+ store ptr %argp.next, ptr %aq, align 4
+ %0 = load ptr, ptr %argp.cur, align 4
+ store ptr %0, ptr %s, align 4
+ %1 = load ptr, ptr %s, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %1)
ret void
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
-define i32 @load1_s8_to_zextLoad1_s32(i8* %px) {
+define i32 @load1_s8_to_zextLoad1_s32(ptr %px) {
; MIPS32-LABEL: load1_s8_to_zextLoad1_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = zext i8 %0 to i32
ret i32 %conv
}
-define i32 @load2_s16_to_zextLoad2_s32(i16* %px) {
+define i32 @load2_s16_to_zextLoad2_s32(ptr %px) {
; MIPS32-LABEL: load2_s16_to_zextLoad2_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lhu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i16, i16* %px
+ %0 = load i16, ptr %px
%conv = zext i16 %0 to i32
ret i32 %conv
}
-define i16 @load1_s8_to_zextLoad1_s16(i8* %px) {
+define i16 @load1_s8_to_zextLoad1_s16(ptr %px) {
; MIPS32-LABEL: load1_s8_to_zextLoad1_s16:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = zext i8 %0 to i16
ret i16 %conv
}
-define zeroext i16 @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {
+define zeroext i16 @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {
; MIPS32-LABEL: load1_s8_to_zextLoad1_s16_to_zextLoad1_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lbu $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = zext i8 %0 to i16
ret i16 %conv
}
-define i64 @load4_s32_to_zextLoad4_s64(i32* %px) {
+define i64 @load4_s32_to_zextLoad4_s64(ptr %px) {
; MIPS32-LABEL: load4_s32_to_zextLoad4_s64:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %px
+ %0 = load i32, ptr %px
%conv = zext i32 %0 to i64
ret i64 %conv
}
-define i32 @load1_s8_to_sextLoad1_s32(i8* %px) {
+define i32 @load1_s8_to_sextLoad1_s32(ptr %px) {
; MIPS32-LABEL: load1_s8_to_sextLoad1_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lb $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = sext i8 %0 to i32
ret i32 %conv
}
-define i32 @load2_s16_to_sextLoad2_s32(i16* %px) {
+define i32 @load2_s16_to_sextLoad2_s32(ptr %px) {
; MIPS32-LABEL: load2_s16_to_sextLoad2_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lh $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i16, i16* %px
+ %0 = load i16, ptr %px
%conv = sext i16 %0 to i32
ret i32 %conv
}
-define i16 @load1_s8_to_sextLoad1_s16(i8* %px) {
+define i16 @load1_s8_to_sextLoad1_s16(ptr %px) {
; MIPS32-LABEL: load1_s8_to_sextLoad1_s16:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lb $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = sext i8 %0 to i16
ret i16 %conv
}
-define signext i16 @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {
+define signext i16 @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {
; MIPS32-LABEL: load1_s8_to_sextLoad1_s16_to_sextLoad1_s32:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lb $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i8, i8* %px
+ %0 = load i8, ptr %px
%conv = sext i8 %0 to i16
ret i16 %conv
}
-define i64 @load4_s32_to_sextLoad4_s64(i32* %px) {
+define i64 @load4_s32_to_sextLoad4_s64(ptr %px) {
; MIPS32-LABEL: load4_s32_to_sextLoad4_s64:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $2, 0($4)
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop
entry:
- %0 = load i32, i32* %px
+ %0 = load i32, ptr %px
%conv = sext i32 %0 to i64
ret i64 %conv
}
; RUN: llc < %s -march=mipsel | FileCheck %s
; RUN: llc < %s -march=mips | FileCheck %s
-define void @f(i64 %l, i64* nocapture %p) nounwind {
+define void @f(i64 %l, ptr nocapture %p) nounwind {
entry:
; CHECK: lui
; CHECK: ori
; CHECK: addu
%add = add i64 %l, 1311768467294899695
- store i64 %add, i64* %p, align 4
+ store i64 %add, ptr %p, align 4
ret void
}
define void @foo() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%add = add nsw i32 %0, 5
- store i32 %add, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ store i32 %add, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%sub = sub nsw i32 %1, 5
- store i32 %sub, i32* @j, align 4
- %2 = load i32, i32* @k, align 4
+ store i32 %sub, ptr @j, align 4
+ %2 = load i32, ptr @k, align 4
%add1 = add nsw i32 %2, 10000
- store i32 %add1, i32* @k, align 4
- %3 = load i32, i32* @l, align 4
+ store i32 %add1, ptr @k, align 4
+ %3 = load i32, ptr @l, align 4
%sub2 = sub nsw i32 %3, 10000
- store i32 %sub2, i32* @l, align 4
+ store i32 %sub2, ptr @l, align 4
; 16: addiu ${{[0-9]+}}, 5 # 16 bit inst
; 16: addiu ${{[0-9]+}}, -5 # 16 bit inst
; 16: addiu ${{[0-9]+}}, 10000
@x = global i32 0
@a = global i32 1
-declare i32 @y(i32*, i32)
+declare i32 @y(ptr, i32)
define i32 @z() {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = call i32 @y(i32 * @x, i32 %0)
+ %0 = load i32, ptr @a, align 4
+ %1 = call i32 @y(ptr @x, i32 %0)
ret i32 %1
}
; MIPS-LABEL: ===== Instruction selection ends:
-; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<i32* @x> 0 [TF=4]
-; MIPS: t{{.*}}: i32 = ADDiu t[[A]], TargetGlobalAddress:i32<i32* @x> 0 [TF=5]
+; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<ptr @x> 0 [TF=4]
+; MIPS: t{{.*}}: i32 = ADDiu t[[A]], TargetGlobalAddress:i32<ptr @x> 0 [TF=5]
; MIPS-XGOT-LABEL: ===== Instruction selection ends:
-; MIPS-XGOT: t[[B:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<i32* @x> 0 [TF=20]
+; MIPS-XGOT: t[[B:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<ptr @x> 0 [TF=20]
; MIPS-XGOT: t[[C:[0-9]+]]: i32 = ADDu t[[B]], Register:i32 %0
-; MIPS-XGOT: t{{.*}}: i32,ch = LW<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<i32* @x> 0 [TF=21], t{{.*}}
+; MIPS-XGOT: t{{.*}}: i32,ch = LW<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<ptr @x> 0 [TF=21], t{{.*}}
; MM-LABEL: ===== Instruction selection ends:
-; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<i32* @x> 0 [TF=4]
-; MM: t{{.*}}: i32 = ADDiu_MM t[[A]], TargetGlobalAddress:i32<i32* @x> 0 [TF=5]
+; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<ptr @x> 0 [TF=4]
+; MM: t{{.*}}: i32 = ADDiu_MM t[[A]], TargetGlobalAddress:i32<ptr @x> 0 [TF=5]
; MM-XGOT-LABEL: ===== Instruction selection ends:
-; MM-XGOT: t[[B:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<i32* @x> 0 [TF=20]
+; MM-XGOT: t[[B:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<ptr @x> 0 [TF=20]
; MM-XGOT: t[[C:[0-9]+]]: i32 = ADDU16_MM t[[B]], Register:i32 %0
-; MM-XGOT: t{{.*}}: i32,ch = LW_MM<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<i32* @x> 0 [TF=21], t0
+; MM-XGOT: t{{.*}}: i32,ch = LW_MM<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<ptr @x> 0 [TF=21], t0
; CHECK: $BB0_2:
; CHECK-NOT: sll ${{[0-9]+}}, ${{[0-9]+}}, 2
-define i32 @f0(i32 %n, i32 %m, [256 x i32]* nocapture %a, [256 x i32]* nocapture %b) nounwind readonly {
+define i32 @f0(i32 %n, i32 %m, ptr nocapture %a, ptr nocapture %b) nounwind readonly {
entry:
br label %for.cond1.preheader
for.body3:
%s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
%j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
- %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
- %0 = load i32, i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
- %1 = load i32, i32* %arrayidx6, align 4
+ %arrayidx4 = getelementptr inbounds [256 x i32], ptr %a, i32 %i.021, i32 %j.019
+ %0 = load i32, ptr %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds [256 x i32], ptr %b, i32 %i.021, i32 %j.019
+ %1 = load i32, ptr %arrayidx6, align 4
%add = add i32 %0, %s.120
%add7 = add i32 %add, %1
%add8 = add nsw i32 %j.019, %m
; RUN: llc < %s -march=mips -mcpu=mips64 -target-abi n64 | FileCheck %s -check-prefix=GP64
; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 | FileCheck %s -check-prefix=GP64
-declare void @bar(i32*)
+declare void @bar(ptr)
define void @foo(i32 %sz) {
; ALL-LABEL: foo:
; GP32-NOT: addiu $sp, $sp, 0
; GP64-NOT: daddiu $sp, $sp, 0
%a = alloca i32, i32 %sz
- call void @bar(i32* %a)
+ call void @bar(ptr %a)
ret void
}
@i = global i32 25, align 4
@.str = private unnamed_addr constant [5 x i8] c"%i \0A\00", align 1
-define void @p(i32* %i) nounwind {
+define void @p(ptr %i) nounwind {
entry:
ret void
}
%x = alloca i32, align 8
%zz = alloca i32, align 4
%z = alloca i32, align 4
- %0 = load i32, i32* @i, align 4
- %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
- store i32 %0, i32* %arrayidx, align 4
- %1 = load i32, i32* @i, align 4
- store i32 %1, i32* %x, align 8
- call void @p(i32* %x)
- %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
- call void @p(i32* %arrayidx1)
+ %0 = load i32, ptr @i, align 4
+ %arrayidx = getelementptr inbounds [512 x i32], ptr %y, i32 0, i32 10
+ store i32 %0, ptr %arrayidx, align 4
+ %1 = load i32, ptr @i, align 4
+ store i32 %1, ptr %x, align 8
+ call void @p(ptr %x)
+ %arrayidx1 = getelementptr inbounds [512 x i32], ptr %y, i32 0, i32 10
+ call void @p(ptr %arrayidx1)
ret void
}
; 16: save $ra, 2040
; CHECK: move $4, $[[T0]]
; CHECK: move $4, $[[T2]]
%tmp1 = alloca i8, i32 %size, align 4
- %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 5
- store i8 97, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %tmp1, i32 5
+ store i8 97, ptr %add.ptr, align 1
%tmp4 = alloca i8, i32 %size, align 4
call void @foo2(double 1.000000e+00, double 2.000000e+00, i32 3) nounwind
- %call = call i32 @foo(i8* %tmp1) nounwind
- %call7 = call i32 @foo(i8* %tmp4) nounwind
+ %call = call i32 @foo(ptr %tmp1) nounwind
+ %call7 = call i32 @foo(ptr %tmp4) nounwind
%add = add nsw i32 %call7, %call
ret i32 %add
}
declare void @foo2(double, double, i32)
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)
@.str = private unnamed_addr constant [22 x i8] c"%d %d %d %d %d %d %d\0A\00", align 1
; CHECK: move $sp, $[[T0]]
%tmp1 = alloca i8, i32 %size, align 4
- %0 = bitcast i8* %tmp1 to i32*
%cmp = icmp sgt i32 %size, 10
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
; CHECK: addiu $4, $[[T0]], 40
- %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 40
- %1 = bitcast i8* %add.ptr to i32*
- call void @foo3(i32* %1) nounwind
- %arrayidx15.pre = getelementptr inbounds i8, i8* %tmp1, i32 12
- %.pre = bitcast i8* %arrayidx15.pre to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %tmp1, i32 40
+ call void @foo3(ptr %add.ptr) nounwind
+ %arrayidx15.pre = getelementptr inbounds i8, ptr %tmp1, i32 12
br label %if.end
if.else: ; preds = %entry
; CHECK: addiu $4, $[[T0]], 12
- %add.ptr5 = getelementptr inbounds i8, i8* %tmp1, i32 12
- %2 = bitcast i8* %add.ptr5 to i32*
- call void @foo3(i32* %2) nounwind
+ %add.ptr5 = getelementptr inbounds i8, ptr %tmp1, i32 12
+ call void @foo3(ptr %add.ptr5) nounwind
br label %if.end
if.end: ; preds = %if.else, %if.then
; CHECK: lw $5, 0($[[T0]])
; CHECK: lw $25, %call16(printf)
- %.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
- %tmp7 = load i32, i32* %0, align 4
- %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
- %3 = bitcast i8* %arrayidx9 to i32*
- %tmp10 = load i32, i32* %3, align 4
- %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
- %4 = bitcast i8* %arrayidx12 to i32*
- %tmp13 = load i32, i32* %4, align 4
- %tmp16 = load i32, i32* %.pre-phi, align 4
- %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
- %5 = bitcast i8* %arrayidx18 to i32*
- %tmp19 = load i32, i32* %5, align 4
- %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
- %6 = bitcast i8* %arrayidx21 to i32*
- %tmp22 = load i32, i32* %6, align 4
- %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
- %7 = bitcast i8* %arrayidx24 to i32*
- %tmp25 = load i32, i32* %7, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
+ %arrayidx15.pre-phi = phi ptr [ %add.ptr5, %if.else ], [ %arrayidx15.pre, %if.then ]
+ %tmp7 = load i32, ptr %tmp1, align 4
+ %arrayidx9 = getelementptr inbounds i8, ptr %tmp1, i32 4
+ %tmp10 = load i32, ptr %arrayidx9, align 4
+ %arrayidx12 = getelementptr inbounds i8, ptr %tmp1, i32 8
+ %tmp13 = load i32, ptr %arrayidx12, align 4
+ %tmp16 = load i32, ptr %arrayidx15.pre-phi, align 4
+ %arrayidx18 = getelementptr inbounds i8, ptr %tmp1, i32 16
+ %tmp19 = load i32, ptr %arrayidx18, align 4
+ %arrayidx21 = getelementptr inbounds i8, ptr %tmp1, i32 20
+ %tmp22 = load i32, ptr %arrayidx21, align 4
+ %arrayidx24 = getelementptr inbounds i8, ptr %tmp1, i32 24
+ %tmp25 = load i32, ptr %arrayidx24, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
ret i32 0
}
-declare void @foo3(i32*)
+declare void @foo3(ptr)
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
define void @temp(i32 %foo) nounwind {
entry:
%foo.addr = alloca i32, align 4
- store i32 %foo, i32* %foo.addr, align 4
- %0 = load i32, i32* %foo.addr, align 4
- store i32 %0, i32* @t, align 4
+ store i32 %foo, ptr %foo.addr, align 4
+ %0 = load i32, ptr %foo.addr, align 4
+ store i32 %0, ptr @t, align 4
ret void
}
; 16: subu $[[REGISTER:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}}
; 16: move $sp, $[[REGISTER]]
%sssi = alloca i32, align 4
- %ip = alloca i32*, align 4
+ %ip = alloca ptr, align 4
%sssj = alloca i32, align 4
- %0 = load i32, i32* @iiii, align 4
- store i32 %0, i32* %sssi, align 4
- %1 = load i32, i32* @kkkk, align 4
+ %0 = load i32, ptr @iiii, align 4
+ store i32 %0, ptr %sssi, align 4
+ %1 = load i32, ptr @kkkk, align 4
%mul = mul nsw i32 %1, 100
%2 = alloca i8, i32 %mul
- %3 = bitcast i8* %2 to i32*
- store i32* %3, i32** %ip, align 4
- %4 = load i32, i32* @jjjj, align 4
- store i32 %4, i32* %sssj, align 4
- %5 = load i32, i32* @jjjj, align 4
- %6 = load i32, i32* @iiii, align 4
- %7 = load i32*, i32** %ip, align 4
- %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
- store i32 %5, i32* %arrayidx, align 4
- %8 = load i32, i32* @kkkk, align 4
- %9 = load i32, i32* @jjjj, align 4
- %10 = load i32*, i32** %ip, align 4
- %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
- store i32 %8, i32* %arrayidx1, align 4
- %11 = load i32, i32* @iiii, align 4
- %12 = load i32, i32* @kkkk, align 4
- %13 = load i32*, i32** %ip, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
- store i32 %11, i32* %arrayidx2, align 4
- %14 = load i32*, i32** %ip, align 4
- %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
- %15 = load i32, i32* %arrayidx3, align 4
- store i32 %15, i32* @riii, align 4
- %16 = load i32*, i32** %ip, align 4
- %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
- %17 = load i32, i32* %arrayidx4, align 4
- store i32 %17, i32* @rjjj, align 4
- %18 = load i32*, i32** %ip, align 4
- %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
- %19 = load i32, i32* %arrayidx5, align 4
- store i32 %19, i32* @rkkk, align 4
- %20 = load i32, i32* @t, align 4
- %21 = load i32*, i32** %ip, align 4
- %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
- %22 = load i32, i32* %arrayidx6, align 4
+ store ptr %2, ptr %ip, align 4
+ %3 = load i32, ptr @jjjj, align 4
+ store i32 %3, ptr %sssj, align 4
+ %4 = load i32, ptr @jjjj, align 4
+ %5 = load i32, ptr @iiii, align 4
+ %6 = load ptr, ptr %ip, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %6, i32 %5
+ store i32 %4, ptr %arrayidx, align 4
+ %7 = load i32, ptr @kkkk, align 4
+ %8 = load i32, ptr @jjjj, align 4
+ %9 = load ptr, ptr %ip, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %9, i32 %8
+ store i32 %7, ptr %arrayidx1, align 4
+ %10 = load i32, ptr @iiii, align 4
+ %11 = load i32, ptr @kkkk, align 4
+ %12 = load ptr, ptr %ip, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %12, i32 %11
+ store i32 %10, ptr %arrayidx2, align 4
+ %13 = load ptr, ptr %ip, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %13, i32 25
+ %14 = load i32, ptr %arrayidx3, align 4
+ store i32 %14, ptr @riii, align 4
+ %15 = load ptr, ptr %ip, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %15, i32 35
+ %16 = load i32, ptr %arrayidx4, align 4
+ store i32 %16, ptr @rjjj, align 4
+ %17 = load ptr, ptr %ip, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %17, i32 100
+ %18 = load i32, ptr %arrayidx5, align 4
+ store i32 %18, ptr @rkkk, align 4
+ %19 = load i32, ptr @t, align 4
+ %20 = load ptr, ptr %ip, align 4
+ %arrayidx6 = getelementptr inbounds i32, ptr %20, i32 %19
+ %21 = load i32, ptr %arrayidx6, align 4
; 16: addiu $sp, -16
- call void @temp(i32 %22)
+ call void @temp(i32 %21)
; 16: addiu $sp, 16
ret void
}
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
- %1 = load i32, i32* @y, align 4
+ %0 = load i32, ptr @x, align 4
+ %1 = load i32, ptr @y, align 4
%and = and i32 %0, %1
; 16: and ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %and)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; RUN: llc -march=mips64 -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSR6
; RUN: llc -march=mips64el -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSR6
-define i64 @test_max(i64* nocapture %ptr, i64 signext %val) {
+define i64 @test_max(ptr nocapture %ptr, i64 signext %val) {
; MIPS-LABEL: test_max:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPSR6-NEXT: sync
; MIPSR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw max i64* %ptr, i64 %val seq_cst
+ %0 = atomicrmw max ptr %ptr, i64 %val seq_cst
ret i64 %0
}
-define i64 @test_min(i64* nocapture %ptr, i64 signext %val) {
+define i64 @test_min(ptr nocapture %ptr, i64 signext %val) {
; MIPS-LABEL: test_min:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPSR6-NEXT: sync
; MIPSR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw min i64* %ptr, i64 %val seq_cst
+ %0 = atomicrmw min ptr %ptr, i64 %val seq_cst
ret i64 %0
}
-define i64 @test_umax(i64* nocapture %ptr, i64 zeroext %val) {
+define i64 @test_umax(ptr nocapture %ptr, i64 zeroext %val) {
; MIPS-LABEL: test_umax:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPSR6-NEXT: sync
; MIPSR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umax i64* %ptr, i64 %val seq_cst
+ %0 = atomicrmw umax ptr %ptr, i64 %val seq_cst
ret i64 %0
}
-define i64 @test_umin(i64* nocapture %ptr, i64 zeroext %val) {
+define i64 @test_umin(ptr nocapture %ptr, i64 zeroext %val) {
; MIPS-LABEL: test_umin:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPSR6-NEXT: sync
; MIPSR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umin i64* %ptr, i64 %val seq_cst
+ %0 = atomicrmw umin ptr %ptr, i64 %val seq_cst
ret i64 %0
}
; RUN: llc -march=mips64el -O0 -mcpu=mips64r2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS64EL
; RUN: llc -march=mips64el -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS64ELR6
-define i32 @test_max_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_max_32(ptr nocapture %ptr, i32 signext %val) {
; MIPS-LABEL: test_max_32:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPS64ELR6-NEXT: sync
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw max i32* %ptr, i32 %val seq_cst
+ %0 = atomicrmw max ptr %ptr, i32 %val seq_cst
ret i32 %0
}
-define i32 @test_min_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_min_32(ptr nocapture %ptr, i32 signext %val) {
; MIPS-LABEL: test_min_32:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPS64ELR6-NEXT: sync
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw min i32* %ptr, i32 %val seq_cst
+ %0 = atomicrmw min ptr %ptr, i32 %val seq_cst
ret i32 %0
}
-define i32 @test_umax_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_umax_32(ptr nocapture %ptr, i32 signext %val) {
; MIPS-LABEL: test_umax_32:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPS64ELR6-NEXT: sync
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umax i32* %ptr, i32 %val seq_cst
+ %0 = atomicrmw umax ptr %ptr, i32 %val seq_cst
ret i32 %0
}
-define i32 @test_umin_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_umin_32(ptr nocapture %ptr, i32 signext %val) {
; MIPS-LABEL: test_umin_32:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: sync
; MIPS64ELR6-NEXT: sync
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umin i32* %ptr, i32 %val seq_cst
+ %0 = atomicrmw umin ptr %ptr, i32 %val seq_cst
ret i32 %0
}
-define i16 @test_max_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_max_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS-LABEL: test_max_16:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw max i16* %ptr, i16 %val seq_cst
+ %0 = atomicrmw max ptr %ptr, i16 %val seq_cst
ret i16 %0
}
-define i16 @test_min_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_min_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS-LABEL: test_min_16:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw min i16* %ptr, i16 %val seq_cst
+ %0 = atomicrmw min ptr %ptr, i16 %val seq_cst
ret i16 %0
}
-define i16 @test_umax_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS-LABEL: test_umax_16:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umax i16* %ptr, i16 %val seq_cst
+ %0 = atomicrmw umax ptr %ptr, i16 %val seq_cst
ret i16 %0
}
-define i16 @test_umin_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS-LABEL: test_umin_16:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umin i16* %ptr, i16 %val seq_cst
+ %0 = atomicrmw umin ptr %ptr, i16 %val seq_cst
ret i16 %0
}
-define i8 @test_max_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_max_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS-LABEL: test_max_8:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw max i8* %ptr, i8 %val seq_cst
+ %0 = atomicrmw max ptr %ptr, i8 %val seq_cst
ret i8 %0
}
-define i8 @test_min_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_min_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS-LABEL: test_min_8:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw min i8* %ptr, i8 %val seq_cst
+ %0 = atomicrmw min ptr %ptr, i8 %val seq_cst
ret i8 %0
}
-define i8 @test_umax_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS-LABEL: test_umax_8:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umax i8* %ptr, i8 %val seq_cst
+ %0 = atomicrmw umax ptr %ptr, i8 %val seq_cst
ret i8 %0
}
-define i8 @test_umin_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS-LABEL: test_umin_8:
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -8
; MIPS64ELR6-NEXT: daddiu $sp, $sp, 16
; MIPS64ELR6-NEXT: jrc $ra
entry:
- %0 = atomicrmw umin i8* %ptr, i8 %val seq_cst
+ %0 = atomicrmw umin ptr %ptr, i8 %val seq_cst
ret i8 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw add i32* @x, i32 %incr monotonic
+ %0 = atomicrmw add ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw sub i32* @x, i32 %incr monotonic
+ %0 = atomicrmw sub ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw xor i32* @x, i32 %incr monotonic
+ %0 = atomicrmw xor ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw or i32* @x, i32 %incr monotonic
+ %0 = atomicrmw or ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw and i32* @x, i32 %incr monotonic
+ %0 = atomicrmw and ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw nand i32* @x, i32 %incr monotonic
+ %0 = atomicrmw nand ptr @x, i32 %incr monotonic
ret i32 %0
}
; MIPS32EB-NEXT: addiu $sp, $sp, 8
entry:
%newval.addr = alloca i32, align 4
- store i32 %newval, i32* %newval.addr, align 4
- %tmp = load i32, i32* %newval.addr, align 4
- %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
+ store i32 %newval, ptr %newval.addr, align 4
+ %tmp = load i32, ptr %newval.addr, align 4
+ %0 = atomicrmw xchg ptr @x, i32 %tmp monotonic
ret i32 %0
}
; MIPS32EB-NEXT: addiu $sp, $sp, 8
entry:
%newval.addr = alloca i32, align 4
- store i32 %newval, i32* %newval.addr, align 4
- %tmp = load i32, i32* %newval.addr, align 4
- %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
+ store i32 %newval, ptr %newval.addr, align 4
+ %tmp = load i32, ptr %newval.addr, align 4
+ %0 = cmpxchg ptr @x, i32 %oldval, i32 %tmp monotonic monotonic
%1 = extractvalue { i32, i1 } %0, 0
ret i32 %1
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw add i8* @y, i8 %incr monotonic
+ %0 = atomicrmw add ptr @y, i8 %incr monotonic
ret i8 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw sub i8* @y, i8 %incr monotonic
+ %0 = atomicrmw sub ptr @y, i8 %incr monotonic
ret i8 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw nand i8* @y, i8 %incr monotonic
+ %0 = atomicrmw nand ptr @y, i8 %incr monotonic
ret i8 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
+ %0 = atomicrmw xchg ptr @y, i8 %newval monotonic
ret i8 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %pair0 = cmpxchg ptr @y, i8 %oldval, i8 %newval monotonic monotonic
%0 = extractvalue { i8, i1 } %pair0, 0
ret i8 %0
}
-define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 signext %oldval, i8 signext %newval) nounwind {
+define i1 @AtomicCmpSwapRes8(ptr %ptr, i8 signext %oldval, i8 signext %newval) nounwind {
; MIPS32-LABEL: AtomicCmpSwapRes8:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $1, $zero, -4
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: sltiu $2, $1, 1
entry:
- %0 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval monotonic monotonic
+ %0 = cmpxchg ptr %ptr, i8 %oldval, i8 %newval monotonic monotonic
%1 = extractvalue { i8, i1 } %0, 1
ret i1 %1
; FIXME: -march=mips produces a redundant sign extension here...
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw add i16* @z, i16 %incr monotonic
+ %0 = atomicrmw add ptr @z, i16 %incr monotonic
ret i16 %0
}
; value.
; The rest of the functions here are testing the atomic expansion, so
; we just match the end of the function.
-define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
+define {i16, i1} @foo(ptr %addr, i16 %l, i16 %r, i16 %new) {
; MIPS32-LABEL: foo:
; MIPS32: # %bb.0:
; MIPS32-NEXT: addu $1, $5, $6
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
%desired = add i16 %l, %r
- %res = cmpxchg i16* %addr, i16 %desired, i16 %new seq_cst seq_cst
+ %res = cmpxchg ptr %addr, i16 %desired, i16 %new seq_cst seq_cst
ret {i16, i1} %res
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
+ %0 = atomicrmw add ptr @countsint, i32 %v seq_cst
ret i32 %0
}
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %pair0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+ %pair0 = cmpxchg ptr @a, i32 1, i32 0 seq_cst seq_cst
%0 = extractvalue { i32, i1 } %pair0, 0
%1 = icmp eq i32 %0, 1
%conv = zext i1 %1 to i32
; MIPS32EB-NEXT: jr $ra
; MIPS32EB-NEXT: nop
entry:
- %0 = atomicrmw add i32* getelementptr(i32, i32* @x, i32 256), i32 %incr monotonic
+ %0 = atomicrmw add ptr getelementptr(i32, ptr @x, i32 256), i32 %incr monotonic
ret i32 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw add i64* @x, i64 %incr monotonic
+ %0 = atomicrmw add ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw sub i64* @x, i64 %incr monotonic
+ %0 = atomicrmw sub ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw and i64* @x, i64 %incr monotonic
+ %0 = atomicrmw and ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw or i64* @x, i64 %incr monotonic
+ %0 = atomicrmw or ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw xor i64* @x, i64 %incr monotonic
+ %0 = atomicrmw xor ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
- %0 = atomicrmw nand i64* @x, i64 %incr monotonic
+ %0 = atomicrmw nand ptr @x, i64 %incr monotonic
ret i64 %0
}
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
%newval.addr = alloca i64, align 4
- store i64 %newval, i64* %newval.addr, align 4
- %tmp = load i64, i64* %newval.addr, align 4
- %0 = atomicrmw xchg i64* @x, i64 %tmp monotonic
+ store i64 %newval, ptr %newval.addr, align 4
+ %tmp = load i64, ptr %newval.addr, align 4
+ %0 = atomicrmw xchg ptr @x, i64 %tmp monotonic
ret i64 %0
}
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
%newval.addr = alloca i64, align 4
- store i64 %newval, i64* %newval.addr, align 4
- %tmp = load i64, i64* %newval.addr, align 4
- %0 = cmpxchg i64* @x, i64 %oldval, i64 %tmp monotonic monotonic
+ store i64 %newval, ptr %newval.addr, align 4
+ %tmp = load i64, ptr %newval.addr, align 4
+ %0 = cmpxchg ptr @x, i64 %oldval, i64 %tmp monotonic monotonic
%1 = extractvalue { i64, i1 } %0, 0
ret i64 %1
; RUN: llc -O0 -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 -target-abi=n64 < %s -filetype=asm -o - \
; RUN: | FileCheck -check-prefixes=N64 %s
-@sym = external global i32 *
+@sym = external global ptr
define void @foo(i32 %new, i32 %old) {
; O32-LABEL: foo:
; N64-NEXT: jr $ra
; N64-NEXT: nop
entry:
- %0 = load i32 *, i32 ** @sym
- cmpxchg i32 * %0, i32 %new, i32 %old seq_cst seq_cst
+ %0 = load ptr, ptr @sym
+ cmpxchg ptr %0, i32 %new, i32 %old seq_cst seq_cst
ret void
}
@.str = private unnamed_addr constant [8 x i8] c"%d, %d\0A\00", align 1
-define i32 @foo(i32* %mem, i32 %val, i32 %c) nounwind {
+define i32 @foo(ptr %mem, i32 %val, i32 %c) nounwind {
entry:
- %0 = atomicrmw add i32* %mem, i32 %val seq_cst
+ %0 = atomicrmw add ptr %mem, i32 %val seq_cst
%add = add nsw i32 %0, %c
ret i32 %add
; 16-LABEL: foo:
define i32 @main() nounwind {
entry:
%x = alloca i32, align 4
- store volatile i32 0, i32* %x, align 4
- %0 = atomicrmw add i32* %x, i32 1 seq_cst
+ store volatile i32 0, ptr %x, align 4
+ %0 = atomicrmw add ptr %x, i32 1 seq_cst
%add.i = add nsw i32 %0, 2
- %1 = load volatile i32, i32* %x, align 4
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
- %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
+ %1 = load volatile i32, ptr %x, align 4
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %add.i, i32 %1) nounwind
+ %pair = cmpxchg ptr %x, i32 1, i32 2 seq_cst seq_cst
%2 = extractvalue { i32, i1 } %pair, 0
- %3 = load volatile i32, i32* %x, align 4
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
- %4 = atomicrmw xchg i32* %x, i32 1 seq_cst
- %5 = load volatile i32, i32* %x, align 4
- %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
+ %3 = load volatile i32, ptr %x, align 4
+ %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %2, i32 %3) nounwind
+ %4 = atomicrmw xchg ptr %x, i32 1 seq_cst
+ %5 = load volatile i32, ptr %x, align 4
+ %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %4, i32 %5) nounwind
; 16-LABEL: main:
; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}})
ret i32 0
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
; Function Attrs: nounwind optsize
define i32 @main() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
%. = select i1 %cmp, i32 10, i32 55
- store i32 %., i32* @j, align 4
+ store i32 %., ptr @j, align 4
; cond-b-short: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
ret i32 0
}
; Function Attrs: nounwind optsize
define i32 @main() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end
; cond-b-short: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
if.then: ; preds = %entry
- store i32 10, i32* @j, align 4
+ store i32 10, ptr @j, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
; N64-DAG: ld ${{[0-9]+}}, %got_lo(v0)($[[R1]])
; N64-DAG: ld ${{[0-9]+}}, %call_lo(foo0)($[[R3]])
- %0 = load i32, i32* @v0, align 4
+ %0 = load i32, ptr @v0, align 4
tail call void @foo0(i32 %0) nounwind
ret void
}
; call to external function.
-define void @foo2(i32* nocapture %d, i32* nocapture %s, i32 %n) nounwind {
+define void @foo2(ptr nocapture %d, ptr nocapture %s, i32 %n) nounwind {
entry:
; O32-LABEL: foo2:
; O32: lui $[[R2:[0-9]+]], %call_hi(memcpy)
; N64: daddu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
; N64: ld ${{[0-9]+}}, %call_lo(memcpy)($[[R3]])
- %0 = bitcast i32* %d to i8*
- %1 = bitcast i32* %s to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %d, ptr align 4 %s, i32 %n, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 \
; RUN: -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16
-@reg = common global i8* null, align 4
+@reg = common global ptr null, align 4
-define i8* @dummy(i8* %x) nounwind readnone noinline {
+define ptr @dummy(ptr %x) nounwind readnone noinline {
entry:
- ret i8* %x
+ ret ptr %x
}
; PIC-O32: lw $[[R0:[0-9]+]], %got($tmp[[T0:[0-9]+]])
define void @f() nounwind {
entry:
- %call = tail call i8* @dummy(i8* blockaddress(@f, %baz))
- indirectbr i8* %call, [label %baz, label %foo]
+ %call = tail call ptr @dummy(ptr blockaddress(@f, %baz))
+ indirectbr ptr %call, [label %baz, label %foo]
foo: ; preds = %foo, %entry
- store i8* blockaddress(@f, %foo), i8** @reg, align 4
+ store ptr blockaddress(@f, %foo), ptr @reg, align 4
br label %foo
baz: ; preds = %entry
- store i8* null, i8** @reg, align 4
+ store ptr null, ptr @reg, align 4
ret void
}
declare i32 @boo(...)
declare i32 @foo(...)
-define i32 @main(i32 signext %argc, i8** %argv) {
+define i32 @main(i32 signext %argc, ptr %argv) {
; CHECK: main:
; CHECK: # %bb.1:
; CHECK-PIC: addiu
entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
- %argv.addr = alloca i8**, align 4
- store i32 0, i32* %retval, align 4
- store i32 %argc, i32* %argc.addr, align 4
- store i8** %argv, i8*** %argv.addr, align 4
- %0 = load i32, i32* %argc.addr, align 4
+ %argv.addr = alloca ptr, align 4
+ store i32 0, ptr %retval, align 4
+ store i32 %argc, ptr %argc.addr, align 4
+ store ptr %argv, ptr %argv.addr, align 4
+ %0 = load i32, ptr %argc.addr, align 4
%cmp = icmp sgt i32 %0, 1
br i1 %cmp, label %if.then, label %if.end4
if.then:
call void asm sideeffect ".space 10", "~{$1}"()
- %1 = load i32, i32* %argc.addr, align 4
+ %1 = load i32, ptr %argc.addr, align 4
%cmp1 = icmp sgt i32 %1, 3
br i1 %cmp1, label %if.then2, label %if.end
if.then2:
call void asm sideeffect ".space 10", "~{$1}"()
- %call = call i32 bitcast (i32 (...)* @boo to i32 ()*)()
- store i32 %call, i32* %retval, align 4
+ %call = call i32 @boo()
+ store i32 %call, ptr %retval, align 4
br label %return
if.end:
call void asm sideeffect ".space 4194228", "~{$1}"()
- %call3 = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
- store i32 %call3, i32* %retval, align 4
+ %call3 = call i32 @foo()
+ store i32 %call3, ptr %retval, align 4
br label %return
if.end4:
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
br label %return
return:
- %2 = load i32, i32* %retval, align 4
+ %2 = load i32, ptr %retval, align 4
ret i32 %2
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp eq i32 %0, %1
; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 10
br i1 %cmp, label %if.end, label %if.then
; 16: cmpi ${{[0-9]+}}, {{[0-9]+}}
; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.end, label %if.then
; 16: beqz ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp slt i32 %0, %1
br i1 %cmp, label %if.then, label %if.end
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result1, align 4
+ store i32 1, ptr @result1, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
- %2 = load i32, i32* @k, align 4
+ %2 = load i32, ptr @k, align 4
%cmp1 = icmp slt i32 %0, %2
br i1 %cmp1, label %if.then2, label %if.end3
if.then2: ; preds = %if.end
- store i32 1, i32* @result1, align 4
+ store i32 1, ptr @result1, align 4
br label %if.end3
if.end3: ; preds = %if.then2, %if.end
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp sgt i32 %0, %1
br i1 %cmp, label %if.end, label %if.then
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; 16: btnez $[[LABEL:[0-9A-Ba-b_]+]]
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @i, align 4
%cmp = icmp sgt i32 %0, %1
br i1 %cmp, label %if.then, label %if.end
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result1, align 4
+ store i32 1, ptr @result1, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
- %2 = load i32, i32* @k, align 4
+ %2 = load i32, ptr @k, align 4
%cmp1 = icmp sgt i32 %1, %2
br i1 %cmp1, label %if.then2, label %if.end3
if.then2: ; preds = %if.end
- store i32 0, i32* @result1, align 4
+ store i32 0, ptr @result1, align 4
br label %if.end3
if.end3: ; preds = %if.then2, %if.end
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @i, align 4
%cmp = icmp slt i32 %0, %1
br i1 %cmp, label %if.end, label %if.then
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.end
; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @j, align 4
%cmp = icmp eq i32 %0, 5
br i1 %cmp, label %if.then, label %if.end
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @j, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end, !prof !1
; 16: $[[LABEL]]:
if.then: ; preds = %entry
- store i32 1, i32* @result, align 4
+ store i32 1, ptr @result, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
define void @foo5(i32 %a) nounwind {
entry:
- %0 = load i32, i32* @g2, align 4
+ %0 = load i32, ptr @g2, align 4
%tobool = icmp eq i32 %a, 0
br i1 %tobool, label %if.else, label %if.then
if.then:
- %1 = load i32, i32* @g1, align 4
+ %1 = load i32, ptr @g1, align 4
%add = add nsw i32 %1, %0
- store i32 %add, i32* @g1, align 4
+ store i32 %add, ptr @g1, align 4
br label %if.end
if.else:
- %2 = load i32, i32* @g3, align 4
+ %2 = load i32, ptr @g3, align 4
%sub = sub nsw i32 %2, %0
- store i32 %sub, i32* @g3, align 4
+ store i32 %sub, ptr @g3, align 4
br label %if.end
if.end:
; STATICO1: jalr ${{[0-9]+}}
; STATICO1-NEXT: sw ${{[0-9]+}}, %lo(g1)
-@foo9 = common global void ()* null, align 4
+@foo9 = common global ptr null, align 4
define i32 @foo8(i32 %a) nounwind {
entry:
- store i32 %a, i32* @g1, align 4
- %0 = load void ()*, void ()** @foo9, align 4
+ store i32 %a, ptr @g1, align 4
+ %0 = load ptr, ptr @foo9, align 4
call void %0() nounwind
- %1 = load i32, i32* @g1, align 4
+ %1 = load i32, ptr @g1, align 4
%add = add nsw i32 %1, %a
ret i32 %add
}
entry:
tail call void @foo11() nounwind
tail call void @foo11() nounwind
- store i32 0, i32* @g1, align 4
+ store i32 0, ptr @g1, align 4
tail call void @foo11() nounwind
- store i32 0, i32* @g1, align 4
+ store i32 0, ptr @g1, align 4
ret void
}
; SUCCBB: bnez ${{[0-9]+}}, $BB
; SUCCBB-NEXT: addiu
-define i32 @succbbs_loop1(i32* nocapture %a, i32 %n) {
+define i32 @succbbs_loop1(ptr nocapture %a, i32 %n) {
entry:
%cmp4 = icmp sgt i32 %n, 0
br i1 %cmp4, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
%s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, %s.06
%inc = add nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, %n
entry:
%0 = trunc i32 %a to i1
%1 = select i1 %0,
- i8* blockaddress(@test1, %bb),
- i8* blockaddress(@test1, %bb6)
- indirectbr i8* %1, [label %bb, label %bb6]
+ ptr blockaddress(@test1, %bb),
+ ptr blockaddress(@test1, %bb6)
+ indirectbr ptr %1, [label %bb, label %bb6]
; STATIC: PseudoIndirectBranch
; STATIC-MM: PseudoIndirectBranch
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
-@main.L = internal unnamed_addr constant [5 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* blockaddress(@main, %L3), i8* blockaddress(@main, %L4), i8* null], align 4
+@main.L = internal unnamed_addr constant [5 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr blockaddress(@main, %L3), ptr blockaddress(@main, %L4), ptr null], align 4
@str = private unnamed_addr constant [2 x i8] c"A\00"
@str5 = private unnamed_addr constant [2 x i8] c"B\00"
@str6 = private unnamed_addr constant [2 x i8] c"C\00"
define i32 @main() nounwind {
entry:
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+ %puts = tail call i32 @puts(ptr @str)
br label %L1
L1: ; preds = %entry, %L3
%i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ]
- %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str5, i32 0, i32 0))
+ %puts5 = tail call i32 @puts(ptr @str5)
br label %L2
L2: ; preds = %L1, %L3
%i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ]
- %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str6, i32 0, i32 0))
+ %puts6 = tail call i32 @puts(ptr @str6)
br label %L3
L3: ; preds = %L2, %L3
%i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
- %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str7, i32 0, i32 0))
+ %puts7 = tail call i32 @puts(ptr @str7)
%inc = add i32 %i.2, 1
- %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
- %0 = load i8*, i8** %arrayidx, align 4
- indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
+ %arrayidx = getelementptr inbounds [5 x ptr], ptr @main.L, i32 0, i32 %i.2
+ %0 = load ptr, ptr %arrayidx, align 4
+ indirectbr ptr %0, [label %L1, label %L2, label %L3, label %L4]
; 16: jrc ${{[0-9]+}}
L4: ; preds = %L3
- %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str8, i32 0, i32 0))
+ %puts8 = tail call i32 @puts(ptr @str8)
ret i32 0
}
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
define void @ham() {
bb:
%tmp = alloca i32, align 4
- %tmp13 = ptrtoint i32* %tmp to i32
+ %tmp13 = ptrtoint ptr %tmp to i32
%tmp70 = icmp eq i32 undef, -1
br i1 %tmp70, label %bb72, label %bb40
bb40: ; preds = %bb72, %bb
%tmp41 = phi i32 [ %tmp13, %bb72 ], [ %tmp13, %bb ]
- %tmp55 = inttoptr i32 %tmp41 to i32*
- %tmp58 = insertelement <2 x i32*> undef, i32* %tmp55, i32 1
+ %tmp55 = inttoptr i32 %tmp41 to ptr
+ %tmp58 = insertelement <2 x ptr> undef, ptr %tmp55, i32 1
br label %bb59
bb59: ; preds = %bb59, %bb40
- %tmp60 = phi <2 x i32*> [ %tmp61, %bb59 ], [ %tmp58, %bb40 ]
- %tmp61 = getelementptr i32, <2 x i32*> %tmp60, <2 x i32> <i32 -1, i32 1>
- %tmp62 = extractelement <2 x i32*> %tmp61, i32 1
+ %tmp60 = phi <2 x ptr> [ %tmp61, %bb59 ], [ %tmp58, %bb40 ]
+ %tmp61 = getelementptr i32, <2 x ptr> %tmp60, <2 x i32> <i32 -1, i32 1>
+ %tmp62 = extractelement <2 x ptr> %tmp61, i32 1
br label %bb59
}
define double @f(i32 %a1, double %d) nounwind {
entry:
- store i32 %a1, i32* @a, align 4
+ store i32 %a1, ptr @a, align 4
%add = fadd double %d, 2.000000e+00
ret double %add
}
define i32 @main() {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
- %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str1, i32 0, i32 0)) #3
- call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
+ store i32 0, ptr %retval
+ %call = call i32 (ptr, ...) @printf(ptr @.str, ptr @buffer)
+ %call1 = call ptr @strcpy(ptr @buffer, ptr @.str1) #3
+ call void @llvm.clear_cache(ptr @buffer, ptr getelementptr inbounds (i8, ptr @buffer, i32 32)) #3
+ %call2 = call i32 (ptr, ...) @printf(ptr @.str, ptr @buffer)
ret i32 0
}
; CHECK: __clear_cache
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
-declare i8* @strcpy(i8*, i8*)
+declare ptr @strcpy(ptr, ptr)
-declare void @llvm.clear_cache(i8*, i8*)
+declare void @llvm.clear_cache(ptr, ptr)
entry:
%call = tail call double @ceil(double %d)
%call1 = tail call double @ceil(double %call)
- store double %call1, double* @gd2, align 8
+ store double %call1, ptr @gd2, align 8
%call2 = tail call double @ceil(double %call1)
- store double %call2, double* @gd1, align 8
+ store double %call2, ptr @gd1, align 8
ret void
}
define void @double_args(double %a, double %b, double %c, double %d, double %e,
double %f, double %g, double %h, double %i) nounwind {
entry:
- %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
- store volatile double %a, double* %0
- %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
- store volatile double %b, double* %1
- %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
- store volatile double %c, double* %2
- %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
- store volatile double %d, double* %3
- %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
- store volatile double %e, double* %4
- %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
- store volatile double %f, double* %5
- %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
- store volatile double %g, double* %6
- %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
- store volatile double %h, double* %7
- %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
- store volatile double %i, double* %8
+ %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+ store volatile double %a, ptr %0
+ %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+ store volatile double %b, ptr %1
+ %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3
+ store volatile double %c, ptr %2
+ %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4
+ store volatile double %d, ptr %3
+ %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5
+ store volatile double %e, ptr %4
+ %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6
+ store volatile double %f, ptr %5
+ %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7
+ store volatile double %g, ptr %6
+ %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8
+ store volatile double %h, ptr %7
+ %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9
+ store volatile double %i, ptr %8
ret void
}
float %f, float %g, float %h, float %i, float %j)
nounwind {
entry:
- %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
- store volatile float %a, float* %0
- %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
- store volatile float %b, float* %1
- %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
- store volatile float %c, float* %2
- %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
- store volatile float %d, float* %3
- %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
- store volatile float %e, float* %4
- %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
- store volatile float %f, float* %5
- %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
- store volatile float %g, float* %6
- %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
- store volatile float %h, float* %7
- %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
- store volatile float %i, float* %8
- %9 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 10
- store volatile float %j, float* %9
+ %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+ store volatile float %a, ptr %0
+ %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+ store volatile float %b, ptr %1
+ %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3
+ store volatile float %c, ptr %2
+ %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4
+ store volatile float %d, ptr %3
+ %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5
+ store volatile float %e, ptr %4
+ %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6
+ store volatile float %f, ptr %5
+ %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7
+ store volatile float %g, ptr %6
+ %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8
+ store volatile float %h, ptr %7
+ %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9
+ store volatile float %i, ptr %8
+ %9 = getelementptr [11 x float], ptr @floats, i32 0, i32 10
+ store volatile float %j, ptr %9
ret void
}
define void @double_arg2(i8 %a, double %b) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
- store volatile double %b, double* %1
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+ store volatile double %b, ptr %1
ret void
}
define void @float_arg2(i8 signext %a, float %b) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
- store volatile float %b, float* %1
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+ store volatile float %b, ptr %1
ret void
}
define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
entry:
- %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
- store volatile fp128 %a, fp128* %0
- %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
- store volatile fp128 %b, fp128* %1
- %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
- store volatile fp128 %c, fp128* %2
- %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
- store volatile fp128 %d, fp128* %3
- %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
- store volatile fp128 %e, fp128* %4
+ %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1
+ store volatile fp128 %a, ptr %0
+ %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2
+ store volatile fp128 %b, ptr %1
+ %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3
+ store volatile fp128 %c, ptr %2
+ %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4
+ store volatile fp128 %d, ptr %3
+ %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5
+ store volatile fp128 %e, ptr %4
ret void
}
define void @double_args(double %a, ...)
nounwind {
entry:
- %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
- store volatile double %a, double* %0
-
- %ap = alloca i8*
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
- %b = va_arg i8** %ap, double
- %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
- store volatile double %b, double* %1
- call void @llvm.va_end(i8* %ap2)
+ %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+ store volatile double %a, ptr %0
+
+ %ap = alloca ptr
+ call void @llvm.va_start(ptr %ap)
+ %b = va_arg ptr %ap, double
+ %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+ store volatile double %b, ptr %1
+ call void @llvm.va_end(ptr %ap)
ret void
}
define void @float_args(float %a, ...) nounwind {
entry:
- %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
- store volatile float %a, float* %0
-
- %ap = alloca i8*
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
- %b = va_arg i8** %ap, float
- %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
- store volatile float %b, float* %1
- call void @llvm.va_end(i8* %ap2)
+ %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+ store volatile float %a, ptr %0
+
+ %ap = alloca ptr
+ call void @llvm.va_start(ptr %ap)
+ %b = va_arg ptr %ap, float
+ %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+ store volatile float %b, ptr %1
+ call void @llvm.va_end(ptr %ap)
ret void
}
; NEWBE-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp)
; ALL-DAG: swc1 [[FTMP1]], 8([[R2]])
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
define void @double_args(double %a, double %b, double %c, double %d, double %e,
double %f, double %g, double %h, double %i) nounwind {
entry:
- %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
- store volatile double %a, double* %0
- %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
- store volatile double %b, double* %1
- %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
- store volatile double %c, double* %2
- %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
- store volatile double %d, double* %3
- %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
- store volatile double %e, double* %4
- %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
- store volatile double %f, double* %5
- %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
- store volatile double %g, double* %6
- %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
- store volatile double %h, double* %7
- %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
- store volatile double %i, double* %8
+ %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+ store volatile double %a, ptr %0
+ %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+ store volatile double %b, ptr %1
+ %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3
+ store volatile double %c, ptr %2
+ %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4
+ store volatile double %d, ptr %3
+ %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5
+ store volatile double %e, ptr %4
+ %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6
+ store volatile double %f, ptr %5
+ %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7
+ store volatile double %g, ptr %6
+ %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8
+ store volatile double %h, ptr %7
+ %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9
+ store volatile double %i, ptr %8
ret void
}
define void @float_args(float %a, float %b, float %c, float %d, float %e,
float %f, float %g, float %h, float %i) nounwind {
entry:
- %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
- store volatile float %a, float* %0
- %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
- store volatile float %b, float* %1
- %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
- store volatile float %c, float* %2
- %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
- store volatile float %d, float* %3
- %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
- store volatile float %e, float* %4
- %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
- store volatile float %f, float* %5
- %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
- store volatile float %g, float* %6
- %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
- store volatile float %h, float* %7
- %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
- store volatile float %i, float* %8
+ %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+ store volatile float %a, ptr %0
+ %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+ store volatile float %b, ptr %1
+ %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3
+ store volatile float %c, ptr %2
+ %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4
+ store volatile float %d, ptr %3
+ %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5
+ store volatile float %e, ptr %4
+ %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6
+ store volatile float %f, ptr %5
+ %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7
+ store volatile float %g, ptr %6
+ %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8
+ store volatile float %h, ptr %7
+ %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9
+ store volatile float %i, ptr %8
ret void
}
define void @double_arg2(i8 %a, double %b) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
- store volatile double %b, double* %1
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+ store volatile double %b, ptr %1
ret void
}
define void @float_arg2(i8 %a, float %b) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
- store volatile float %b, float* %1
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+ store volatile float %b, ptr %1
ret void
}
define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
entry:
- %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
- store volatile fp128 %a, fp128* %0
- %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
- store volatile fp128 %b, fp128* %1
- %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
- store volatile fp128 %c, fp128* %2
- %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
- store volatile fp128 %d, fp128* %3
- %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
- store volatile fp128 %e, fp128* %4
+ %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1
+ store volatile fp128 %a, ptr %0
+ %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2
+ store volatile fp128 %b, ptr %1
+ %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3
+ store volatile fp128 %c, ptr %2
+ %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4
+ store volatile fp128 %d, ptr %3
+ %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5
+ store volatile fp128 %e, ptr %4
ret void
}
declare void @fS1(i48 inreg) #1
declare void @fS2(i40 inreg) #1
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
define void @f1() #0 {
entry:
%s1_1 = alloca %struct.S1, align 2
%s1_1.coerce = alloca { i48 }
- %0 = bitcast { i48 }* %s1_1.coerce to i8*
- %1 = bitcast %struct.S1* %s1_1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 6, i1 false)
- %2 = getelementptr { i48 }, { i48 }* %s1_1.coerce, i32 0, i32 0
- %3 = load i48, i48* %2, align 1
- call void @fS1(i48 inreg %3)
+ call void @llvm.memcpy.p0.p0.i64(ptr %s1_1.coerce, ptr %s1_1, i64 6, i1 false)
+ %0 = load i48, ptr %s1_1.coerce, align 1
+ call void @fS1(i48 inreg %0)
ret void
; ALL-LABEL: f1:
entry:
%s2_1 = alloca %struct.S2, align 1
%s2_1.coerce = alloca { i40 }
- %0 = bitcast { i40 }* %s2_1.coerce to i8*
- %1 = bitcast %struct.S2* %s2_1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 5, i1 false)
- %2 = getelementptr { i40 }, { i40 }* %s2_1.coerce, i32 0, i32 0
- %3 = load i40, i40* %2, align 1
- call void @fS2(i40 inreg %3)
+ call void @llvm.memcpy.p0.p0.i64(ptr %s2_1.coerce, ptr %s2_1, i64 5, i1 false)
+ %0 = load i40, ptr %s2_1.coerce, align 1
+ call void @fS2(i40 inreg %0)
ret void
; ALL-LABEL: f2:
define void @s_i8(i8 inreg %a) nounwind {
entry:
- store i8 %a, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @bytes, i32 0, i32 1)
+ store i8 %a, ptr getelementptr inbounds ([2 x i8], ptr @bytes, i32 0, i32 1)
ret void
}
@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
-declare void @varArgF_SmallStruct(i8* %c, ...)
+declare void @varArgF_SmallStruct(ptr %c, ...)
-define void @smallStruct_1b(%struct.SmallStruct_1b* %ss) #0 {
+define void @smallStruct_1b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_1b*, align 8
- store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
- %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
- %3 = load i8, i8* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i8, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i8 inreg %1)
ret void
; CHECK-LABEL: smallStruct_1b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
}
-define void @smallStruct_2b(%struct.SmallStruct_2b* %ss) #0 {
+define void @smallStruct_2b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_2b*, align 8
- store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_2b*, %struct.SmallStruct_2b** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
- %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
- %3 = load i16, i16* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i16, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i16 inreg %1)
ret void
; CHECK-LABEL: smallStruct_2b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 48
}
-define void @smallStruct_3b(%struct.SmallStruct_3b* %ss) #0 {
+define void @smallStruct_3b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_3b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i24 }
- store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8
- %1 = bitcast { i24 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i1 false)
- %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
- %4 = load i24, i24* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 3, i1 false)
+ %1 = load i24, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i24 inreg %1)
ret void
; CHECK-LABEL: smallStruct_3b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 40
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
-define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 {
+define void @smallStruct_4b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_4b*, align 8
- store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_4b*, %struct.SmallStruct_4b** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
- %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
- %3 = load i32, i32* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i32, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i32 inreg %1)
ret void
; CHECK-LABEL: smallStruct_4b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
}
-define void @smallStruct_5b(%struct.SmallStruct_5b* %ss) #0 {
+define void @smallStruct_5b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_5b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i40 }
- store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8
- %1 = bitcast { i40 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_5b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i1 false)
- %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0
- %4 = load i40, i40* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 5, i1 false)
+ %1 = load i40, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i40 inreg %1)
ret void
; CHECK-LABEL: smallStruct_5b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
}
-define void @smallStruct_6b(%struct.SmallStruct_6b* %ss) #0 {
+define void @smallStruct_6b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_6b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i48 }
- store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8
- %1 = bitcast { i48 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_6b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
- %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
- %4 = load i48, i48* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+ %1 = load i48, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
ret void
; CHECK-LABEL: smallStruct_6b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
}
-define void @smallStruct_7b(%struct.SmallStruct_7b* %ss) #0 {
+define void @smallStruct_7b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_7b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i56 }
- store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8
- %1 = bitcast { i56 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_7b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i1 false)
- %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0
- %4 = load i56, i56* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 7, i1 false)
+ %1 = load i56, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i56 inreg %1)
ret void
; CHECK-LABEL: smallStruct_7b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 8
}
-define void @smallStruct_8b(%struct.SmallStruct_8b* %ss) #0 {
+define void @smallStruct_8b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_8b*, align 8
- store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_8b*, %struct.SmallStruct_8b** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
- %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
- %3 = load i64, i64* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i64, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
ret void
; CHECK-LABEL: smallStruct_8b:
; Check that the structure is not shifted before the pointer to str is loaded.
; CHECK: lui
}
-define void @smallStruct_9b(%struct.SmallStruct_9b* %ss) #0 {
+define void @smallStruct_9b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_9b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i64, i8 }
- store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8
- %1 = bitcast { i64, i8 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_9b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i1 false)
- %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0
- %4 = load i64, i64* %3, align 1
- %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1
- %6 = load i8, i8* %5, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 9, i1 false)
+ %1 = getelementptr { i64, i8 }, ptr %.coerce, i32 0, i32 0
+ %2 = load i64, ptr %1, align 1
+ %3 = getelementptr { i64, i8 }, ptr %.coerce, i32 0, i32 1
+ %4 = load i8, ptr %3, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %2, i8 inreg %4)
ret void
; CHECK-LABEL: smallStruct_9b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
-declare void @varArgF_SmallStruct(i8* %c, ...)
+declare void @varArgF_SmallStruct(ptr %c, ...)
-define void @smallStruct_1b1s(%struct.SmallStruct_1b1s* %ss) #0 {
+define void @smallStruct_1b1s(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8
- store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1b1s*, %struct.SmallStruct_1b1s** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
- %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
- %3 = load i32, i32* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i32, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i32 inreg %1)
ret void
; CHECK-LABEL: smallStruct_1b1s:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
}
-define void @smallStruct_1b1i(%struct.SmallStruct_1b1i* %ss) #0 {
+define void @smallStruct_1b1i(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8
- store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1b1i*, %struct.SmallStruct_1b1i** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
- %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
- %3 = load i64, i64* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i64, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
ret void
; CHECK-LABEL: smallStruct_1b1i:
; CHECK-NOT: dsll
; CHECK: lui
}
-define void @smallStruct_1b1s1b(%struct.SmallStruct_1b1s1b* %ss) #0 {
+define void @smallStruct_1b1s1b(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i48 }
- store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
- %1 = bitcast { i48 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
- %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
- %4 = load i48, i48* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+ %1 = load i48, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
ret void
; CHECK-LABEL: smallStruct_1b1s1b:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
-define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 {
+define void @smallStruct_1s1i(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8
- store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1s1i*, %struct.SmallStruct_1s1i** %ss.addr, align 8
- %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
- %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
- %3 = load i64, i64* %2, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ %ss.addr = alloca ptr, align 8
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ %1 = load i64, ptr %0, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
ret void
; CHECK-LABEL: smallStruct_1s1i:
; CHECK-NOT: dsll
; CHECK: lui
}
-define void @smallStruct_3b1s(%struct.SmallStruct_3b1s* %ss) #0 {
+define void @smallStruct_3b1s(ptr %ss) #0 {
entry:
- %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8
+ %ss.addr = alloca ptr, align 8
%.coerce = alloca { i48 }
- store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8
- %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8
- %1 = bitcast { i48 }* %.coerce to i8*
- %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
- %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
- %4 = load i48, i48* %3, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ store ptr %ss, ptr %ss.addr, align 8
+ %0 = load ptr, ptr %ss.addr, align 8
+ call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+ %1 = load i48, ptr %.coerce, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
ret void
; CHECK-LABEL: smallStruct_3b1s:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
-declare void @varArgF_SmallStruct(i8* %c, ...)
+declare void @varArgF_SmallStruct(ptr %c, ...)
-define void @smallStruct_1b_x9(%struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b* %ss9) #0 {
+define void @smallStruct_1b_x9(ptr %ss1, ptr %ss2, ptr %ss3, ptr %ss4, ptr %ss5, ptr %ss6, ptr %ss7, ptr %ss8, ptr %ss9) #0 {
entry:
- %ss1.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss2.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss3.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss4.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss5.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss6.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss7.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss8.addr = alloca %struct.SmallStruct_1b*, align 8
- %ss9.addr = alloca %struct.SmallStruct_1b*, align 8
- store %struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b** %ss1.addr, align 8
- store %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b** %ss2.addr, align 8
- store %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b** %ss3.addr, align 8
- store %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b** %ss4.addr, align 8
- store %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b** %ss5.addr, align 8
- store %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b** %ss6.addr, align 8
- store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8
- store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8
- store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8
- %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss1.addr, align 8
- %1 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss2.addr, align 8
- %2 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss3.addr, align 8
- %3 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss4.addr, align 8
- %4 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss5.addr, align 8
- %5 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss6.addr, align 8
- %6 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss7.addr, align 8
- %7 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss8.addr, align 8
- %8 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss9.addr, align 8
- %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
- %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0
- %11 = load i8, i8* %10, align 1
- %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
- %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0
- %14 = load i8, i8* %13, align 1
- %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
- %16 = getelementptr { i8 }, { i8 }* %15, i32 0, i32 0
- %17 = load i8, i8* %16, align 1
- %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
- %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0
- %20 = load i8, i8* %19, align 1
- %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
- %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0
- %23 = load i8, i8* %22, align 1
- %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
- %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0
- %26 = load i8, i8* %25, align 1
- %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
- %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0
- %29 = load i8, i8* %28, align 1
- %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
- %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0
- %32 = load i8, i8* %31, align 1
- %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
- %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0
- %35 = load i8, i8* %34, align 1
- call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
+ %ss1.addr = alloca ptr, align 8
+ %ss2.addr = alloca ptr, align 8
+ %ss3.addr = alloca ptr, align 8
+ %ss4.addr = alloca ptr, align 8
+ %ss5.addr = alloca ptr, align 8
+ %ss6.addr = alloca ptr, align 8
+ %ss7.addr = alloca ptr, align 8
+ %ss8.addr = alloca ptr, align 8
+ %ss9.addr = alloca ptr, align 8
+ store ptr %ss1, ptr %ss1.addr, align 8
+ store ptr %ss2, ptr %ss2.addr, align 8
+ store ptr %ss3, ptr %ss3.addr, align 8
+ store ptr %ss4, ptr %ss4.addr, align 8
+ store ptr %ss5, ptr %ss5.addr, align 8
+ store ptr %ss6, ptr %ss6.addr, align 8
+ store ptr %ss7, ptr %ss7.addr, align 8
+ store ptr %ss8, ptr %ss8.addr, align 8
+ store ptr %ss9, ptr %ss9.addr, align 8
+ %0 = load ptr, ptr %ss1.addr, align 8
+ %1 = load ptr, ptr %ss2.addr, align 8
+ %2 = load ptr, ptr %ss3.addr, align 8
+ %3 = load ptr, ptr %ss4.addr, align 8
+ %4 = load ptr, ptr %ss5.addr, align 8
+ %5 = load ptr, ptr %ss6.addr, align 8
+ %6 = load ptr, ptr %ss7.addr, align 8
+ %7 = load ptr, ptr %ss8.addr, align 8
+ %8 = load ptr, ptr %ss9.addr, align 8
+ %9 = load i8, ptr %0, align 1
+ %10 = load i8, ptr %1, align 1
+ %11 = load i8, ptr %2, align 1
+ %12 = load i8, ptr %3, align 1
+ %13 = load i8, ptr %4, align 1
+ %14 = load i8, ptr %5, align 1
+ %15 = load i8, ptr %6, align 1
+ %16 = load i8, ptr %7, align 1
+ %17 = load i8, ptr %8, align 1
+ call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i8 inreg %9, i8 inreg %10, i8 inreg %11, i8 inreg %12, i8 inreg %13, i8 inreg %14, i8 inreg %15, i8 inreg %16, i8 inreg %17)
ret void
; CHECK-LABEL: smallStruct_1b_x9:
; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
; Copy the arg to the global
; ALL-DAG: sh [[ARG2]], 4([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
- store volatile i16 %arg1, i16* %e1, align 2
+ %arg1 = va_arg ptr %ap, i16
+ %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+ store volatile i16 %arg1, ptr %e1, align 2
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
- store volatile i16 %arg2, i16* %e2, align 2
+ %arg2 = va_arg ptr %ap, i16
+ %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+ store volatile i16 %arg2, ptr %e2, align 2
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; Copy the arg to the global
; ALL-DAG: sw [[ARG2]], 8([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
- store volatile i32 %arg1, i32* %e1, align 4
+ %arg1 = va_arg ptr %ap, i32
+ %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+ store volatile i32 %arg1, ptr %e1, align 4
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
- store volatile i32 %arg2, i32* %e2, align 4
+ %arg2 = va_arg ptr %ap, i32
+ %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+ store volatile i32 %arg2, ptr %e2, align 4
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
; NEW-DAG: sd [[ARG2]], 16([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
- store volatile i64 %arg1, i64* %e1, align 8
+ %arg1 = va_arg ptr %ap, i64
+ %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+ store volatile i64 %arg1, ptr %e1, align 8
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
- store volatile i64 %arg2, i64* %e2, align 8
+ %arg2 = va_arg ptr %ap, i64
+ %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+ store volatile i64 %arg2, ptr %e2, align 8
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; Copy the arg to the global
; ALL-DAG: sh [[ARG2]], 4([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
- store volatile i16 %arg1, i16* %e1, align 2
+ %arg1 = va_arg ptr %ap, i16
+ %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+ store volatile i16 %arg1, ptr %e1, align 2
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
- store volatile i16 %arg2, i16* %e2, align 2
+ %arg2 = va_arg ptr %ap, i16
+ %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+ store volatile i16 %arg2, ptr %e2, align 2
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; Copy the arg to the global
; ALL-DAG: sw [[ARG2]], 8([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
- store volatile i32 %arg1, i32* %e1, align 4
+ %arg1 = va_arg ptr %ap, i32
+ %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+ store volatile i32 %arg1, ptr %e1, align 4
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
- store volatile i32 %arg2, i32* %e2, align 4
+ %arg2 = va_arg ptr %ap, i32
+ %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+ store volatile i32 %arg2, ptr %e2, align 4
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
; NEW-DAG: sd [[ARG2]], 16([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
- store volatile i64 %arg1, i64* %e1, align 8
+ %arg1 = va_arg ptr %ap, i64
+ %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+ store volatile i64 %arg1, ptr %e1, align 8
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
- store volatile i64 %arg2, i64* %e2, align 8
+ %arg2 = va_arg ptr %ap, i64
+ %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+ store volatile i64 %arg2, ptr %e2, align 8
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; Copy the arg to the global
; ALL-DAG: sh [[ARG2]], 4([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
- store volatile i16 %arg1, i16* %e1, align 2
+ %arg1 = va_arg ptr %ap, i16
+ %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+ store volatile i16 %arg1, ptr %e1, align 2
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
- store volatile i16 %arg2, i16* %e2, align 2
+ %arg2 = va_arg ptr %ap, i16
+ %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+ store volatile i16 %arg2, ptr %e2, align 2
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; Copy the arg to the global
; ALL-DAG: sw [[ARG2]], 8([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
- store volatile i32 %arg1, i32* %e1, align 4
+ %arg1 = va_arg ptr %ap, i32
+ %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+ store volatile i32 %arg1, ptr %e1, align 4
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
- store volatile i32 %arg2, i32* %e2, align 4
+ %arg2 = va_arg ptr %ap, i32
+ %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+ store volatile i32 %arg2, ptr %e2, align 4
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
; NEW-DAG: ld [[ARG2:\$[0-9]+]], 0([[VA2]])
; NEW-DAG: sd [[ARG2]], 16([[GV]])
- %ap = alloca i8*, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap2)
+ %ap = alloca ptr, align 8
+ call void @llvm.va_start(ptr %ap)
call void asm sideeffect "teqi $$zero, 1", ""()
- %arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
- store volatile i64 %arg1, i64* %e1, align 8
+ %arg1 = va_arg ptr %ap, i64
+ %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+ store volatile i64 %arg1, ptr %e1, align 8
call void asm sideeffect "teqi $$zero, 2", ""()
- %arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
- store volatile i64 %arg2, i64* %e2, align 8
+ %arg2 = va_arg ptr %ap, i64
+ %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+ store volatile i64 %arg2, ptr %e2, align 8
- call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(ptr %ap)
ret void
}
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
i8 signext %g, i8 signext %h, i8 signext %i,
i8 signext %j) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
- store volatile i8 %b, i8* %1
- %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
- store volatile i8 %c, i8* %2
- %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
- store volatile i8 %d, i8* %3
- %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
- store volatile i8 %e, i8* %4
- %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
- store volatile i8 %f, i8* %5
- %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
- store volatile i8 %g, i8* %6
- %7 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 8
- store volatile i8 %h, i8* %7
- %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 9
- store volatile i8 %i, i8* %8
- %9 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 10
- store volatile i8 %j, i8* %9
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 2
+ store volatile i8 %b, ptr %1
+ %2 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 3
+ store volatile i8 %c, ptr %2
+ %3 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 4
+ store volatile i8 %d, ptr %3
+ %4 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 5
+ store volatile i8 %e, ptr %4
+ %5 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 6
+ store volatile i8 %f, ptr %5
+ %6 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 7
+ store volatile i8 %g, ptr %6
+ %7 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 8
+ store volatile i8 %h, ptr %7
+ %8 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 9
+ store volatile i8 %i, ptr %8
+ %9 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 10
+ store volatile i8 %j, ptr %9
ret void
}
i8 signext %d, i8 signext %e, i8 signext %f,
i8 signext %g, i64 signext %i, i8 signext %j) nounwind {
entry:
- %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
- store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 1
- store volatile i64 %b, i64* %1
- %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
- store volatile i8 %c, i8* %2
- %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
- store volatile i8 %d, i8* %3
- %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
- store volatile i8 %e, i8* %4
- %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
- store volatile i8 %f, i8* %5
- %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
- store volatile i8 %g, i8* %6
- %7 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 2
- store volatile i64 %i, i64* %7
- %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
- store volatile i8 %j, i8* %8
+ %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+ store volatile i8 %a, ptr %0
+ %1 = getelementptr [11 x i64], ptr @dwords, i32 0, i32 1
+ store volatile i64 %b, ptr %1
+ %2 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 2
+ store volatile i8 %c, ptr %2
+ %3 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 3
+ store volatile i8 %d, ptr %3
+ %4 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 4
+ store volatile i8 %e, ptr %4
+ %5 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 5
+ store volatile i8 %f, ptr %5
+ %6 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 6
+ store volatile i8 %g, ptr %6
+ %7 = getelementptr [11 x i64], ptr @dwords, i32 0, i32 2
+ store volatile i64 %i, ptr %7
+ %8 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 7
+ store volatile i8 %j, ptr %8
ret void
}
; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
; O32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
; N32-SDAG-LABEL: Initial selection DAG: %bb.0 'g:entry'
; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
; N32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
; N64-SDAG-LABEL: Initial selection DAG: %bb.0 'g:entry'
; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i64<{{.*}}>
; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
-; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<void (%struct.S1*)* @f2>
+; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<ptr @f2>
; N64-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
define dso_local void @g() #0 {
; N64-NEXT: daddu $sp, $sp, $1
entry:
%a = alloca %struct.S1, align 4
- call void @f2(%struct.S1* byval(%struct.S1) align 4 %a)
+ call void @f2(ptr byval(%struct.S1) align 4 %a)
ret void
}
-declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
+declare dso_local void @f2(ptr byval(%struct.S1) align 4) #1
; O32-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
; O32-SDAG: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
; O32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
; N32-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
; N32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
; N64-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i64<{{.*}}>
; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
-; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<void (%struct.S1*)* @f2>
+; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<ptr @f2>
; N64-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
-define dso_local void @g2(%struct.S1* %a) {
+define dso_local void @g2(ptr %a) {
; O32-LABEL: g2:
; O32: # %bb.0: # %entry
; O32-NEXT: lui $1, 1
; N64-NEXT: jr $ra
; N64-NEXT: daddu $sp, $sp, $1
entry:
- %a.addr = alloca %struct.S1*, align 4
+ %a.addr = alloca ptr, align 4
%byval-temp = alloca %struct.S1, align 4
- store %struct.S1* %a, %struct.S1** %a.addr, align 4
- %0 = load %struct.S1*, %struct.S1** %a.addr, align 4
- %1 = bitcast %struct.S1* %byval-temp to i8*
- %2 = bitcast %struct.S1* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 1 %2, i32 65520, i1 false)
- call void @f2(%struct.S1* byval(%struct.S1) align 4 %byval-temp)
+ store ptr %a, ptr %a.addr, align 4
+ %0 = load ptr, ptr %a.addr, align 4
+ call void @llvm.memcpy.p0.p0.i32(ptr align 4 %byval-temp, ptr align 1 %0, i32 65520, i1 false)
+ call void @f2(ptr byval(%struct.S1) align 4 %byval-temp)
ret void
}
; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
; N64-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<0>
-define dso_local i32 @g3(%struct.S1* %a, %struct.S1* %b) #0 {
+define dso_local i32 @g3(ptr %a, ptr %b) #0 {
; O32-LABEL: g3:
; O32: # %bb.0: # %entry
; O32-NEXT: addiu $sp, $sp, -32
; N64-NEXT: jr $ra
; N64-NEXT: daddiu $sp, $sp, 32
entry:
- %a.addr = alloca %struct.S1*, align 4
- %b.addr = alloca %struct.S1*, align 4
- store %struct.S1* %a, %struct.S1** %a.addr, align 4
- store %struct.S1* %b, %struct.S1** %b.addr, align 4
- %0 = load %struct.S1*, %struct.S1** %a.addr, align 4
- %1 = bitcast %struct.S1* %0 to i8*
- %2 = load %struct.S1*, %struct.S1** %b.addr, align 4
- %3 = bitcast %struct.S1* %2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %1, i8* align 1 %3, i32 65520, i1 false)
+ %a.addr = alloca ptr, align 4
+ %b.addr = alloca ptr, align 4
+ store ptr %a, ptr %a.addr, align 4
+ store ptr %b, ptr %b.addr, align 4
+ %0 = load ptr, ptr %a.addr, align 4
+ %1 = load ptr, ptr %b.addr, align 4
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %0, ptr align 1 %1, i32 65520, i1 false)
ret i32 4
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #2
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #2
@float = global float 1.0, align 1
@dword = global i64 283686952306183, align 1
@double = global double 1.0, align 1
-@pointer = global i8* @byte
+@pointer = global ptr @byte
; ALL-NOT: .p2align
; ALL-LABEL: byte:
@float_array = global [2 x float] [float 1.0, float 2.0], align 1
@dword_array = global [2 x i64] [i64 1, i64 2], align 1
@double_array = global [2 x double] [double 1.0, double 2.0], align 1
-@pointer_array = global [2 x i8*] [i8* @byte, i8* @byte]
+@pointer_array = global [2 x ptr] [ptr @byte, ptr @byte]
; ALL-NOT: .p2align
; ALL-LABEL: byte_array:
; NOTE(review): return-in-register test — volatile load of a global float,
; returned in the FP return register. Only the pointer operand spelling changes.
define float @retfloat() nounwind {
entry:
- %0 = load volatile float, float* @float
+ %0 = load volatile float, ptr @float
ret float %0
}
; NOTE(review): double return-value test; typed `double*` operand rewritten to
; opaque `ptr`, behavior unchanged.
define double @retdouble() nounwind {
entry:
- %0 = load volatile double, double* @double
+ %0 = load volatile double, ptr @double
ret double %0
}
; NOTE(review): duplicate of @retfloat above — presumably this chunk concatenates
; hunks from two different test files; verify against the original patch.
define float @retfloat() nounwind {
entry:
- %0 = load volatile float, float* @float
+ %0 = load volatile float, ptr @float
ret float %0
}
; NOTE(review): duplicate of @retdouble above (concatenation artifact — confirm
; against the source patch). Same typed-pointer -> ptr rewrite.
define double @retdouble() nounwind {
entry:
- %0 = load volatile double, double* @double
+ %0 = load volatile double, ptr @double
ret double %0
}
; NOTE(review): returns an aggregate { double, double } loaded from an
; uninitialized alloca — the test only exercises the return-value lowering,
; the loaded value itself is undef.
define { double, double } @retComplexDouble() #0 {
%retval = alloca { double, double }, align 8
- %1 = load { double, double }, { double, double }* %retval
+ %1 = load { double, double }, ptr %retval
ret { double, double } %1
}
; NOTE(review): fp128 (long double) return test; pointer operand migrated to ptr.
define fp128 @retldouble() nounwind {
entry:
- %0 = load volatile fp128, fp128* @fp128
+ %0 = load volatile fp128, ptr @fp128
ret fp128 %0
}
; NOTE(review): single-element struct wrapping fp128, returned inreg; exercises
; struct-return lowering distinct from plain fp128 above.
define inreg {fp128} @ret_struct_fp128() nounwind {
entry:
- %0 = load volatile {fp128}, {fp128}* @struct_fp128
+ %0 = load volatile {fp128}, ptr @struct_fp128
ret {fp128} %0
}
; NOTE(review): i8 return test; volatile keeps the load from being folded away.
define i8 @reti8() nounwind {
entry:
- %0 = load volatile i8, i8* @byte
+ %0 = load volatile i8, ptr @byte
ret i8 %0
}
; NOTE(review): i32 return test, same migration pattern as the siblings above.
define i32 @reti32() nounwind {
entry:
- %0 = load volatile i32, i32* @word
+ %0 = load volatile i32, ptr @word
ret i32 %0
}
; NOTE(review): i64 return test; under O32 this returns in a register pair.
define i64 @reti64() nounwind {
entry:
- %0 = load volatile i64, i64* @dword
+ %0 = load volatile i64, ptr @dword
ret i64 %0
}
; HARD-FLOAT: sdc1 $f0, 0(${{[0-9]+}})
%call = call fp128 @roundl(fp128 %value)
- store fp128 %call, fp128* @fp128
+ store fp128 %call, ptr @fp128
ret void
}
; MIPS64R5EL-NEXT: jr $ra
; MIPS64R5EL-NEXT: nop
%1 = fadd <2 x float> %a, %b
- store <2 x float> %1, <2 x float> * @float_res_v2f32
+ store <2 x float> %1, ptr @float_res_v2f32
ret void
}
; MIPS64R5EL-NEXT: jr $ra
; MIPS64R5EL-NEXT: nop
%1 = fadd <4 x float> %a, %b
- store <4 x float> %1, <4 x float> * @float_res_v4f32
+ store <4 x float> %1, ptr @float_res_v4f32
ret void
}
; MIPS32R5EL-NEXT: jr $ra
; MIPS32R5EL-NEXT: nop
%1 = fadd <2 x double> %a, %b
- store <2 x double> %1, <2 x double> * @double_v2f64
+ store <2 x double> %1, ptr @double_v2f64
ret void
}
; MIPS64R5-NEXT: lh $2, 0($1)
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <2 x i8>, <2 x i8> * @gv2i8
+ %1 = load <2 x i8>, ptr @gv2i8
ret <2 x i8> %1
}
; MIPS64R5-NEXT: lw $2, 0($1)
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <4 x i8>, <4 x i8> * @gv4i8
+ %1 = load <4 x i8>, ptr @gv4i8
ret <4 x i8> %1
}
; MIPS32R5EL-NEXT: addiu $sp, $sp, 32
; MIPS32R5EL-NEXT: jr $ra
; MIPS32R5EL-NEXT: nop
- %1 = load <8 x i8>, <8 x i8> * @gv8i8
+ %1 = load <8 x i8>, ptr @gv8i8
ret <8 x i8> %1
}
; MIPS64R5-NEXT: copy_s.d $3, $w0[1]
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <16 x i8>, <16 x i8> * @gv16i8
+ %1 = load <16 x i8>, ptr @gv16i8
ret <16 x i8> %1
}
; MIPS64R5-NEXT: lw $2, 0($1)
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <2 x i16>, <2 x i16> * @gv2i16
+ %1 = load <2 x i16>, ptr @gv2i16
ret <2 x i16> %1
}
; MIPS32R5EL-NEXT: addiu $sp, $sp, 32
; MIPS32R5EL-NEXT: jr $ra
; MIPS32R5EL-NEXT: nop
- %1 = load <4 x i16>, <4 x i16> * @gv4i16
+ %1 = load <4 x i16>, ptr @gv4i16
ret <4 x i16> %1
}
; MIPS64R5-NEXT: copy_s.d $3, $w0[1]
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <8 x i16>, <8 x i16> * @gv8i16
+ %1 = load <8 x i16>, ptr @gv8i16
ret <8 x i16> %1
}
; MIPS32R5EL-NEXT: addiu $sp, $sp, 32
; MIPS32R5EL-NEXT: jr $ra
; MIPS32R5EL-NEXT: nop
- %1 = load <2 x i32>, <2 x i32> * @gv2i32
+ %1 = load <2 x i32>, ptr @gv2i32
ret <2 x i32> %1
}
; MIPS64R5-NEXT: copy_s.d $3, $w0[1]
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <4 x i32>, <4 x i32> * @gv4i32
+ %1 = load <4 x i32>, ptr @gv4i32
ret <4 x i32> %1
}
; MIPS64R5-NEXT: copy_s.d $3, $w0[1]
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
- %1 = load <2 x i64>, <2 x i64> * @gv2i64
+ %1 = load <2 x i64>, ptr @gv2i64
ret <2 x i64> %1
}
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
entry:
- %0 = load <2 x float>, <2 x float> * @gv2f32
+ %0 = load <2 x float>, ptr @gv2f32
ret <2 x float> %0
}
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
entry:
- %0 = load <4 x float>, <4 x float> * @gv4f32
+ %0 = load <4 x float>, ptr @gv4f32
ret <4 x float> %0
}
; MIPS64R5-NEXT: jr $ra
; MIPS64R5-NEXT: nop
entry:
- %0 = load <2 x double>, <2 x double> * @gv2f64
+ %0 = load <2 x double>, ptr @gv2f64
ret <2 x double> %0
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <2 x i8> @i8_2(<2 x i8> <i8 6, i8 7>, <2 x i8> <i8 12, i8 8>)
- store <2 x i8> %0, <2 x i8> * @gv2i8
+ store <2 x i8> %0, ptr @gv2i8
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <4 x i8> @i8_4(<4 x i8> <i8 6, i8 7, i8 9, i8 10>, <4 x i8> <i8 12, i8 8, i8 9, i8 10>)
- store <4 x i8> %0, <4 x i8> * @gv4i8
+ store <4 x i8> %0, ptr @gv4i8
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <8 x i8> @i8_8(<8 x i8> <i8 6, i8 7, i8 9, i8 10, i8 6, i8 7, i8 9, i8 10>, <8 x i8> <i8 12, i8 8, i8 9, i8 10, i8 6, i8 7, i8 9, i8 10>)
- store <8 x i8> %0, <8 x i8> * @gv8i8
+ store <8 x i8> %0, ptr @gv8i8
ret void
}
; MIPS64EL-NEXT: nop
entry:
%0 = call <16 x i8> @i8_16(<16 x i8> <i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7, i8 6, i8 7, i8 9, i8 10>, <16 x i8> <i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 12, i8 8, i8 9, i8 10>)
- store <16 x i8> %0, <16 x i8> * @gv16i8
+ store <16 x i8> %0, ptr @gv16i8
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <2 x i16> @i16_2(<2 x i16> <i16 6, i16 7>, <2 x i16> <i16 12, i16 8>)
- store <2 x i16> %0, <2 x i16> * @gv2i16
+ store <2 x i16> %0, ptr @gv2i16
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <4 x i16> @i16_4(<4 x i16> <i16 6, i16 7, i16 9, i16 10>, <4 x i16> <i16 12, i16 8, i16 9, i16 10>)
- store <4 x i16> %0, <4 x i16> * @gv4i16
+ store <4 x i16> %0, ptr @gv4i16
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <8 x i16> @i16_8(<8 x i16> <i16 6, i16 7, i16 9, i16 10, i16 6, i16 7, i16 9, i16 10>, <8 x i16> <i16 6, i16 7, i16 9, i16 10, i16 12, i16 8, i16 9, i16 10>)
- store <8 x i16> %0, <8 x i16> * @gv8i16
+ store <8 x i16> %0, ptr @gv8i16
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <2 x i32> @i32_2(<2 x i32> <i32 6, i32 7>, <2 x i32> <i32 12, i32 8>)
- store <2 x i32> %0, <2 x i32> * @gv2i32
+ store <2 x i32> %0, ptr @gv2i32
ret void
}
; MIPS64EL-NEXT: nop
entry:
%0 = call <4 x i32> @i32_4(<4 x i32> <i32 6, i32 7, i32 9, i32 10>, <4 x i32> <i32 12, i32 8, i32 9, i32 10>)
- store <4 x i32> %0, <4 x i32> * @gv4i32
+ store <4 x i32> %0, ptr @gv4i32
ret void
}
; MIPS32EL-NEXT: nop
entry:
%0 = call <2 x i64> @i64_2(<2 x i64> <i64 6, i64 7>, <2 x i64> <i64 12, i64 8>)
- store <2 x i64> %0, <2 x i64> * @gv2i64
+ store <2 x i64> %0, ptr @gv2i64
ret void
}
; MIPS64EL-NEXT: nop
entry:
%0 = call <2 x float> @float2_extern(<2 x float> <float 0.0, float -1.0>, <2 x float> <float 12.0, float 14.0>)
- store <2 x float> %0, <2 x float> * @gv2f32
+ store <2 x float> %0, ptr @gv2f32
ret void
}
; MIPS64EL-NEXT: nop
entry:
%0 = call <4 x float> @float4_extern(<4 x float> <float 0.0, float -1.0, float 2.0, float 4.0>, <4 x float> <float 12.0, float 14.0, float 15.0, float 16.0>)
- store <4 x float> %0, <4 x float> * @gv4f32
+ store <4 x float> %0, ptr @gv4f32
ret void
}
; MIPS32EL-NEXT: nop
entry:
%0 = call <2 x double> @double2_extern(<2 x double> <double 0.0, double -1.0>, <2 x double> <double 12.0, double 14.0>)
- store <2 x double> %0, <2 x double> * @gv2f64
+ store <2 x double> %0, ptr @gv2f64
ret void
}
; MIPS64R5EL-NEXT: nop
entry:
%0 = call <2 x i24> @i24x2(<2 x i24> <i24 6, i24 7>, <2 x i24> <i24 12, i24 8>)
- store <2 x i24> %0, <2 x i24> * @gv2i24
+ store <2 x i24> %0, ptr @gv2i24
ret void
}
; CHECK: .cfi_offset 31, -20
; CHECK: .cfi_offset 16, -24
- %val1 = load volatile double, double* @var
- %val2 = load volatile double, double* @var
+ %val1 = load volatile double, ptr @var
+ %val2 = load volatile double, ptr @var
call void (...) @foo() nounwind
- store volatile double %val1, double* @var
- store volatile double %val2, double* @var
+ store volatile double %val1, ptr @var
+ store volatile double %val2, ptr @var
ret void
}
; Function Attrs: nounwind
define void @foo() #0 {
entry:
- store i32 305419896, i32* @i, align 4
- %0 = load i32, i32* @b, align 4
+ store i32 305419896, ptr @i, align 4
+ %0 = load i32, ptr @b, align 4
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 10, i32* @b, align 4
+ store i32 10, ptr @b, align 4
br label %if.end
if.else: ; preds = %entry
- store i32 20, i32* @b, align 4
+ store i32 20, ptr @b, align 4
br label %if.end
if.end: ; preds = %if.else, %if.then
call void asm sideeffect ".space 100000", ""() #1, !srcloc !1
- store i32 305419896, i32* @l, align 4
+ store i32 305419896, ptr @l, align 4
ret void
; constisle: $CPI0_1:
; constisle .4byte 305419896 # 0x12345678
; RUN: llc -march=mips64el -mcpu=mips64r6 -relocation-model=pic < %s | FileCheck %s -check-prefixes=ALL,64-CMP
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
-@i3 = common global i32* null, align 4
+@i3 = common global ptr null, align 4
; ALL-LABEL: cmov1:
; 64-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
; 64-CMP-DAG: ld $2, 0($[[T2]])
; NOTE(review): conditional-move test. Under opaque pointers the constant
; `getelementptr ... @i1, i32 0, i32 0` folds to plain `@i1`, so the select
; operand simplifies; semantics identical.
-define i32* @cmov1(i32 signext %s) nounwind readonly {
+define ptr @cmov1(i32 signext %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
- %tmp1 = load i32*, i32** @i3, align 4
- %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @i1, i32 0, i32 0), i32* %tmp1
- ret i32* %cond
+ %tmp1 = load ptr, ptr @i3, align 4
+ %cond = select i1 %tobool, ptr @i1, ptr %tmp1
+ ret ptr %cond
}
@c = global i32 1, align 4
; NOTE(review): integer cmov variant — selects between two loaded globals;
; only the load pointer spellings change in this hunk.
define i32 @cmov2(i32 signext %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
- %tmp1 = load i32, i32* @c, align 4
- %tmp2 = load i32, i32* @d, align 4
+ %tmp1 = load i32, ptr @c, align 4
+ %tmp2 = load i32, ptr @d, align 4
%cond = select i1 %tobool, i32 %tmp1, i32 %tmp2
ret i32 %cond
}
-define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
+define void @getSubImagesLuma(ptr nocapture %s) #0 {
entry:
- %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1
- %0 = load i32, i32* %size_y, align 4
+ %size_y = getelementptr inbounds %struct.StorablePicture, ptr %s, i32 0, i32 1
+ %0 = load i32, ptr %size_y, align 4
%sub = add nsw i32 %0, -1
%add5 = add nsw i32 %0, 20
%cmp6 = icmp sgt i32 %add5, -20
for.body: ; preds = %entry, %for.body
%j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ]
- %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2
+ %call = tail call i32 @iClip3(i32 0, i32 %sub, i32 %j.07) #2
%inc = add nsw i32 %j.07, 1
- %1 = load i32, i32* %size_y, align 4
+ %1 = load i32, ptr %size_y, align 4
%add = add nsw i32 %1, 20
%cmp = icmp slt i32 %inc, %add
br i1 %cmp, label %for.body, label %for.end
; Starting from dwarf exception handling preparation skips optimizations that
; may simplify out the crucical bnec $4, $4 instruction.
; NOTE(review): MIPSR6 same-register compare-branch regression test (the
; crucial `bnec $4, $4` must not be simplified away). Body as shown here is
; truncated by chunking (basic blocks lack terminators) — bytes preserved;
; confirm against the full test file.
-define internal void @_ZL14TestRemoveLastv(i32* %alist.sroa.0.4) {
+define internal void @_ZL14TestRemoveLastv(ptr %alist.sroa.0.4) {
; CHECK-LABEL: _ZL14TestRemoveLastv:
entry:
- %ascevgep = getelementptr i32, i32* %alist.sroa.0.4, i64 99
+ %ascevgep = getelementptr i32, ptr %alist.sroa.0.4, i64 99
br label %do.body121
for.cond117:
%alsr.iv.next = add nsw i32 %alsr.iv, -1
- %ascevgep340 = getelementptr i32, i32* %alsr.iv339, i64 -1
+ %ascevgep340 = getelementptr i32, ptr %alsr.iv339, i64 -1
%acmp118 = icmp sgt i32 %alsr.iv.next, 0
br i1 %acmp118, label %do.body121, label %if.then143
do.body121:
- %alsr.iv339 = phi i32* [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
+ %alsr.iv339 = phi ptr [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
%alsr.iv = phi i32 [ 100, %entry ], [ %alsr.iv.next, %for.cond117 ]
%a9 = add i32 %alsr.iv, -1
%alnot124 = icmp eq i32 %alsr.iv, %alsr.iv
do.body134:
%a10 = add i32 %alsr.iv, -1
- %a11 = load i32, i32* %alsr.iv339, align 4, !tbaa !5
+ %a11 = load i32, ptr %alsr.iv339, align 4, !tbaa !5
; CHECK-NOT: bnec $[[R0:[0-9]+]], $[[R0]]
; CHECK-NOT: beqc $[[R1:[0-9]+]], $[[R1]]
%alnot137 = icmp eq i32 %a9, %a11
}
; NOTE(review): 64-bit twin of @_ZL14TestRemoveLastv (i64 induction variable and
; element type); same same-register-branch check. Body likewise truncated by
; chunking — bytes preserved verbatim.
-define internal void @_ZL14TestRemoveLastv64(i64* %alist.sroa.0.4) {
+define internal void @_ZL14TestRemoveLastv64(ptr %alist.sroa.0.4) {
; CHECK-LABEL: _ZL14TestRemoveLastv64:
entry:
- %ascevgep = getelementptr i64, i64* %alist.sroa.0.4, i64 99
+ %ascevgep = getelementptr i64, ptr %alist.sroa.0.4, i64 99
br label %do.body121
for.cond117:
%alsr.iv.next = add nsw i64 %alsr.iv, -1
- %ascevgep340 = getelementptr i64, i64* %alsr.iv339, i64 -1
+ %ascevgep340 = getelementptr i64, ptr %alsr.iv339, i64 -1
%acmp118 = icmp sgt i64 %alsr.iv.next, 0
br i1 %acmp118, label %do.body121, label %if.then143
do.body121:
- %alsr.iv339 = phi i64* [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
+ %alsr.iv339 = phi ptr [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
%alsr.iv = phi i64 [ 100, %entry ], [ %alsr.iv.next, %for.cond117 ]
%a9 = add i64 %alsr.iv, -1
%alnot124 = icmp eq i64 %alsr.iv, %alsr.iv
do.body134:
%a10 = add i64 %alsr.iv, -1
- %a11 = load i64, i64* %alsr.iv339, align 4, !tbaa !5
+ %a11 = load i64, ptr %alsr.iv339, align 4, !tbaa !5
; CHECK-NOT: bnec $[[R0:[0-9]+]], $[[R0]]
; CHECK-NOT: beqc $[[R1:[0-9]+]], $[[R1]]
%alnot137 = icmp eq i64 %a9, %a11
}
; NOTE(review): indirect-call-through-$25 (jalrc) test. The function-pointer
; parameter type `i8* ()*` collapses to plain `ptr` under opaque pointers.
; Declared return type is i64 but the body ends in `ret void` — looks like a
; chunking artifact; confirm against the original test file.
-define i64 @l9(i8* ()* %i) {
+define i64 @l9(ptr %i) {
entry:
; CHECK-LABEL: l9:
- %i.addr = alloca i8* ()*, align 4
- store i8* ()* %i, i8* ()** %i.addr, align 4
+ %i.addr = alloca ptr, align 4
+ store ptr %i, ptr %i.addr, align 4
; CHECK: jalrc $25
%call = call i64 @k()
%cmp = icmp ne i64 %call, 0
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %0 = load i8* ()*, i8* ()** %i.addr, align 4
+ %0 = load ptr, ptr %i.addr, align 4
; CHECK: jalrc $25
- %call1 = call i8* %0()
+ %call1 = call ptr %0()
br label %if.end
if.end: ; preds = %if.then, %entry
ret void
}
-define i32 @l9(i8* ()* %i) #0 {
+define i32 @l9(ptr %i) #0 {
entry:
- %i.addr = alloca i8* ()*, align 4
- store i8* ()* %i, i8* ()** %i.addr, align 4
+ %i.addr = alloca ptr, align 4
+ store ptr %i, ptr %i.addr, align 4
; STATIC32: jal
; STATIC32: nop
; PIC: jalrc $25
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %0 = load i8* ()*, i8* ()** %i.addr, align 4
+ %0 = load ptr, ptr %i.addr, align 4
; CHECK: jalrc $25
- %call1 = call i8* %0()
+ %call1 = call ptr %0()
br label %if.end
if.end: ; preds = %if.then, %entry
; Function Attrs: nounwind
define void @_Z3foov() #0 {
entry:
- %0 = load volatile i32, i32* @boo, align 4
+ %0 = load volatile i32, ptr @boo, align 4
switch i32 %0, label %sw.epilog [
i32 0, label %sw.bb
i32 1, label %sw.bb1
]
sw.bb: ; preds = %entry
- store volatile i32 1, i32* @boo, align 4
+ store volatile i32 1, ptr @boo, align 4
br label %sw.epilog
; CHECK: beqzc
; CHECK-NEXT: nop
; CHECK-NEXT: j
sw.bb1: ; preds = %entry, %entry
- store volatile i32 2, i32* @boo, align 4
+ store volatile i32 2, ptr @boo, align 4
br label %sw.epilog
; CHECK: bnezc
; CHECK-NEXT: nop
; Function Attrs: nounwind
define void @t() #0 {
entry:
- store i32 -559023410, i32* @i, align 4
- store i32 -559023410, i32* @j, align 4
- store i32 -87105875, i32* @k, align 4
- store i32 262991277, i32* @l, align 4
+ store i32 -559023410, ptr @i, align 4
+ store i32 -559023410, ptr @j, align 4
+ store i32 -87105875, ptr @k, align 4
+ store i32 262991277, ptr @l, align 4
ret void
; CHECK: lw ${{[0-9]+}}, $CPI0_0
; CHECK: lw ${{[0-9]+}}, $CPI0_1
; Function Attrs: nounwind
; NOTE(review): MIPS16 branch-relaxation / constant-island stress test. The 128
; identical calls to @hoo pad the function so that branches exceed the short
; branch range, forcing relaxation. The migration drops the no-op
; `bitcast (void (...)* @f to void ()*)` call wrappers, which opaque pointers
; make redundant. The exact call count is load-bearing — do not dedupe.
define void @t() #0 {
entry:
- store i32 -559023410, i32* @i, align 4
- %0 = load i32, i32* @b, align 4
+ store i32 -559023410, ptr @i, align 4
+ %0 = load i32, ptr @b, align 4
; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.else
; no-load-relax: $BB0_3:
; no-load-relax: lw ${{[0-9]+}}, %call16(goo)(${{[0-9]+}})
if.then: ; preds = %entry
- call void bitcast (void (...)* @foo to void ()*)()
+ call void @foo()
br label %if.end
if.else: ; preds = %entry
- call void bitcast (void (...)* @goo to void ()*)()
+ call void @goo()
br label %if.end
if.end: ; preds = %if.else, %if.then
; NOTE(review): 128 removed typed-pointer call lines follow, then their 128
; opaque-pointer replacements — counts must match exactly.
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
ret void
}
; Function Attrs: nounwind
define void @t() #0 {
entry:
- store i32 -559023410, i32* @i, align 4
+ store i32 -559023410, ptr @i, align 4
; load-relax: lw ${{[0-9]+}}, $CPI0_0
; load-relax: jrc $ra
; load-relax: .p2align 2
; no-load-relax: .4byte 3735943886
; no-load-relax: $BB0_2:
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
- call void bitcast (void (...)* @hoo to void ()*)()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
+ call void @hoo()
ret void
}
; MIPS constant-island test fragment (function truncated in this excerpt):
; @t stores a large 32-bit immediate that must be materialized from a
; constant pool entry ($CPI0_0). The load-relax* prefixes below check
; whether the lw of the pool entry is (or is not) emitted as a 16-bit
; instruction. The migration only rewrites the store's pointer type
; (i32* -> ptr); the stored value and alignment are unchanged.
; Function Attrs: nounwind
define void @t() #0 {
entry:
- store i32 -559023410, i32* @i, align 4
+ store i32 -559023410, ptr @i, align 4
; load-relax-NOT: lw ${{[0-9]+}}, $CPI0_0 # 16 bit inst
; load-relax1: lw ${{[0-9]+}}, $CPI0_0
; load-relax: jrc $ra
; Inline asm "jr $0" with constraint 'c' (MIPS jump register $25) fed a
; float operand: 'c' only covers integer registers, so register allocation
; for the asm input is expected to fail with the diagnostic checked below.
; Function is truncated in this excerpt (no closing brace visible).
define i32 @main() #0 {
entry:
%jmp = alloca float, align 4
- store float 0x4200000000000000, float* %jmp, align 4
- %0 = load float, float* %jmp, align 4
+ store float 0x4200000000000000, ptr %jmp, align 4
+ %0 = load float, ptr %jmp, align 4
call void asm sideeffect "jr $0", "c,~{$1}"(float %0) #1
; CHECK: error: couldn't allocate input reg for constraint 'c'
; Counterpart to the float case above: the same "jr $0" asm with an i32
; operand. Constraint 'c' can now be honored, and the CHECK line expects
; the value 0 to be materialized into $25 (addiu $25, $zero, 0) before the
; register-indirect jump. Function truncated in this excerpt.
define i32 @main() #0 {
entry:
%jmp = alloca i32, align 4
- store i32 0, i32* %jmp, align 4
- %0 = load i32, i32* %jmp, align 4
+ store i32 0, ptr %jmp, align 4
+ %0 = load i32, ptr %jmp, align 4
call void asm sideeffect "jr $0", "c,~{$1}"(i32 %0) #1
; CHECK: addiu $25, $zero, 0
; Indirect/memory inline-asm constraints ("=*imr,=*m,0,*m"): under opaque
; pointers each indirect (*) operand carries elementtype(i32) so the
; backend still knows the type of the memory being accessed. The tied
; operand ("0", plain %x) needs no elementtype. The enclosing function's
; define line is elided in this excerpt.
entry:
%s = alloca i32, align 4
%x = alloca i32, align 4
- call void asm "", "=*imr,=*m,0,*m,~{$1}"(i32* elementtype(i32) %x, i32* elementtype(i32) %s, i32* %x, i32* elementtype(i32) %s)
+ call void asm "", "=*imr,=*m,0,*m,~{$1}"(ptr elementtype(i32) %x, ptr elementtype(i32) %s, ptr %x, ptr elementtype(i32) %s)
; CHECK: #APP
; CHECK: #NO_APP
; byval aggregate call: %s is a stack %struct.S passed by value to @foo1.
; byval(%struct.S) retains the pointee type after the migration, so the
; %struct.S* -> ptr rewrite loses no ABI information.
define void @foo2() nounwind {
entry:
%s = alloca %struct.S, align 4
- call void @foo1(%struct.S* byval(%struct.S) %s)
+ call void @foo1(ptr byval(%struct.S) %s)
ret void
}
-declare void @foo1(%struct.S* byval(%struct.S))
+declare void @foo1(ptr byval(%struct.S))
; Large (1 MiB) stack frame: exercises frame offsets too big for a 16-bit
; immediate. With opaque pointers the zero-index "arraydecay" GEP is an
; identity operation, so the migration drops it and passes the alloca
; result %z to @foo directly.
define i32 @main() {
entry:
%z = alloca [1048576 x i8], align 1
- %arraydecay = getelementptr inbounds [1048576 x i8], [1048576 x i8]* %z, i32 0, i32 0
- %call = call i32 @foo(i8* %arraydecay)
+ %call = call i32 @foo(ptr %z)
ret i32 0
; CHECK-LABEL: main
}
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)
; Count-leading-zeros test: loads @x, calls llvm.ctlz.i32 with the second
; argument i1 true (result is poison for a zero input), and stores the
; count to @y. Only the pointer operands of the loads/stores change in the
; opaque-pointer rewrite.
define i32 @main() #0 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* @x, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr @x, align 4
%1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true)
- store i32 %1, i32* @y, align 4
+ store i32 %1, ptr @y, align 4
ret i32 0
}
; optimization level check in findBetterNeighbors, this test demonstrates
; a severe compile-time regression (~30 minutes) vs. <10 seconds at 'optnone'.
; External helper called by @d below with the address of a local i8.
-declare i8 @k(i8*)
+declare i8 @k(ptr)
define void @d(i32 signext %e4) #1 {
entry:
%new_val = alloca i8, align 1
%simd = alloca i8, align 1
%code = alloca [269 x i8], align 1
- store i32 %e4, i32* %e4.addr, align 4
- %call = call zeroext i8 @k(i8* %simd)
- store i8 %call, i8* %simd, align 1
+ store i32 %e4, ptr %e4.addr, align 4
+ %call = call zeroext i8 @k(ptr %simd)
+ store i8 %call, ptr %simd, align 1
- %arrayinit.begin = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
- store i8 32, i8* %arrayinit.begin, align 1
- %arrayinit.element = getelementptr inbounds i8, i8* %arrayinit.begin, i32 1
- %a2 = load i8, i8* %old_val, align 1
- store i8 %a2, i8* %arrayinit.element, align 1
- %arrayinit.element1 = getelementptr inbounds i8, i8* %arrayinit.element, i32 1
- store i8 -3, i8* %arrayinit.element1, align 1
- %arrayinit.element2 = getelementptr inbounds i8, i8* %arrayinit.element1, i32 1
- store i8 0, i8* %arrayinit.element2, align 1
- %arrayinit.element3 = getelementptr inbounds i8, i8* %arrayinit.element2, i32 1
- store i8 33, i8* %arrayinit.element3, align 1
- %arrayinit.element4 = getelementptr inbounds i8, i8* %arrayinit.element3, i32 1
- %a3 = load i8, i8* %simd, align 1
- store i8 %a3, i8* %arrayinit.element4, align 1
- %arrayinit.element5 = getelementptr inbounds i8, i8* %arrayinit.element4, i32 1
- store i8 32, i8* %arrayinit.element5, align 1
- %arrayinit.element6 = getelementptr inbounds i8, i8* %arrayinit.element5, i32 1
- %a4 = load i8, i8* %simd, align 1
- store i8 %a4, i8* %arrayinit.element6, align 1
- %arrayinit.element7 = getelementptr inbounds i8, i8* %arrayinit.element6, i32 1
- store i8 32, i8* %arrayinit.element7, align 1
- %arrayinit.element8 = getelementptr inbounds i8, i8* %arrayinit.element7, i32 1
- %a5 = load i8, i8* %new_val, align 1
- store i8 %a5, i8* %arrayinit.element8, align 1
- %arrayinit.element9 = getelementptr inbounds i8, i8* %arrayinit.element8, i32 1
- store i8 -3, i8* %arrayinit.element9, align 1
- %arrayinit.element10 = getelementptr inbounds i8, i8* %arrayinit.element9, i32 1
- store i8 2, i8* %arrayinit.element10, align 1
- %arrayinit.element11 = getelementptr inbounds i8, i8* %arrayinit.element10, i32 1
- store i8 0, i8* %arrayinit.element11, align 1
- %arrayinit.element12 = getelementptr inbounds i8, i8* %arrayinit.element11, i32 1
- store i8 33, i8* %arrayinit.element12, align 1
- %arrayinit.element13 = getelementptr inbounds i8, i8* %arrayinit.element12, i32 1
- %a6 = load i8, i8* %simd, align 1
- store i8 %a6, i8* %arrayinit.element13, align 1
- %arrayinit.element14 = getelementptr inbounds i8, i8* %arrayinit.element13, i32 1
- store i8 32, i8* %arrayinit.element14, align 1
- %arrayinit.element15 = getelementptr inbounds i8, i8* %arrayinit.element14, i32 1
- %a7 = load i8, i8* %new_val, align 1
- store i8 %a7, i8* %arrayinit.element15, align 1
- %arrayinit.element16 = getelementptr inbounds i8, i8* %arrayinit.element15, i32 1
- store i8 32, i8* %arrayinit.element16, align 1
- %arrayinit.element17 = getelementptr inbounds i8, i8* %arrayinit.element16, i32 1
- %a8 = load i8, i8* %simd, align 1
- store i8 %a8, i8* %arrayinit.element17, align 1
- %arrayinit.element18 = getelementptr inbounds i8, i8* %arrayinit.element17, i32 1
- store i8 -3, i8* %arrayinit.element18, align 1
- %arrayinit.element19 = getelementptr inbounds i8, i8* %arrayinit.element18, i32 1
- store i8 1, i8* %arrayinit.element19, align 1
- %arrayinit.element20 = getelementptr inbounds i8, i8* %arrayinit.element19, i32 1
- store i8 0, i8* %arrayinit.element20, align 1
- %arrayinit.element21 = getelementptr inbounds i8, i8* %arrayinit.element20, i32 1
- store i8 92, i8* %arrayinit.element21, align 1
- %arrayinit.element22 = getelementptr inbounds i8, i8* %arrayinit.element21, i32 1
- store i8 4, i8* %arrayinit.element22, align 1
- %arrayinit.element23 = getelementptr inbounds i8, i8* %arrayinit.element22, i32 1
- store i8 64, i8* %arrayinit.element23, align 1
- %arrayinit.element24 = getelementptr inbounds i8, i8* %arrayinit.element23, i32 1
- store i8 65, i8* %arrayinit.element24, align 1
- %arrayinit.element25 = getelementptr inbounds i8, i8* %arrayinit.element24, i32 1
- store i8 0, i8* %arrayinit.element25, align 1
- %arrayinit.element26 = getelementptr inbounds i8, i8* %arrayinit.element25, i32 1
- store i8 15, i8* %arrayinit.element26, align 1
- %arrayinit.element27 = getelementptr inbounds i8, i8* %arrayinit.element26, i32 1
- store i8 11, i8* %arrayinit.element27, align 1
- %arrayinit.element28 = getelementptr inbounds i8, i8* %arrayinit.element27, i32 1
- store i8 32, i8* %arrayinit.element28, align 1
- %arrayinit.element29 = getelementptr inbounds i8, i8* %arrayinit.element28, i32 1
- %a9 = load i8, i8* %old_val, align 1
- store i8 %a9, i8* %arrayinit.element29, align 1
- %arrayinit.element30 = getelementptr inbounds i8, i8* %arrayinit.element29, i32 1
- store i8 32, i8* %arrayinit.element30, align 1
- %arrayinit.element31 = getelementptr inbounds i8, i8* %arrayinit.element30, i32 1
- %a10 = load i8, i8* %simd, align 1
- store i8 %a10, i8* %arrayinit.element31, align 1
- %arrayinit.element32 = getelementptr inbounds i8, i8* %arrayinit.element31, i32 1
- store i8 -3, i8* %arrayinit.element32, align 1
- %arrayinit.element33 = getelementptr inbounds i8, i8* %arrayinit.element32, i32 1
- store i8 1, i8* %arrayinit.element33, align 1
- %arrayinit.element34 = getelementptr inbounds i8, i8* %arrayinit.element33, i32 1
- store i8 1, i8* %arrayinit.element34, align 1
- %arrayinit.element35 = getelementptr inbounds i8, i8* %arrayinit.element34, i32 1
- store i8 92, i8* %arrayinit.element35, align 1
- %arrayinit.element36 = getelementptr inbounds i8, i8* %arrayinit.element35, i32 1
- store i8 4, i8* %arrayinit.element36, align 1
- %arrayinit.element37 = getelementptr inbounds i8, i8* %arrayinit.element36, i32 1
- store i8 64, i8* %arrayinit.element37, align 1
- %arrayinit.element38 = getelementptr inbounds i8, i8* %arrayinit.element37, i32 1
- store i8 65, i8* %arrayinit.element38, align 1
- %arrayinit.element39 = getelementptr inbounds i8, i8* %arrayinit.element38, i32 1
- store i8 0, i8* %arrayinit.element39, align 1
- %arrayinit.element40 = getelementptr inbounds i8, i8* %arrayinit.element39, i32 1
- store i8 15, i8* %arrayinit.element40, align 1
- %arrayinit.element41 = getelementptr inbounds i8, i8* %arrayinit.element40, i32 1
- store i8 11, i8* %arrayinit.element41, align 1
- %arrayinit.element42 = getelementptr inbounds i8, i8* %arrayinit.element41, i32 1
- store i8 32, i8* %arrayinit.element42, align 1
- %arrayinit.element43 = getelementptr inbounds i8, i8* %arrayinit.element42, i32 1
- %a11 = load i8, i8* %old_val, align 1
- store i8 %a11, i8* %arrayinit.element43, align 1
- %arrayinit.element44 = getelementptr inbounds i8, i8* %arrayinit.element43, i32 1
- store i8 32, i8* %arrayinit.element44, align 1
- %arrayinit.element45 = getelementptr inbounds i8, i8* %arrayinit.element44, i32 1
- %a12 = load i8, i8* %simd, align 1
- store i8 %a12, i8* %arrayinit.element45, align 1
- %arrayinit.element46 = getelementptr inbounds i8, i8* %arrayinit.element45, i32 1
- store i8 -3, i8* %arrayinit.element46, align 1
- %arrayinit.element47 = getelementptr inbounds i8, i8* %arrayinit.element46, i32 1
- store i8 1, i8* %arrayinit.element47, align 1
- %arrayinit.element48 = getelementptr inbounds i8, i8* %arrayinit.element47, i32 1
- store i8 2, i8* %arrayinit.element48, align 1
- %arrayinit.element49 = getelementptr inbounds i8, i8* %arrayinit.element48, i32 1
- store i8 92, i8* %arrayinit.element49, align 1
- %arrayinit.element50 = getelementptr inbounds i8, i8* %arrayinit.element49, i32 1
- store i8 4, i8* %arrayinit.element50, align 1
- %arrayinit.element51 = getelementptr inbounds i8, i8* %arrayinit.element50, i32 1
- store i8 64, i8* %arrayinit.element51, align 1
- %arrayinit.element52 = getelementptr inbounds i8, i8* %arrayinit.element51, i32 1
- store i8 65, i8* %arrayinit.element52, align 1
- %arrayinit.element53 = getelementptr inbounds i8, i8* %arrayinit.element52, i32 1
- store i8 0, i8* %arrayinit.element53, align 1
- %arrayinit.element54 = getelementptr inbounds i8, i8* %arrayinit.element53, i32 1
- store i8 15, i8* %arrayinit.element54, align 1
- %arrayinit.element55 = getelementptr inbounds i8, i8* %arrayinit.element54, i32 1
- store i8 11, i8* %arrayinit.element55, align 1
- %arrayinit.element56 = getelementptr inbounds i8, i8* %arrayinit.element55, i32 1
- store i8 32, i8* %arrayinit.element56, align 1
- %arrayinit.element57 = getelementptr inbounds i8, i8* %arrayinit.element56, i32 1
- %a13 = load i8, i8* %old_val, align 1
- store i8 %a13, i8* %arrayinit.element57, align 1
- %arrayinit.element58 = getelementptr inbounds i8, i8* %arrayinit.element57, i32 1
- store i8 32, i8* %arrayinit.element58, align 1
- %arrayinit.element59 = getelementptr inbounds i8, i8* %arrayinit.element58, i32 1
- %a14 = load i8, i8* %simd, align 1
- store i8 %a14, i8* %arrayinit.element59, align 1
- %arrayinit.element60 = getelementptr inbounds i8, i8* %arrayinit.element59, i32 1
- store i8 -3, i8* %arrayinit.element60, align 1
- %arrayinit.element61 = getelementptr inbounds i8, i8* %arrayinit.element60, i32 1
- store i8 1, i8* %arrayinit.element61, align 1
- %arrayinit.element62 = getelementptr inbounds i8, i8* %arrayinit.element61, i32 1
- store i8 3, i8* %arrayinit.element62, align 1
- %arrayinit.element63 = getelementptr inbounds i8, i8* %arrayinit.element62, i32 1
- store i8 92, i8* %arrayinit.element63, align 1
- %arrayinit.element64 = getelementptr inbounds i8, i8* %arrayinit.element63, i32 1
- store i8 4, i8* %arrayinit.element64, align 1
- %arrayinit.element65 = getelementptr inbounds i8, i8* %arrayinit.element64, i32 1
- store i8 64, i8* %arrayinit.element65, align 1
- %arrayinit.element66 = getelementptr inbounds i8, i8* %arrayinit.element65, i32 1
- store i8 65, i8* %arrayinit.element66, align 1
- %arrayinit.element67 = getelementptr inbounds i8, i8* %arrayinit.element66, i32 1
- store i8 0, i8* %arrayinit.element67, align 1
- %arrayinit.element68 = getelementptr inbounds i8, i8* %arrayinit.element67, i32 1
- store i8 15, i8* %arrayinit.element68, align 1
- %arrayinit.element69 = getelementptr inbounds i8, i8* %arrayinit.element68, i32 1
- store i8 11, i8* %arrayinit.element69, align 1
- %arrayinit.element70 = getelementptr inbounds i8, i8* %arrayinit.element69, i32 1
- store i8 32, i8* %arrayinit.element70, align 1
- %arrayinit.element71 = getelementptr inbounds i8, i8* %arrayinit.element70, i32 1
- %a15 = load i8, i8* %simd, align 1
- store i8 %a15, i8* %arrayinit.element71, align 1
- %arrayinit.element72 = getelementptr inbounds i8, i8* %arrayinit.element71, i32 1
- store i8 32, i8* %arrayinit.element72, align 1
- %arrayinit.element73 = getelementptr inbounds i8, i8* %arrayinit.element72, i32 1
- %a16 = load i8, i8* %new_val, align 1
- store i8 %a16, i8* %arrayinit.element73, align 1
- %arrayinit.element74 = getelementptr inbounds i8, i8* %arrayinit.element73, i32 1
- store i8 -3, i8* %arrayinit.element74, align 1
- %arrayinit.element75 = getelementptr inbounds i8, i8* %arrayinit.element74, i32 1
- store i8 2, i8* %arrayinit.element75, align 1
- %arrayinit.element76 = getelementptr inbounds i8, i8* %arrayinit.element75, i32 1
- store i8 1, i8* %arrayinit.element76, align 1
- %arrayinit.element77 = getelementptr inbounds i8, i8* %arrayinit.element76, i32 1
- store i8 33, i8* %arrayinit.element77, align 1
- %arrayinit.element78 = getelementptr inbounds i8, i8* %arrayinit.element77, i32 1
- %a17 = load i8, i8* %simd, align 1
- store i8 %a17, i8* %arrayinit.element78, align 1
- %arrayinit.element79 = getelementptr inbounds i8, i8* %arrayinit.element78, i32 1
- store i8 32, i8* %arrayinit.element79, align 1
- %arrayinit.element80 = getelementptr inbounds i8, i8* %arrayinit.element79, i32 1
- %a18 = load i8, i8* %new_val, align 1
- store i8 %a18, i8* %arrayinit.element80, align 1
- %arrayinit.element81 = getelementptr inbounds i8, i8* %arrayinit.element80, i32 1
- store i8 32, i8* %arrayinit.element81, align 1
- %arrayinit.element82 = getelementptr inbounds i8, i8* %arrayinit.element81, i32 1
- %a19 = load i8, i8* %simd, align 1
- store i8 %a19, i8* %arrayinit.element82, align 1
- %arrayinit.element83 = getelementptr inbounds i8, i8* %arrayinit.element82, i32 1
- store i8 -3, i8* %arrayinit.element83, align 1
- %arrayinit.element84 = getelementptr inbounds i8, i8* %arrayinit.element83, i32 1
- store i8 1, i8* %arrayinit.element84, align 1
- %arrayinit.element85 = getelementptr inbounds i8, i8* %arrayinit.element84, i32 1
- store i8 0, i8* %arrayinit.element85, align 1
- %arrayinit.element86 = getelementptr inbounds i8, i8* %arrayinit.element85, i32 1
- store i8 92, i8* %arrayinit.element86, align 1
- %arrayinit.element87 = getelementptr inbounds i8, i8* %arrayinit.element86, i32 1
- store i8 4, i8* %arrayinit.element87, align 1
- %arrayinit.element88 = getelementptr inbounds i8, i8* %arrayinit.element87, i32 1
- store i8 64, i8* %arrayinit.element88, align 1
- %arrayinit.element89 = getelementptr inbounds i8, i8* %arrayinit.element88, i32 1
- store i8 65, i8* %arrayinit.element89, align 1
- %arrayinit.element90 = getelementptr inbounds i8, i8* %arrayinit.element89, i32 1
- store i8 0, i8* %arrayinit.element90, align 1
- %arrayinit.element91 = getelementptr inbounds i8, i8* %arrayinit.element90, i32 1
- store i8 15, i8* %arrayinit.element91, align 1
- %arrayinit.element92 = getelementptr inbounds i8, i8* %arrayinit.element91, i32 1
- store i8 11, i8* %arrayinit.element92, align 1
- %arrayinit.element93 = getelementptr inbounds i8, i8* %arrayinit.element92, i32 1
- store i8 32, i8* %arrayinit.element93, align 1
- %arrayinit.element94 = getelementptr inbounds i8, i8* %arrayinit.element93, i32 1
- %a20 = load i8, i8* %new_val, align 1
- store i8 %a20, i8* %arrayinit.element94, align 1
- %arrayinit.element95 = getelementptr inbounds i8, i8* %arrayinit.element94, i32 1
- store i8 32, i8* %arrayinit.element95, align 1
- %arrayinit.element96 = getelementptr inbounds i8, i8* %arrayinit.element95, i32 1
- %a21 = load i8, i8* %simd, align 1
- store i8 %a21, i8* %arrayinit.element96, align 1
- %arrayinit.element97 = getelementptr inbounds i8, i8* %arrayinit.element96, i32 1
- store i8 -3, i8* %arrayinit.element97, align 1
- %arrayinit.element98 = getelementptr inbounds i8, i8* %arrayinit.element97, i32 1
- store i8 1, i8* %arrayinit.element98, align 1
- %arrayinit.element99 = getelementptr inbounds i8, i8* %arrayinit.element98, i32 1
- store i8 1, i8* %arrayinit.element99, align 1
- %arrayinit.element100 = getelementptr inbounds i8, i8* %arrayinit.element99, i32 1
- store i8 92, i8* %arrayinit.element100, align 1
- %arrayinit.element101 = getelementptr inbounds i8, i8* %arrayinit.element100, i32 1
- store i8 4, i8* %arrayinit.element101, align 1
- %arrayinit.element102 = getelementptr inbounds i8, i8* %arrayinit.element101, i32 1
- store i8 64, i8* %arrayinit.element102, align 1
- %arrayinit.element103 = getelementptr inbounds i8, i8* %arrayinit.element102, i32 1
- store i8 65, i8* %arrayinit.element103, align 1
- %arrayinit.element104 = getelementptr inbounds i8, i8* %arrayinit.element103, i32 1
- store i8 0, i8* %arrayinit.element104, align 1
- %arrayinit.element105 = getelementptr inbounds i8, i8* %arrayinit.element104, i32 1
- store i8 15, i8* %arrayinit.element105, align 1
- %arrayinit.element106 = getelementptr inbounds i8, i8* %arrayinit.element105, i32 1
- store i8 11, i8* %arrayinit.element106, align 1
- %arrayinit.element107 = getelementptr inbounds i8, i8* %arrayinit.element106, i32 1
- store i8 32, i8* %arrayinit.element107, align 1
- %arrayinit.element108 = getelementptr inbounds i8, i8* %arrayinit.element107, i32 1
- %a22 = load i8, i8* %old_val, align 1
- store i8 %a22, i8* %arrayinit.element108, align 1
- %arrayinit.element109 = getelementptr inbounds i8, i8* %arrayinit.element108, i32 1
- store i8 32, i8* %arrayinit.element109, align 1
- %arrayinit.element110 = getelementptr inbounds i8, i8* %arrayinit.element109, i32 1
- %a23 = load i8, i8* %simd, align 1
- store i8 %a23, i8* %arrayinit.element110, align 1
- %arrayinit.element111 = getelementptr inbounds i8, i8* %arrayinit.element110, i32 1
- store i8 -3, i8* %arrayinit.element111, align 1
- %arrayinit.element112 = getelementptr inbounds i8, i8* %arrayinit.element111, i32 1
- store i8 1, i8* %arrayinit.element112, align 1
- %arrayinit.element113 = getelementptr inbounds i8, i8* %arrayinit.element112, i32 1
- store i8 2, i8* %arrayinit.element113, align 1
- %arrayinit.element114 = getelementptr inbounds i8, i8* %arrayinit.element113, i32 1
- store i8 92, i8* %arrayinit.element114, align 1
- %arrayinit.element115 = getelementptr inbounds i8, i8* %arrayinit.element114, i32 1
- store i8 4, i8* %arrayinit.element115, align 1
- %arrayinit.element116 = getelementptr inbounds i8, i8* %arrayinit.element115, i32 1
- store i8 64, i8* %arrayinit.element116, align 1
- %arrayinit.element117 = getelementptr inbounds i8, i8* %arrayinit.element116, i32 1
- store i8 65, i8* %arrayinit.element117, align 1
- %arrayinit.element118 = getelementptr inbounds i8, i8* %arrayinit.element117, i32 1
- store i8 0, i8* %arrayinit.element118, align 1
- %arrayinit.element119 = getelementptr inbounds i8, i8* %arrayinit.element118, i32 1
- store i8 15, i8* %arrayinit.element119, align 1
- %arrayinit.element120 = getelementptr inbounds i8, i8* %arrayinit.element119, i32 1
- store i8 11, i8* %arrayinit.element120, align 1
- %arrayinit.element121 = getelementptr inbounds i8, i8* %arrayinit.element120, i32 1
- store i8 32, i8* %arrayinit.element121, align 1
- %arrayinit.element122 = getelementptr inbounds i8, i8* %arrayinit.element121, i32 1
- %a24 = load i8, i8* %old_val, align 1
- store i8 %a24, i8* %arrayinit.element122, align 1
- %arrayinit.element123 = getelementptr inbounds i8, i8* %arrayinit.element122, i32 1
- store i8 32, i8* %arrayinit.element123, align 1
- %arrayinit.element124 = getelementptr inbounds i8, i8* %arrayinit.element123, i32 1
- %a25 = load i8, i8* %simd, align 1
- store i8 %a25, i8* %arrayinit.element124, align 1
- %arrayinit.element125 = getelementptr inbounds i8, i8* %arrayinit.element124, i32 1
- store i8 -3, i8* %arrayinit.element125, align 1
- %arrayinit.element126 = getelementptr inbounds i8, i8* %arrayinit.element125, i32 1
- store i8 1, i8* %arrayinit.element126, align 1
- %arrayinit.element127 = getelementptr inbounds i8, i8* %arrayinit.element126, i32 1
- store i8 3, i8* %arrayinit.element127, align 1
- %arrayinit.element128 = getelementptr inbounds i8, i8* %arrayinit.element127, i32 1
- store i8 92, i8* %arrayinit.element128, align 1
- %arrayinit.element129 = getelementptr inbounds i8, i8* %arrayinit.element128, i32 1
- store i8 4, i8* %arrayinit.element129, align 1
- %arrayinit.element130 = getelementptr inbounds i8, i8* %arrayinit.element129, i32 1
- store i8 64, i8* %arrayinit.element130, align 1
- %arrayinit.element131 = getelementptr inbounds i8, i8* %arrayinit.element130, i32 1
- store i8 65, i8* %arrayinit.element131, align 1
- %arrayinit.element132 = getelementptr inbounds i8, i8* %arrayinit.element131, i32 1
- store i8 0, i8* %arrayinit.element132, align 1
- %arrayinit.element133 = getelementptr inbounds i8, i8* %arrayinit.element132, i32 1
- store i8 15, i8* %arrayinit.element133, align 1
- %arrayinit.element134 = getelementptr inbounds i8, i8* %arrayinit.element133, i32 1
- store i8 11, i8* %arrayinit.element134, align 1
- %arrayinit.element135 = getelementptr inbounds i8, i8* %arrayinit.element134, i32 1
- store i8 32, i8* %arrayinit.element135, align 1
- %arrayinit.element136 = getelementptr inbounds i8, i8* %arrayinit.element135, i32 1
- %a26 = load i8, i8* %simd, align 1
- store i8 %a26, i8* %arrayinit.element136, align 1
- %arrayinit.element137 = getelementptr inbounds i8, i8* %arrayinit.element136, i32 1
- store i8 32, i8* %arrayinit.element137, align 1
- %arrayinit.element138 = getelementptr inbounds i8, i8* %arrayinit.element137, i32 1
- %a27 = load i8, i8* %new_val, align 1
- store i8 %a27, i8* %arrayinit.element138, align 1
- %arrayinit.element139 = getelementptr inbounds i8, i8* %arrayinit.element138, i32 1
- store i8 -3, i8* %arrayinit.element139, align 1
- %arrayinit.element140 = getelementptr inbounds i8, i8* %arrayinit.element139, i32 1
- store i8 2, i8* %arrayinit.element140, align 1
- %arrayinit.element141 = getelementptr inbounds i8, i8* %arrayinit.element140, i32 1
- store i8 2, i8* %arrayinit.element141, align 1
- %arrayinit.element142 = getelementptr inbounds i8, i8* %arrayinit.element141, i32 1
- store i8 33, i8* %arrayinit.element142, align 1
- %arrayinit.element143 = getelementptr inbounds i8, i8* %arrayinit.element142, i32 1
- %a28 = load i8, i8* %simd, align 1
- store i8 %a28, i8* %arrayinit.element143, align 1
- %arrayinit.element144 = getelementptr inbounds i8, i8* %arrayinit.element143, i32 1
- store i8 32, i8* %arrayinit.element144, align 1
- %arrayinit.element145 = getelementptr inbounds i8, i8* %arrayinit.element144, i32 1
- %a29 = load i8, i8* %new_val, align 1
- store i8 %a29, i8* %arrayinit.element145, align 1
- %arrayinit.element146 = getelementptr inbounds i8, i8* %arrayinit.element145, i32 1
- store i8 32, i8* %arrayinit.element146, align 1
- %arrayinit.element147 = getelementptr inbounds i8, i8* %arrayinit.element146, i32 1
- %a30 = load i8, i8* %simd, align 1
- store i8 %a30, i8* %arrayinit.element147, align 1
- %arrayinit.element148 = getelementptr inbounds i8, i8* %arrayinit.element147, i32 1
- store i8 -3, i8* %arrayinit.element148, align 1
- %arrayinit.element149 = getelementptr inbounds i8, i8* %arrayinit.element148, i32 1
- store i8 1, i8* %arrayinit.element149, align 1
- %arrayinit.element150 = getelementptr inbounds i8, i8* %arrayinit.element149, i32 1
- store i8 0, i8* %arrayinit.element150, align 1
- %arrayinit.element151 = getelementptr inbounds i8, i8* %arrayinit.element150, i32 1
- store i8 92, i8* %arrayinit.element151, align 1
- %arrayinit.element152 = getelementptr inbounds i8, i8* %arrayinit.element151, i32 1
- store i8 4, i8* %arrayinit.element152, align 1
- %arrayinit.element153 = getelementptr inbounds i8, i8* %arrayinit.element152, i32 1
- store i8 64, i8* %arrayinit.element153, align 1
- %arrayinit.element154 = getelementptr inbounds i8, i8* %arrayinit.element153, i32 1
- store i8 65, i8* %arrayinit.element154, align 1
- %arrayinit.element155 = getelementptr inbounds i8, i8* %arrayinit.element154, i32 1
- store i8 0, i8* %arrayinit.element155, align 1
- %arrayinit.element156 = getelementptr inbounds i8, i8* %arrayinit.element155, i32 1
- store i8 15, i8* %arrayinit.element156, align 1
- %arrayinit.element157 = getelementptr inbounds i8, i8* %arrayinit.element156, i32 1
- store i8 11, i8* %arrayinit.element157, align 1
- %arrayinit.element158 = getelementptr inbounds i8, i8* %arrayinit.element157, i32 1
- store i8 32, i8* %arrayinit.element158, align 1
- %arrayinit.element159 = getelementptr inbounds i8, i8* %arrayinit.element158, i32 1
- %a31 = load i8, i8* %new_val, align 1
- store i8 %a31, i8* %arrayinit.element159, align 1
- %arrayinit.element160 = getelementptr inbounds i8, i8* %arrayinit.element159, i32 1
- store i8 32, i8* %arrayinit.element160, align 1
- %arrayinit.element161 = getelementptr inbounds i8, i8* %arrayinit.element160, i32 1
- %a32 = load i8, i8* %simd, align 1
- store i8 %a32, i8* %arrayinit.element161, align 1
- %arrayinit.element162 = getelementptr inbounds i8, i8* %arrayinit.element161, i32 1
- store i8 -3, i8* %arrayinit.element162, align 1
- %arrayinit.element163 = getelementptr inbounds i8, i8* %arrayinit.element162, i32 1
- store i8 1, i8* %arrayinit.element163, align 1
- %arrayinit.element164 = getelementptr inbounds i8, i8* %arrayinit.element163, i32 1
- store i8 1, i8* %arrayinit.element164, align 1
- %arrayinit.element165 = getelementptr inbounds i8, i8* %arrayinit.element164, i32 1
- store i8 92, i8* %arrayinit.element165, align 1
- %arrayinit.element166 = getelementptr inbounds i8, i8* %arrayinit.element165, i32 1
- store i8 4, i8* %arrayinit.element166, align 1
- %arrayinit.element167 = getelementptr inbounds i8, i8* %arrayinit.element166, i32 1
- store i8 64, i8* %arrayinit.element167, align 1
- %arrayinit.element168 = getelementptr inbounds i8, i8* %arrayinit.element167, i32 1
- store i8 65, i8* %arrayinit.element168, align 1
- %arrayinit.element169 = getelementptr inbounds i8, i8* %arrayinit.element168, i32 1
- store i8 0, i8* %arrayinit.element169, align 1
- %arrayinit.element170 = getelementptr inbounds i8, i8* %arrayinit.element169, i32 1
- store i8 15, i8* %arrayinit.element170, align 1
- %arrayinit.element171 = getelementptr inbounds i8, i8* %arrayinit.element170, i32 1
- store i8 11, i8* %arrayinit.element171, align 1
- %arrayinit.element172 = getelementptr inbounds i8, i8* %arrayinit.element171, i32 1
- store i8 32, i8* %arrayinit.element172, align 1
- %arrayinit.element173 = getelementptr inbounds i8, i8* %arrayinit.element172, i32 1
- %a33 = load i8, i8* %new_val, align 1
- store i8 %a33, i8* %arrayinit.element173, align 1
- %arrayinit.element174 = getelementptr inbounds i8, i8* %arrayinit.element173, i32 1
- store i8 32, i8* %arrayinit.element174, align 1
- %arrayinit.element175 = getelementptr inbounds i8, i8* %arrayinit.element174, i32 1
- %a34 = load i8, i8* %simd, align 1
- store i8 %a34, i8* %arrayinit.element175, align 1
- %arrayinit.element176 = getelementptr inbounds i8, i8* %arrayinit.element175, i32 1
- store i8 -3, i8* %arrayinit.element176, align 1
- %arrayinit.element177 = getelementptr inbounds i8, i8* %arrayinit.element176, i32 1
- store i8 1, i8* %arrayinit.element177, align 1
- %arrayinit.element178 = getelementptr inbounds i8, i8* %arrayinit.element177, i32 1
- store i8 2, i8* %arrayinit.element178, align 1
- %arrayinit.element179 = getelementptr inbounds i8, i8* %arrayinit.element178, i32 1
- store i8 92, i8* %arrayinit.element179, align 1
- %arrayinit.element180 = getelementptr inbounds i8, i8* %arrayinit.element179, i32 1
- store i8 4, i8* %arrayinit.element180, align 1
- %arrayinit.element181 = getelementptr inbounds i8, i8* %arrayinit.element180, i32 1
- store i8 64, i8* %arrayinit.element181, align 1
- %arrayinit.element182 = getelementptr inbounds i8, i8* %arrayinit.element181, i32 1
- store i8 65, i8* %arrayinit.element182, align 1
- %arrayinit.element183 = getelementptr inbounds i8, i8* %arrayinit.element182, i32 1
- store i8 0, i8* %arrayinit.element183, align 1
- %arrayinit.element184 = getelementptr inbounds i8, i8* %arrayinit.element183, i32 1
- store i8 15, i8* %arrayinit.element184, align 1
- %arrayinit.element185 = getelementptr inbounds i8, i8* %arrayinit.element184, i32 1
- store i8 11, i8* %arrayinit.element185, align 1
- %arrayinit.element186 = getelementptr inbounds i8, i8* %arrayinit.element185, i32 1
- store i8 32, i8* %arrayinit.element186, align 1
- %arrayinit.element187 = getelementptr inbounds i8, i8* %arrayinit.element186, i32 1
- %a35 = load i8, i8* %old_val, align 1
- store i8 %a35, i8* %arrayinit.element187, align 1
- %arrayinit.element188 = getelementptr inbounds i8, i8* %arrayinit.element187, i32 1
- store i8 32, i8* %arrayinit.element188, align 1
- %arrayinit.element189 = getelementptr inbounds i8, i8* %arrayinit.element188, i32 1
- %a36 = load i8, i8* %simd, align 1
- store i8 %a36, i8* %arrayinit.element189, align 1
- %arrayinit.element190 = getelementptr inbounds i8, i8* %arrayinit.element189, i32 1
- store i8 -3, i8* %arrayinit.element190, align 1
- %arrayinit.element191 = getelementptr inbounds i8, i8* %arrayinit.element190, i32 1
- store i8 1, i8* %arrayinit.element191, align 1
- %arrayinit.element192 = getelementptr inbounds i8, i8* %arrayinit.element191, i32 1
- store i8 3, i8* %arrayinit.element192, align 1
- %arrayinit.element193 = getelementptr inbounds i8, i8* %arrayinit.element192, i32 1
- store i8 92, i8* %arrayinit.element193, align 1
- %arrayinit.element194 = getelementptr inbounds i8, i8* %arrayinit.element193, i32 1
- store i8 4, i8* %arrayinit.element194, align 1
- %arrayinit.element195 = getelementptr inbounds i8, i8* %arrayinit.element194, i32 1
- store i8 64, i8* %arrayinit.element195, align 1
- %arrayinit.element196 = getelementptr inbounds i8, i8* %arrayinit.element195, i32 1
- store i8 65, i8* %arrayinit.element196, align 1
- %arrayinit.element197 = getelementptr inbounds i8, i8* %arrayinit.element196, i32 1
- store i8 0, i8* %arrayinit.element197, align 1
- %arrayinit.element198 = getelementptr inbounds i8, i8* %arrayinit.element197, i32 1
- store i8 15, i8* %arrayinit.element198, align 1
- %arrayinit.element199 = getelementptr inbounds i8, i8* %arrayinit.element198, i32 1
- store i8 11, i8* %arrayinit.element199, align 1
- %arrayinit.element200 = getelementptr inbounds i8, i8* %arrayinit.element199, i32 1
- store i8 32, i8* %arrayinit.element200, align 1
- %arrayinit.element201 = getelementptr inbounds i8, i8* %arrayinit.element200, i32 1
- %a37 = load i8, i8* %simd, align 1
- store i8 %a37, i8* %arrayinit.element201, align 1
- %arrayinit.element202 = getelementptr inbounds i8, i8* %arrayinit.element201, i32 1
- store i8 32, i8* %arrayinit.element202, align 1
- %arrayinit.element203 = getelementptr inbounds i8, i8* %arrayinit.element202, i32 1
- %a38 = load i8, i8* %new_val, align 1
- store i8 %a38, i8* %arrayinit.element203, align 1
- %arrayinit.element204 = getelementptr inbounds i8, i8* %arrayinit.element203, i32 1
- store i8 -3, i8* %arrayinit.element204, align 1
- %arrayinit.element205 = getelementptr inbounds i8, i8* %arrayinit.element204, i32 1
- store i8 2, i8* %arrayinit.element205, align 1
- %arrayinit.element206 = getelementptr inbounds i8, i8* %arrayinit.element205, i32 1
- store i8 3, i8* %arrayinit.element206, align 1
- %arrayinit.element207 = getelementptr inbounds i8, i8* %arrayinit.element206, i32 1
- store i8 33, i8* %arrayinit.element207, align 1
- %arrayinit.element208 = getelementptr inbounds i8, i8* %arrayinit.element207, i32 1
- %a39 = load i8, i8* %simd, align 1
- store i8 %a39, i8* %arrayinit.element208, align 1
- %arrayinit.element209 = getelementptr inbounds i8, i8* %arrayinit.element208, i32 1
- store i8 32, i8* %arrayinit.element209, align 1
- %arrayinit.element210 = getelementptr inbounds i8, i8* %arrayinit.element209, i32 1
- %a40 = load i8, i8* %new_val, align 1
- store i8 %a40, i8* %arrayinit.element210, align 1
- %arrayinit.element211 = getelementptr inbounds i8, i8* %arrayinit.element210, i32 1
- store i8 32, i8* %arrayinit.element211, align 1
- %arrayinit.element212 = getelementptr inbounds i8, i8* %arrayinit.element211, i32 1
- %a41 = load i8, i8* %simd, align 1
- store i8 %a41, i8* %arrayinit.element212, align 1
- %arrayinit.element213 = getelementptr inbounds i8, i8* %arrayinit.element212, i32 1
- store i8 -3, i8* %arrayinit.element213, align 1
- %arrayinit.element214 = getelementptr inbounds i8, i8* %arrayinit.element213, i32 1
- store i8 1, i8* %arrayinit.element214, align 1
- %arrayinit.element215 = getelementptr inbounds i8, i8* %arrayinit.element214, i32 1
- store i8 0, i8* %arrayinit.element215, align 1
- %arrayinit.element216 = getelementptr inbounds i8, i8* %arrayinit.element215, i32 1
- store i8 92, i8* %arrayinit.element216, align 1
- %arrayinit.element217 = getelementptr inbounds i8, i8* %arrayinit.element216, i32 1
- store i8 4, i8* %arrayinit.element217, align 1
- %arrayinit.element218 = getelementptr inbounds i8, i8* %arrayinit.element217, i32 1
- store i8 64, i8* %arrayinit.element218, align 1
- %arrayinit.element219 = getelementptr inbounds i8, i8* %arrayinit.element218, i32 1
- store i8 65, i8* %arrayinit.element219, align 1
- %arrayinit.element220 = getelementptr inbounds i8, i8* %arrayinit.element219, i32 1
- store i8 0, i8* %arrayinit.element220, align 1
- %arrayinit.element221 = getelementptr inbounds i8, i8* %arrayinit.element220, i32 1
- store i8 15, i8* %arrayinit.element221, align 1
- %arrayinit.element222 = getelementptr inbounds i8, i8* %arrayinit.element221, i32 1
- store i8 11, i8* %arrayinit.element222, align 1
- %arrayinit.element223 = getelementptr inbounds i8, i8* %arrayinit.element222, i32 1
- store i8 32, i8* %arrayinit.element223, align 1
- %arrayinit.element224 = getelementptr inbounds i8, i8* %arrayinit.element223, i32 1
- %a42 = load i8, i8* %new_val, align 1
- store i8 %a42, i8* %arrayinit.element224, align 1
- %arrayinit.element225 = getelementptr inbounds i8, i8* %arrayinit.element224, i32 1
- store i8 32, i8* %arrayinit.element225, align 1
- %arrayinit.element226 = getelementptr inbounds i8, i8* %arrayinit.element225, i32 1
- %a43 = load i8, i8* %simd, align 1
- store i8 %a43, i8* %arrayinit.element226, align 1
- %arrayinit.element227 = getelementptr inbounds i8, i8* %arrayinit.element226, i32 1
- store i8 -3, i8* %arrayinit.element227, align 1
- %arrayinit.element228 = getelementptr inbounds i8, i8* %arrayinit.element227, i32 1
- store i8 1, i8* %arrayinit.element228, align 1
- %arrayinit.element229 = getelementptr inbounds i8, i8* %arrayinit.element228, i32 1
- store i8 1, i8* %arrayinit.element229, align 1
- %arrayinit.element230 = getelementptr inbounds i8, i8* %arrayinit.element229, i32 1
- store i8 92, i8* %arrayinit.element230, align 1
- %arrayinit.element231 = getelementptr inbounds i8, i8* %arrayinit.element230, i32 1
- store i8 4, i8* %arrayinit.element231, align 1
- %arrayinit.element232 = getelementptr inbounds i8, i8* %arrayinit.element231, i32 1
- store i8 64, i8* %arrayinit.element232, align 1
- %arrayinit.element233 = getelementptr inbounds i8, i8* %arrayinit.element232, i32 1
- store i8 65, i8* %arrayinit.element233, align 1
- %arrayinit.element234 = getelementptr inbounds i8, i8* %arrayinit.element233, i32 1
- store i8 0, i8* %arrayinit.element234, align 1
- %arrayinit.element235 = getelementptr inbounds i8, i8* %arrayinit.element234, i32 1
- store i8 15, i8* %arrayinit.element235, align 1
- %arrayinit.element236 = getelementptr inbounds i8, i8* %arrayinit.element235, i32 1
- store i8 11, i8* %arrayinit.element236, align 1
- %arrayinit.element237 = getelementptr inbounds i8, i8* %arrayinit.element236, i32 1
- store i8 32, i8* %arrayinit.element237, align 1
- %arrayinit.element238 = getelementptr inbounds i8, i8* %arrayinit.element237, i32 1
- %a44 = load i8, i8* %new_val, align 1
- store i8 %a44, i8* %arrayinit.element238, align 1
- %arrayinit.element239 = getelementptr inbounds i8, i8* %arrayinit.element238, i32 1
- store i8 32, i8* %arrayinit.element239, align 1
- %arrayinit.element240 = getelementptr inbounds i8, i8* %arrayinit.element239, i32 1
- %a45 = load i8, i8* %simd, align 1
- store i8 %a45, i8* %arrayinit.element240, align 1
- %arrayinit.element241 = getelementptr inbounds i8, i8* %arrayinit.element240, i32 1
- store i8 -3, i8* %arrayinit.element241, align 1
- %arrayinit.element242 = getelementptr inbounds i8, i8* %arrayinit.element241, i32 1
- store i8 1, i8* %arrayinit.element242, align 1
- %arrayinit.element243 = getelementptr inbounds i8, i8* %arrayinit.element242, i32 1
- store i8 2, i8* %arrayinit.element243, align 1
- %arrayinit.element244 = getelementptr inbounds i8, i8* %arrayinit.element243, i32 1
- store i8 92, i8* %arrayinit.element244, align 1
- %arrayinit.element245 = getelementptr inbounds i8, i8* %arrayinit.element244, i32 1
- store i8 4, i8* %arrayinit.element245, align 1
- %arrayinit.element246 = getelementptr inbounds i8, i8* %arrayinit.element245, i32 1
- store i8 64, i8* %arrayinit.element246, align 1
- %arrayinit.element247 = getelementptr inbounds i8, i8* %arrayinit.element246, i32 1
- store i8 65, i8* %arrayinit.element247, align 1
- %arrayinit.element248 = getelementptr inbounds i8, i8* %arrayinit.element247, i32 1
- store i8 0, i8* %arrayinit.element248, align 1
- %arrayinit.element249 = getelementptr inbounds i8, i8* %arrayinit.element248, i32 1
- store i8 15, i8* %arrayinit.element249, align 1
- %arrayinit.element250 = getelementptr inbounds i8, i8* %arrayinit.element249, i32 1
- store i8 11, i8* %arrayinit.element250, align 1
- %arrayinit.element251 = getelementptr inbounds i8, i8* %arrayinit.element250, i32 1
- store i8 32, i8* %arrayinit.element251, align 1
- %arrayinit.element252 = getelementptr inbounds i8, i8* %arrayinit.element251, i32 1
- %a46 = load i8, i8* %new_val, align 1
- store i8 %a46, i8* %arrayinit.element252, align 1
- %arrayinit.element253 = getelementptr inbounds i8, i8* %arrayinit.element252, i32 1
- store i8 32, i8* %arrayinit.element253, align 1
- %arrayinit.element254 = getelementptr inbounds i8, i8* %arrayinit.element253, i32 1
- %a47 = load i8, i8* %simd, align 1
- store i8 %a47, i8* %arrayinit.element254, align 1
- %arrayinit.element255 = getelementptr inbounds i8, i8* %arrayinit.element254, i32 1
- store i8 -3, i8* %arrayinit.element255, align 1
- %arrayinit.element256 = getelementptr inbounds i8, i8* %arrayinit.element255, i32 1
- store i8 1, i8* %arrayinit.element256, align 1
- %arrayinit.element257 = getelementptr inbounds i8, i8* %arrayinit.element256, i32 1
- store i8 3, i8* %arrayinit.element257, align 1
- %arrayinit.element258 = getelementptr inbounds i8, i8* %arrayinit.element257, i32 1
- store i8 92, i8* %arrayinit.element258, align 1
- %arrayinit.element259 = getelementptr inbounds i8, i8* %arrayinit.element258, i32 1
- store i8 4, i8* %arrayinit.element259, align 1
- %arrayinit.element260 = getelementptr inbounds i8, i8* %arrayinit.element259, i32 1
- store i8 64, i8* %arrayinit.element260, align 1
- %arrayinit.element261 = getelementptr inbounds i8, i8* %arrayinit.element260, i32 1
- store i8 65, i8* %arrayinit.element261, align 1
- %arrayinit.element262 = getelementptr inbounds i8, i8* %arrayinit.element261, i32 1
- store i8 0, i8* %arrayinit.element262, align 1
- %arrayinit.element263 = getelementptr inbounds i8, i8* %arrayinit.element262, i32 1
- store i8 15, i8* %arrayinit.element263, align 1
- %arrayinit.element264 = getelementptr inbounds i8, i8* %arrayinit.element263, i32 1
- store i8 11, i8* %arrayinit.element264, align 1
- %arrayinit.element265 = getelementptr inbounds i8, i8* %arrayinit.element264, i32 1
- store i8 65, i8* %arrayinit.element265, align 1
- %arrayinit.element266 = getelementptr inbounds i8, i8* %arrayinit.element265, i32 1
- store i8 1, i8* %arrayinit.element266, align 1
- %arrayinit.element267 = getelementptr inbounds i8, i8* %arrayinit.element266, i32 1
- store i8 15, i8* %arrayinit.element267, align 1
- %arraydecay = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
- %arraydecay268 = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
- %add.ptr = getelementptr inbounds i8, i8* %arraydecay268, i32 269
- call void @g(i8* %arraydecay, i8* %add.ptr)
+ store i8 32, ptr %code, align 1
+ %arrayinit.element = getelementptr inbounds i8, ptr %code, i32 1
+ %a2 = load i8, ptr %old_val, align 1
+ store i8 %a2, ptr %arrayinit.element, align 1
+ %arrayinit.element1 = getelementptr inbounds i8, ptr %arrayinit.element, i32 1
+ store i8 -3, ptr %arrayinit.element1, align 1
+ %arrayinit.element2 = getelementptr inbounds i8, ptr %arrayinit.element1, i32 1
+ store i8 0, ptr %arrayinit.element2, align 1
+ %arrayinit.element3 = getelementptr inbounds i8, ptr %arrayinit.element2, i32 1
+ store i8 33, ptr %arrayinit.element3, align 1
+ %arrayinit.element4 = getelementptr inbounds i8, ptr %arrayinit.element3, i32 1
+ %a3 = load i8, ptr %simd, align 1
+ store i8 %a3, ptr %arrayinit.element4, align 1
+ %arrayinit.element5 = getelementptr inbounds i8, ptr %arrayinit.element4, i32 1
+ store i8 32, ptr %arrayinit.element5, align 1
+ %arrayinit.element6 = getelementptr inbounds i8, ptr %arrayinit.element5, i32 1
+ %a4 = load i8, ptr %simd, align 1
+ store i8 %a4, ptr %arrayinit.element6, align 1
+ %arrayinit.element7 = getelementptr inbounds i8, ptr %arrayinit.element6, i32 1
+ store i8 32, ptr %arrayinit.element7, align 1
+ %arrayinit.element8 = getelementptr inbounds i8, ptr %arrayinit.element7, i32 1
+ %a5 = load i8, ptr %new_val, align 1
+ store i8 %a5, ptr %arrayinit.element8, align 1
+ %arrayinit.element9 = getelementptr inbounds i8, ptr %arrayinit.element8, i32 1
+ store i8 -3, ptr %arrayinit.element9, align 1
+ %arrayinit.element10 = getelementptr inbounds i8, ptr %arrayinit.element9, i32 1
+ store i8 2, ptr %arrayinit.element10, align 1
+ %arrayinit.element11 = getelementptr inbounds i8, ptr %arrayinit.element10, i32 1
+ store i8 0, ptr %arrayinit.element11, align 1
+ %arrayinit.element12 = getelementptr inbounds i8, ptr %arrayinit.element11, i32 1
+ store i8 33, ptr %arrayinit.element12, align 1
+ %arrayinit.element13 = getelementptr inbounds i8, ptr %arrayinit.element12, i32 1
+ %a6 = load i8, ptr %simd, align 1
+ store i8 %a6, ptr %arrayinit.element13, align 1
+ %arrayinit.element14 = getelementptr inbounds i8, ptr %arrayinit.element13, i32 1
+ store i8 32, ptr %arrayinit.element14, align 1
+ %arrayinit.element15 = getelementptr inbounds i8, ptr %arrayinit.element14, i32 1
+ %a7 = load i8, ptr %new_val, align 1
+ store i8 %a7, ptr %arrayinit.element15, align 1
+ %arrayinit.element16 = getelementptr inbounds i8, ptr %arrayinit.element15, i32 1
+ store i8 32, ptr %arrayinit.element16, align 1
+ %arrayinit.element17 = getelementptr inbounds i8, ptr %arrayinit.element16, i32 1
+ %a8 = load i8, ptr %simd, align 1
+ store i8 %a8, ptr %arrayinit.element17, align 1
+ %arrayinit.element18 = getelementptr inbounds i8, ptr %arrayinit.element17, i32 1
+ store i8 -3, ptr %arrayinit.element18, align 1
+ %arrayinit.element19 = getelementptr inbounds i8, ptr %arrayinit.element18, i32 1
+ store i8 1, ptr %arrayinit.element19, align 1
+ %arrayinit.element20 = getelementptr inbounds i8, ptr %arrayinit.element19, i32 1
+ store i8 0, ptr %arrayinit.element20, align 1
+ %arrayinit.element21 = getelementptr inbounds i8, ptr %arrayinit.element20, i32 1
+ store i8 92, ptr %arrayinit.element21, align 1
+ %arrayinit.element22 = getelementptr inbounds i8, ptr %arrayinit.element21, i32 1
+ store i8 4, ptr %arrayinit.element22, align 1
+ %arrayinit.element23 = getelementptr inbounds i8, ptr %arrayinit.element22, i32 1
+ store i8 64, ptr %arrayinit.element23, align 1
+ %arrayinit.element24 = getelementptr inbounds i8, ptr %arrayinit.element23, i32 1
+ store i8 65, ptr %arrayinit.element24, align 1
+ %arrayinit.element25 = getelementptr inbounds i8, ptr %arrayinit.element24, i32 1
+ store i8 0, ptr %arrayinit.element25, align 1
+ %arrayinit.element26 = getelementptr inbounds i8, ptr %arrayinit.element25, i32 1
+ store i8 15, ptr %arrayinit.element26, align 1
+ %arrayinit.element27 = getelementptr inbounds i8, ptr %arrayinit.element26, i32 1
+ store i8 11, ptr %arrayinit.element27, align 1
+ %arrayinit.element28 = getelementptr inbounds i8, ptr %arrayinit.element27, i32 1
+ store i8 32, ptr %arrayinit.element28, align 1
+ %arrayinit.element29 = getelementptr inbounds i8, ptr %arrayinit.element28, i32 1
+ %a9 = load i8, ptr %old_val, align 1
+ store i8 %a9, ptr %arrayinit.element29, align 1
+ %arrayinit.element30 = getelementptr inbounds i8, ptr %arrayinit.element29, i32 1
+ store i8 32, ptr %arrayinit.element30, align 1
+ %arrayinit.element31 = getelementptr inbounds i8, ptr %arrayinit.element30, i32 1
+ %a10 = load i8, ptr %simd, align 1
+ store i8 %a10, ptr %arrayinit.element31, align 1
+ %arrayinit.element32 = getelementptr inbounds i8, ptr %arrayinit.element31, i32 1
+ store i8 -3, ptr %arrayinit.element32, align 1
+ %arrayinit.element33 = getelementptr inbounds i8, ptr %arrayinit.element32, i32 1
+ store i8 1, ptr %arrayinit.element33, align 1
+ %arrayinit.element34 = getelementptr inbounds i8, ptr %arrayinit.element33, i32 1
+ store i8 1, ptr %arrayinit.element34, align 1
+ %arrayinit.element35 = getelementptr inbounds i8, ptr %arrayinit.element34, i32 1
+ store i8 92, ptr %arrayinit.element35, align 1
+ %arrayinit.element36 = getelementptr inbounds i8, ptr %arrayinit.element35, i32 1
+ store i8 4, ptr %arrayinit.element36, align 1
+ %arrayinit.element37 = getelementptr inbounds i8, ptr %arrayinit.element36, i32 1
+ store i8 64, ptr %arrayinit.element37, align 1
+ %arrayinit.element38 = getelementptr inbounds i8, ptr %arrayinit.element37, i32 1
+ store i8 65, ptr %arrayinit.element38, align 1
+ %arrayinit.element39 = getelementptr inbounds i8, ptr %arrayinit.element38, i32 1
+ store i8 0, ptr %arrayinit.element39, align 1
+ %arrayinit.element40 = getelementptr inbounds i8, ptr %arrayinit.element39, i32 1
+ store i8 15, ptr %arrayinit.element40, align 1
+ %arrayinit.element41 = getelementptr inbounds i8, ptr %arrayinit.element40, i32 1
+ store i8 11, ptr %arrayinit.element41, align 1
+ %arrayinit.element42 = getelementptr inbounds i8, ptr %arrayinit.element41, i32 1
+ store i8 32, ptr %arrayinit.element42, align 1
+ %arrayinit.element43 = getelementptr inbounds i8, ptr %arrayinit.element42, i32 1
+ %a11 = load i8, ptr %old_val, align 1
+ store i8 %a11, ptr %arrayinit.element43, align 1
+ %arrayinit.element44 = getelementptr inbounds i8, ptr %arrayinit.element43, i32 1
+ store i8 32, ptr %arrayinit.element44, align 1
+ %arrayinit.element45 = getelementptr inbounds i8, ptr %arrayinit.element44, i32 1
+ %a12 = load i8, ptr %simd, align 1
+ store i8 %a12, ptr %arrayinit.element45, align 1
+ %arrayinit.element46 = getelementptr inbounds i8, ptr %arrayinit.element45, i32 1
+ store i8 -3, ptr %arrayinit.element46, align 1
+ %arrayinit.element47 = getelementptr inbounds i8, ptr %arrayinit.element46, i32 1
+ store i8 1, ptr %arrayinit.element47, align 1
+ %arrayinit.element48 = getelementptr inbounds i8, ptr %arrayinit.element47, i32 1
+ store i8 2, ptr %arrayinit.element48, align 1
+ %arrayinit.element49 = getelementptr inbounds i8, ptr %arrayinit.element48, i32 1
+ store i8 92, ptr %arrayinit.element49, align 1
+ %arrayinit.element50 = getelementptr inbounds i8, ptr %arrayinit.element49, i32 1
+ store i8 4, ptr %arrayinit.element50, align 1
+ %arrayinit.element51 = getelementptr inbounds i8, ptr %arrayinit.element50, i32 1
+ store i8 64, ptr %arrayinit.element51, align 1
+ %arrayinit.element52 = getelementptr inbounds i8, ptr %arrayinit.element51, i32 1
+ store i8 65, ptr %arrayinit.element52, align 1
+ %arrayinit.element53 = getelementptr inbounds i8, ptr %arrayinit.element52, i32 1
+ store i8 0, ptr %arrayinit.element53, align 1
+ %arrayinit.element54 = getelementptr inbounds i8, ptr %arrayinit.element53, i32 1
+ store i8 15, ptr %arrayinit.element54, align 1
+ %arrayinit.element55 = getelementptr inbounds i8, ptr %arrayinit.element54, i32 1
+ store i8 11, ptr %arrayinit.element55, align 1
+ %arrayinit.element56 = getelementptr inbounds i8, ptr %arrayinit.element55, i32 1
+ store i8 32, ptr %arrayinit.element56, align 1
+ %arrayinit.element57 = getelementptr inbounds i8, ptr %arrayinit.element56, i32 1
+ %a13 = load i8, ptr %old_val, align 1
+ store i8 %a13, ptr %arrayinit.element57, align 1
+ %arrayinit.element58 = getelementptr inbounds i8, ptr %arrayinit.element57, i32 1
+ store i8 32, ptr %arrayinit.element58, align 1
+ %arrayinit.element59 = getelementptr inbounds i8, ptr %arrayinit.element58, i32 1
+ %a14 = load i8, ptr %simd, align 1
+ store i8 %a14, ptr %arrayinit.element59, align 1
+ %arrayinit.element60 = getelementptr inbounds i8, ptr %arrayinit.element59, i32 1
+ store i8 -3, ptr %arrayinit.element60, align 1
+ %arrayinit.element61 = getelementptr inbounds i8, ptr %arrayinit.element60, i32 1
+ store i8 1, ptr %arrayinit.element61, align 1
+ %arrayinit.element62 = getelementptr inbounds i8, ptr %arrayinit.element61, i32 1
+ store i8 3, ptr %arrayinit.element62, align 1
+ %arrayinit.element63 = getelementptr inbounds i8, ptr %arrayinit.element62, i32 1
+ store i8 92, ptr %arrayinit.element63, align 1
+ %arrayinit.element64 = getelementptr inbounds i8, ptr %arrayinit.element63, i32 1
+ store i8 4, ptr %arrayinit.element64, align 1
+ %arrayinit.element65 = getelementptr inbounds i8, ptr %arrayinit.element64, i32 1
+ store i8 64, ptr %arrayinit.element65, align 1
+ %arrayinit.element66 = getelementptr inbounds i8, ptr %arrayinit.element65, i32 1
+ store i8 65, ptr %arrayinit.element66, align 1
+ %arrayinit.element67 = getelementptr inbounds i8, ptr %arrayinit.element66, i32 1
+ store i8 0, ptr %arrayinit.element67, align 1
+ %arrayinit.element68 = getelementptr inbounds i8, ptr %arrayinit.element67, i32 1
+ store i8 15, ptr %arrayinit.element68, align 1
+ %arrayinit.element69 = getelementptr inbounds i8, ptr %arrayinit.element68, i32 1
+ store i8 11, ptr %arrayinit.element69, align 1
+ %arrayinit.element70 = getelementptr inbounds i8, ptr %arrayinit.element69, i32 1
+ store i8 32, ptr %arrayinit.element70, align 1
+ %arrayinit.element71 = getelementptr inbounds i8, ptr %arrayinit.element70, i32 1
+ %a15 = load i8, ptr %simd, align 1
+ store i8 %a15, ptr %arrayinit.element71, align 1
+ %arrayinit.element72 = getelementptr inbounds i8, ptr %arrayinit.element71, i32 1
+ store i8 32, ptr %arrayinit.element72, align 1
+ %arrayinit.element73 = getelementptr inbounds i8, ptr %arrayinit.element72, i32 1
+ %a16 = load i8, ptr %new_val, align 1
+ store i8 %a16, ptr %arrayinit.element73, align 1
+ %arrayinit.element74 = getelementptr inbounds i8, ptr %arrayinit.element73, i32 1
+ store i8 -3, ptr %arrayinit.element74, align 1
+ %arrayinit.element75 = getelementptr inbounds i8, ptr %arrayinit.element74, i32 1
+ store i8 2, ptr %arrayinit.element75, align 1
+ %arrayinit.element76 = getelementptr inbounds i8, ptr %arrayinit.element75, i32 1
+ store i8 1, ptr %arrayinit.element76, align 1
+ %arrayinit.element77 = getelementptr inbounds i8, ptr %arrayinit.element76, i32 1
+ store i8 33, ptr %arrayinit.element77, align 1
+ %arrayinit.element78 = getelementptr inbounds i8, ptr %arrayinit.element77, i32 1
+ %a17 = load i8, ptr %simd, align 1
+ store i8 %a17, ptr %arrayinit.element78, align 1
+ %arrayinit.element79 = getelementptr inbounds i8, ptr %arrayinit.element78, i32 1
+ store i8 32, ptr %arrayinit.element79, align 1
+ %arrayinit.element80 = getelementptr inbounds i8, ptr %arrayinit.element79, i32 1
+ %a18 = load i8, ptr %new_val, align 1
+ store i8 %a18, ptr %arrayinit.element80, align 1
+ %arrayinit.element81 = getelementptr inbounds i8, ptr %arrayinit.element80, i32 1
+ store i8 32, ptr %arrayinit.element81, align 1
+ %arrayinit.element82 = getelementptr inbounds i8, ptr %arrayinit.element81, i32 1
+ %a19 = load i8, ptr %simd, align 1
+ store i8 %a19, ptr %arrayinit.element82, align 1
+ %arrayinit.element83 = getelementptr inbounds i8, ptr %arrayinit.element82, i32 1
+ store i8 -3, ptr %arrayinit.element83, align 1
+ %arrayinit.element84 = getelementptr inbounds i8, ptr %arrayinit.element83, i32 1
+ store i8 1, ptr %arrayinit.element84, align 1
+ %arrayinit.element85 = getelementptr inbounds i8, ptr %arrayinit.element84, i32 1
+ store i8 0, ptr %arrayinit.element85, align 1
+ %arrayinit.element86 = getelementptr inbounds i8, ptr %arrayinit.element85, i32 1
+ store i8 92, ptr %arrayinit.element86, align 1
+ %arrayinit.element87 = getelementptr inbounds i8, ptr %arrayinit.element86, i32 1
+ store i8 4, ptr %arrayinit.element87, align 1
+ %arrayinit.element88 = getelementptr inbounds i8, ptr %arrayinit.element87, i32 1
+ store i8 64, ptr %arrayinit.element88, align 1
+ %arrayinit.element89 = getelementptr inbounds i8, ptr %arrayinit.element88, i32 1
+ store i8 65, ptr %arrayinit.element89, align 1
+ %arrayinit.element90 = getelementptr inbounds i8, ptr %arrayinit.element89, i32 1
+ store i8 0, ptr %arrayinit.element90, align 1
+ %arrayinit.element91 = getelementptr inbounds i8, ptr %arrayinit.element90, i32 1
+ store i8 15, ptr %arrayinit.element91, align 1
+ %arrayinit.element92 = getelementptr inbounds i8, ptr %arrayinit.element91, i32 1
+ store i8 11, ptr %arrayinit.element92, align 1
+ %arrayinit.element93 = getelementptr inbounds i8, ptr %arrayinit.element92, i32 1
+ store i8 32, ptr %arrayinit.element93, align 1
+ %arrayinit.element94 = getelementptr inbounds i8, ptr %arrayinit.element93, i32 1
+ %a20 = load i8, ptr %new_val, align 1
+ store i8 %a20, ptr %arrayinit.element94, align 1
+ %arrayinit.element95 = getelementptr inbounds i8, ptr %arrayinit.element94, i32 1
+ store i8 32, ptr %arrayinit.element95, align 1
+ %arrayinit.element96 = getelementptr inbounds i8, ptr %arrayinit.element95, i32 1
+ %a21 = load i8, ptr %simd, align 1
+ store i8 %a21, ptr %arrayinit.element96, align 1
+ %arrayinit.element97 = getelementptr inbounds i8, ptr %arrayinit.element96, i32 1
+ store i8 -3, ptr %arrayinit.element97, align 1
+ %arrayinit.element98 = getelementptr inbounds i8, ptr %arrayinit.element97, i32 1
+ store i8 1, ptr %arrayinit.element98, align 1
+ %arrayinit.element99 = getelementptr inbounds i8, ptr %arrayinit.element98, i32 1
+ store i8 1, ptr %arrayinit.element99, align 1
+ %arrayinit.element100 = getelementptr inbounds i8, ptr %arrayinit.element99, i32 1
+ store i8 92, ptr %arrayinit.element100, align 1
+ %arrayinit.element101 = getelementptr inbounds i8, ptr %arrayinit.element100, i32 1
+ store i8 4, ptr %arrayinit.element101, align 1
+ %arrayinit.element102 = getelementptr inbounds i8, ptr %arrayinit.element101, i32 1
+ store i8 64, ptr %arrayinit.element102, align 1
+ %arrayinit.element103 = getelementptr inbounds i8, ptr %arrayinit.element102, i32 1
+ store i8 65, ptr %arrayinit.element103, align 1
+ %arrayinit.element104 = getelementptr inbounds i8, ptr %arrayinit.element103, i32 1
+ store i8 0, ptr %arrayinit.element104, align 1
+ %arrayinit.element105 = getelementptr inbounds i8, ptr %arrayinit.element104, i32 1
+ store i8 15, ptr %arrayinit.element105, align 1
+ %arrayinit.element106 = getelementptr inbounds i8, ptr %arrayinit.element105, i32 1
+ store i8 11, ptr %arrayinit.element106, align 1
+ %arrayinit.element107 = getelementptr inbounds i8, ptr %arrayinit.element106, i32 1
+ store i8 32, ptr %arrayinit.element107, align 1
+ %arrayinit.element108 = getelementptr inbounds i8, ptr %arrayinit.element107, i32 1
+ %a22 = load i8, ptr %old_val, align 1
+ store i8 %a22, ptr %arrayinit.element108, align 1
+ %arrayinit.element109 = getelementptr inbounds i8, ptr %arrayinit.element108, i32 1
+ store i8 32, ptr %arrayinit.element109, align 1
+ %arrayinit.element110 = getelementptr inbounds i8, ptr %arrayinit.element109, i32 1
+ %a23 = load i8, ptr %simd, align 1
+ store i8 %a23, ptr %arrayinit.element110, align 1
+ %arrayinit.element111 = getelementptr inbounds i8, ptr %arrayinit.element110, i32 1
+ store i8 -3, ptr %arrayinit.element111, align 1
+ %arrayinit.element112 = getelementptr inbounds i8, ptr %arrayinit.element111, i32 1
+ store i8 1, ptr %arrayinit.element112, align 1
+ %arrayinit.element113 = getelementptr inbounds i8, ptr %arrayinit.element112, i32 1
+ store i8 2, ptr %arrayinit.element113, align 1
+ %arrayinit.element114 = getelementptr inbounds i8, ptr %arrayinit.element113, i32 1
+ store i8 92, ptr %arrayinit.element114, align 1
+ %arrayinit.element115 = getelementptr inbounds i8, ptr %arrayinit.element114, i32 1
+ store i8 4, ptr %arrayinit.element115, align 1
+ %arrayinit.element116 = getelementptr inbounds i8, ptr %arrayinit.element115, i32 1
+ store i8 64, ptr %arrayinit.element116, align 1
+ %arrayinit.element117 = getelementptr inbounds i8, ptr %arrayinit.element116, i32 1
+ store i8 65, ptr %arrayinit.element117, align 1
+ %arrayinit.element118 = getelementptr inbounds i8, ptr %arrayinit.element117, i32 1
+ store i8 0, ptr %arrayinit.element118, align 1
+ %arrayinit.element119 = getelementptr inbounds i8, ptr %arrayinit.element118, i32 1
+ store i8 15, ptr %arrayinit.element119, align 1
+ %arrayinit.element120 = getelementptr inbounds i8, ptr %arrayinit.element119, i32 1
+ store i8 11, ptr %arrayinit.element120, align 1
+ %arrayinit.element121 = getelementptr inbounds i8, ptr %arrayinit.element120, i32 1
+ store i8 32, ptr %arrayinit.element121, align 1
+ %arrayinit.element122 = getelementptr inbounds i8, ptr %arrayinit.element121, i32 1
+ %a24 = load i8, ptr %old_val, align 1
+ store i8 %a24, ptr %arrayinit.element122, align 1
+ %arrayinit.element123 = getelementptr inbounds i8, ptr %arrayinit.element122, i32 1
+ store i8 32, ptr %arrayinit.element123, align 1
+ %arrayinit.element124 = getelementptr inbounds i8, ptr %arrayinit.element123, i32 1
+ %a25 = load i8, ptr %simd, align 1
+ store i8 %a25, ptr %arrayinit.element124, align 1
+ %arrayinit.element125 = getelementptr inbounds i8, ptr %arrayinit.element124, i32 1
+ store i8 -3, ptr %arrayinit.element125, align 1
+ %arrayinit.element126 = getelementptr inbounds i8, ptr %arrayinit.element125, i32 1
+ store i8 1, ptr %arrayinit.element126, align 1
+ %arrayinit.element127 = getelementptr inbounds i8, ptr %arrayinit.element126, i32 1
+ store i8 3, ptr %arrayinit.element127, align 1
+ %arrayinit.element128 = getelementptr inbounds i8, ptr %arrayinit.element127, i32 1
+ store i8 92, ptr %arrayinit.element128, align 1
+ %arrayinit.element129 = getelementptr inbounds i8, ptr %arrayinit.element128, i32 1
+ store i8 4, ptr %arrayinit.element129, align 1
+ %arrayinit.element130 = getelementptr inbounds i8, ptr %arrayinit.element129, i32 1
+ store i8 64, ptr %arrayinit.element130, align 1
+ %arrayinit.element131 = getelementptr inbounds i8, ptr %arrayinit.element130, i32 1
+ store i8 65, ptr %arrayinit.element131, align 1
+ %arrayinit.element132 = getelementptr inbounds i8, ptr %arrayinit.element131, i32 1
+ store i8 0, ptr %arrayinit.element132, align 1
+ %arrayinit.element133 = getelementptr inbounds i8, ptr %arrayinit.element132, i32 1
+ store i8 15, ptr %arrayinit.element133, align 1
+ %arrayinit.element134 = getelementptr inbounds i8, ptr %arrayinit.element133, i32 1
+ store i8 11, ptr %arrayinit.element134, align 1
+ %arrayinit.element135 = getelementptr inbounds i8, ptr %arrayinit.element134, i32 1
+ store i8 32, ptr %arrayinit.element135, align 1
+ %arrayinit.element136 = getelementptr inbounds i8, ptr %arrayinit.element135, i32 1
+ %a26 = load i8, ptr %simd, align 1
+ store i8 %a26, ptr %arrayinit.element136, align 1
+ %arrayinit.element137 = getelementptr inbounds i8, ptr %arrayinit.element136, i32 1
+ store i8 32, ptr %arrayinit.element137, align 1
+ %arrayinit.element138 = getelementptr inbounds i8, ptr %arrayinit.element137, i32 1
+ %a27 = load i8, ptr %new_val, align 1
+ store i8 %a27, ptr %arrayinit.element138, align 1
+ %arrayinit.element139 = getelementptr inbounds i8, ptr %arrayinit.element138, i32 1
+ store i8 -3, ptr %arrayinit.element139, align 1
+ %arrayinit.element140 = getelementptr inbounds i8, ptr %arrayinit.element139, i32 1
+ store i8 2, ptr %arrayinit.element140, align 1
+ %arrayinit.element141 = getelementptr inbounds i8, ptr %arrayinit.element140, i32 1
+ store i8 2, ptr %arrayinit.element141, align 1
+ %arrayinit.element142 = getelementptr inbounds i8, ptr %arrayinit.element141, i32 1
+ store i8 33, ptr %arrayinit.element142, align 1
+ %arrayinit.element143 = getelementptr inbounds i8, ptr %arrayinit.element142, i32 1
+ %a28 = load i8, ptr %simd, align 1
+ store i8 %a28, ptr %arrayinit.element143, align 1
+ %arrayinit.element144 = getelementptr inbounds i8, ptr %arrayinit.element143, i32 1
+ store i8 32, ptr %arrayinit.element144, align 1
+ %arrayinit.element145 = getelementptr inbounds i8, ptr %arrayinit.element144, i32 1
+ %a29 = load i8, ptr %new_val, align 1
+ store i8 %a29, ptr %arrayinit.element145, align 1
+ %arrayinit.element146 = getelementptr inbounds i8, ptr %arrayinit.element145, i32 1
+ store i8 32, ptr %arrayinit.element146, align 1
+ %arrayinit.element147 = getelementptr inbounds i8, ptr %arrayinit.element146, i32 1
+ %a30 = load i8, ptr %simd, align 1
+ store i8 %a30, ptr %arrayinit.element147, align 1
+ %arrayinit.element148 = getelementptr inbounds i8, ptr %arrayinit.element147, i32 1
+ store i8 -3, ptr %arrayinit.element148, align 1
+ %arrayinit.element149 = getelementptr inbounds i8, ptr %arrayinit.element148, i32 1
+ store i8 1, ptr %arrayinit.element149, align 1
+ %arrayinit.element150 = getelementptr inbounds i8, ptr %arrayinit.element149, i32 1
+ store i8 0, ptr %arrayinit.element150, align 1
+ %arrayinit.element151 = getelementptr inbounds i8, ptr %arrayinit.element150, i32 1
+ store i8 92, ptr %arrayinit.element151, align 1
+ %arrayinit.element152 = getelementptr inbounds i8, ptr %arrayinit.element151, i32 1
+ store i8 4, ptr %arrayinit.element152, align 1
+ %arrayinit.element153 = getelementptr inbounds i8, ptr %arrayinit.element152, i32 1
+ store i8 64, ptr %arrayinit.element153, align 1
+ %arrayinit.element154 = getelementptr inbounds i8, ptr %arrayinit.element153, i32 1
+ store i8 65, ptr %arrayinit.element154, align 1
+ %arrayinit.element155 = getelementptr inbounds i8, ptr %arrayinit.element154, i32 1
+ store i8 0, ptr %arrayinit.element155, align 1
+ %arrayinit.element156 = getelementptr inbounds i8, ptr %arrayinit.element155, i32 1
+ store i8 15, ptr %arrayinit.element156, align 1
+ %arrayinit.element157 = getelementptr inbounds i8, ptr %arrayinit.element156, i32 1
+ store i8 11, ptr %arrayinit.element157, align 1
+ %arrayinit.element158 = getelementptr inbounds i8, ptr %arrayinit.element157, i32 1
+ store i8 32, ptr %arrayinit.element158, align 1
+ %arrayinit.element159 = getelementptr inbounds i8, ptr %arrayinit.element158, i32 1
+ %a31 = load i8, ptr %new_val, align 1
+ store i8 %a31, ptr %arrayinit.element159, align 1
+ %arrayinit.element160 = getelementptr inbounds i8, ptr %arrayinit.element159, i32 1
+ store i8 32, ptr %arrayinit.element160, align 1
+ %arrayinit.element161 = getelementptr inbounds i8, ptr %arrayinit.element160, i32 1
+ %a32 = load i8, ptr %simd, align 1
+ store i8 %a32, ptr %arrayinit.element161, align 1
+ %arrayinit.element162 = getelementptr inbounds i8, ptr %arrayinit.element161, i32 1
+ store i8 -3, ptr %arrayinit.element162, align 1
+ %arrayinit.element163 = getelementptr inbounds i8, ptr %arrayinit.element162, i32 1
+ store i8 1, ptr %arrayinit.element163, align 1
+ %arrayinit.element164 = getelementptr inbounds i8, ptr %arrayinit.element163, i32 1
+ store i8 1, ptr %arrayinit.element164, align 1
+ %arrayinit.element165 = getelementptr inbounds i8, ptr %arrayinit.element164, i32 1
+ store i8 92, ptr %arrayinit.element165, align 1
+ %arrayinit.element166 = getelementptr inbounds i8, ptr %arrayinit.element165, i32 1
+ store i8 4, ptr %arrayinit.element166, align 1
+ %arrayinit.element167 = getelementptr inbounds i8, ptr %arrayinit.element166, i32 1
+ store i8 64, ptr %arrayinit.element167, align 1
+ %arrayinit.element168 = getelementptr inbounds i8, ptr %arrayinit.element167, i32 1
+ store i8 65, ptr %arrayinit.element168, align 1
+ %arrayinit.element169 = getelementptr inbounds i8, ptr %arrayinit.element168, i32 1
+ store i8 0, ptr %arrayinit.element169, align 1
+ %arrayinit.element170 = getelementptr inbounds i8, ptr %arrayinit.element169, i32 1
+ store i8 15, ptr %arrayinit.element170, align 1
+ %arrayinit.element171 = getelementptr inbounds i8, ptr %arrayinit.element170, i32 1
+ store i8 11, ptr %arrayinit.element171, align 1
+ %arrayinit.element172 = getelementptr inbounds i8, ptr %arrayinit.element171, i32 1
+ store i8 32, ptr %arrayinit.element172, align 1
+ %arrayinit.element173 = getelementptr inbounds i8, ptr %arrayinit.element172, i32 1
+ %a33 = load i8, ptr %new_val, align 1
+ store i8 %a33, ptr %arrayinit.element173, align 1
+ %arrayinit.element174 = getelementptr inbounds i8, ptr %arrayinit.element173, i32 1
+ store i8 32, ptr %arrayinit.element174, align 1
+ %arrayinit.element175 = getelementptr inbounds i8, ptr %arrayinit.element174, i32 1
+ %a34 = load i8, ptr %simd, align 1
+ store i8 %a34, ptr %arrayinit.element175, align 1
+ %arrayinit.element176 = getelementptr inbounds i8, ptr %arrayinit.element175, i32 1
+ store i8 -3, ptr %arrayinit.element176, align 1
+ %arrayinit.element177 = getelementptr inbounds i8, ptr %arrayinit.element176, i32 1
+ store i8 1, ptr %arrayinit.element177, align 1
+ %arrayinit.element178 = getelementptr inbounds i8, ptr %arrayinit.element177, i32 1
+ store i8 2, ptr %arrayinit.element178, align 1
+ %arrayinit.element179 = getelementptr inbounds i8, ptr %arrayinit.element178, i32 1
+ store i8 92, ptr %arrayinit.element179, align 1
+ %arrayinit.element180 = getelementptr inbounds i8, ptr %arrayinit.element179, i32 1
+ store i8 4, ptr %arrayinit.element180, align 1
+ %arrayinit.element181 = getelementptr inbounds i8, ptr %arrayinit.element180, i32 1
+ store i8 64, ptr %arrayinit.element181, align 1
+ %arrayinit.element182 = getelementptr inbounds i8, ptr %arrayinit.element181, i32 1
+ store i8 65, ptr %arrayinit.element182, align 1
+ %arrayinit.element183 = getelementptr inbounds i8, ptr %arrayinit.element182, i32 1
+ store i8 0, ptr %arrayinit.element183, align 1
+ %arrayinit.element184 = getelementptr inbounds i8, ptr %arrayinit.element183, i32 1
+ store i8 15, ptr %arrayinit.element184, align 1
+ %arrayinit.element185 = getelementptr inbounds i8, ptr %arrayinit.element184, i32 1
+ store i8 11, ptr %arrayinit.element185, align 1
+ %arrayinit.element186 = getelementptr inbounds i8, ptr %arrayinit.element185, i32 1
+ store i8 32, ptr %arrayinit.element186, align 1
+ %arrayinit.element187 = getelementptr inbounds i8, ptr %arrayinit.element186, i32 1
+ %a35 = load i8, ptr %old_val, align 1
+ store i8 %a35, ptr %arrayinit.element187, align 1
+ %arrayinit.element188 = getelementptr inbounds i8, ptr %arrayinit.element187, i32 1
+ store i8 32, ptr %arrayinit.element188, align 1
+ %arrayinit.element189 = getelementptr inbounds i8, ptr %arrayinit.element188, i32 1
+ %a36 = load i8, ptr %simd, align 1
+ store i8 %a36, ptr %arrayinit.element189, align 1
+ %arrayinit.element190 = getelementptr inbounds i8, ptr %arrayinit.element189, i32 1
+ store i8 -3, ptr %arrayinit.element190, align 1
+ %arrayinit.element191 = getelementptr inbounds i8, ptr %arrayinit.element190, i32 1
+ store i8 1, ptr %arrayinit.element191, align 1
+ %arrayinit.element192 = getelementptr inbounds i8, ptr %arrayinit.element191, i32 1
+ store i8 3, ptr %arrayinit.element192, align 1
+ %arrayinit.element193 = getelementptr inbounds i8, ptr %arrayinit.element192, i32 1
+ store i8 92, ptr %arrayinit.element193, align 1
+ %arrayinit.element194 = getelementptr inbounds i8, ptr %arrayinit.element193, i32 1
+ store i8 4, ptr %arrayinit.element194, align 1
+ %arrayinit.element195 = getelementptr inbounds i8, ptr %arrayinit.element194, i32 1
+ store i8 64, ptr %arrayinit.element195, align 1
+ %arrayinit.element196 = getelementptr inbounds i8, ptr %arrayinit.element195, i32 1
+ store i8 65, ptr %arrayinit.element196, align 1
+ %arrayinit.element197 = getelementptr inbounds i8, ptr %arrayinit.element196, i32 1
+ store i8 0, ptr %arrayinit.element197, align 1
+ %arrayinit.element198 = getelementptr inbounds i8, ptr %arrayinit.element197, i32 1
+ store i8 15, ptr %arrayinit.element198, align 1
+ %arrayinit.element199 = getelementptr inbounds i8, ptr %arrayinit.element198, i32 1
+ store i8 11, ptr %arrayinit.element199, align 1
+ %arrayinit.element200 = getelementptr inbounds i8, ptr %arrayinit.element199, i32 1
+ store i8 32, ptr %arrayinit.element200, align 1
+ %arrayinit.element201 = getelementptr inbounds i8, ptr %arrayinit.element200, i32 1
+ %a37 = load i8, ptr %simd, align 1
+ store i8 %a37, ptr %arrayinit.element201, align 1
+ %arrayinit.element202 = getelementptr inbounds i8, ptr %arrayinit.element201, i32 1
+ store i8 32, ptr %arrayinit.element202, align 1
+ %arrayinit.element203 = getelementptr inbounds i8, ptr %arrayinit.element202, i32 1
+ %a38 = load i8, ptr %new_val, align 1
+ store i8 %a38, ptr %arrayinit.element203, align 1
+ %arrayinit.element204 = getelementptr inbounds i8, ptr %arrayinit.element203, i32 1
+ store i8 -3, ptr %arrayinit.element204, align 1
+ %arrayinit.element205 = getelementptr inbounds i8, ptr %arrayinit.element204, i32 1
+ store i8 2, ptr %arrayinit.element205, align 1
+ %arrayinit.element206 = getelementptr inbounds i8, ptr %arrayinit.element205, i32 1
+ store i8 3, ptr %arrayinit.element206, align 1
+ %arrayinit.element207 = getelementptr inbounds i8, ptr %arrayinit.element206, i32 1
+ store i8 33, ptr %arrayinit.element207, align 1
+ %arrayinit.element208 = getelementptr inbounds i8, ptr %arrayinit.element207, i32 1
+ %a39 = load i8, ptr %simd, align 1
+ store i8 %a39, ptr %arrayinit.element208, align 1
+ %arrayinit.element209 = getelementptr inbounds i8, ptr %arrayinit.element208, i32 1
+ store i8 32, ptr %arrayinit.element209, align 1
+ %arrayinit.element210 = getelementptr inbounds i8, ptr %arrayinit.element209, i32 1
+ %a40 = load i8, ptr %new_val, align 1
+ store i8 %a40, ptr %arrayinit.element210, align 1
+ %arrayinit.element211 = getelementptr inbounds i8, ptr %arrayinit.element210, i32 1
+ store i8 32, ptr %arrayinit.element211, align 1
+ %arrayinit.element212 = getelementptr inbounds i8, ptr %arrayinit.element211, i32 1
+ %a41 = load i8, ptr %simd, align 1
+ store i8 %a41, ptr %arrayinit.element212, align 1
+ %arrayinit.element213 = getelementptr inbounds i8, ptr %arrayinit.element212, i32 1
+ store i8 -3, ptr %arrayinit.element213, align 1
+ %arrayinit.element214 = getelementptr inbounds i8, ptr %arrayinit.element213, i32 1
+ store i8 1, ptr %arrayinit.element214, align 1
+ %arrayinit.element215 = getelementptr inbounds i8, ptr %arrayinit.element214, i32 1
+ store i8 0, ptr %arrayinit.element215, align 1
+ %arrayinit.element216 = getelementptr inbounds i8, ptr %arrayinit.element215, i32 1
+ store i8 92, ptr %arrayinit.element216, align 1
+ %arrayinit.element217 = getelementptr inbounds i8, ptr %arrayinit.element216, i32 1
+ store i8 4, ptr %arrayinit.element217, align 1
+ %arrayinit.element218 = getelementptr inbounds i8, ptr %arrayinit.element217, i32 1
+ store i8 64, ptr %arrayinit.element218, align 1
+ %arrayinit.element219 = getelementptr inbounds i8, ptr %arrayinit.element218, i32 1
+ store i8 65, ptr %arrayinit.element219, align 1
+ %arrayinit.element220 = getelementptr inbounds i8, ptr %arrayinit.element219, i32 1
+ store i8 0, ptr %arrayinit.element220, align 1
+ %arrayinit.element221 = getelementptr inbounds i8, ptr %arrayinit.element220, i32 1
+ store i8 15, ptr %arrayinit.element221, align 1
+ %arrayinit.element222 = getelementptr inbounds i8, ptr %arrayinit.element221, i32 1
+ store i8 11, ptr %arrayinit.element222, align 1
+ %arrayinit.element223 = getelementptr inbounds i8, ptr %arrayinit.element222, i32 1
+ store i8 32, ptr %arrayinit.element223, align 1
+ %arrayinit.element224 = getelementptr inbounds i8, ptr %arrayinit.element223, i32 1
+ %a42 = load i8, ptr %new_val, align 1
+ store i8 %a42, ptr %arrayinit.element224, align 1
+ %arrayinit.element225 = getelementptr inbounds i8, ptr %arrayinit.element224, i32 1
+ store i8 32, ptr %arrayinit.element225, align 1
+ %arrayinit.element226 = getelementptr inbounds i8, ptr %arrayinit.element225, i32 1
+ %a43 = load i8, ptr %simd, align 1
+ store i8 %a43, ptr %arrayinit.element226, align 1
+ %arrayinit.element227 = getelementptr inbounds i8, ptr %arrayinit.element226, i32 1
+ store i8 -3, ptr %arrayinit.element227, align 1
+ %arrayinit.element228 = getelementptr inbounds i8, ptr %arrayinit.element227, i32 1
+ store i8 1, ptr %arrayinit.element228, align 1
+ %arrayinit.element229 = getelementptr inbounds i8, ptr %arrayinit.element228, i32 1
+ store i8 1, ptr %arrayinit.element229, align 1
+ %arrayinit.element230 = getelementptr inbounds i8, ptr %arrayinit.element229, i32 1
+ store i8 92, ptr %arrayinit.element230, align 1
+ %arrayinit.element231 = getelementptr inbounds i8, ptr %arrayinit.element230, i32 1
+ store i8 4, ptr %arrayinit.element231, align 1
+ %arrayinit.element232 = getelementptr inbounds i8, ptr %arrayinit.element231, i32 1
+ store i8 64, ptr %arrayinit.element232, align 1
+ %arrayinit.element233 = getelementptr inbounds i8, ptr %arrayinit.element232, i32 1
+ store i8 65, ptr %arrayinit.element233, align 1
+ %arrayinit.element234 = getelementptr inbounds i8, ptr %arrayinit.element233, i32 1
+ store i8 0, ptr %arrayinit.element234, align 1
+ %arrayinit.element235 = getelementptr inbounds i8, ptr %arrayinit.element234, i32 1
+ store i8 15, ptr %arrayinit.element235, align 1
+ %arrayinit.element236 = getelementptr inbounds i8, ptr %arrayinit.element235, i32 1
+ store i8 11, ptr %arrayinit.element236, align 1
+ %arrayinit.element237 = getelementptr inbounds i8, ptr %arrayinit.element236, i32 1
+ store i8 32, ptr %arrayinit.element237, align 1
+ %arrayinit.element238 = getelementptr inbounds i8, ptr %arrayinit.element237, i32 1
+ %a44 = load i8, ptr %new_val, align 1
+ store i8 %a44, ptr %arrayinit.element238, align 1
+ %arrayinit.element239 = getelementptr inbounds i8, ptr %arrayinit.element238, i32 1
+ store i8 32, ptr %arrayinit.element239, align 1
+ %arrayinit.element240 = getelementptr inbounds i8, ptr %arrayinit.element239, i32 1
+ %a45 = load i8, ptr %simd, align 1
+ store i8 %a45, ptr %arrayinit.element240, align 1
+ %arrayinit.element241 = getelementptr inbounds i8, ptr %arrayinit.element240, i32 1
+ store i8 -3, ptr %arrayinit.element241, align 1
+ %arrayinit.element242 = getelementptr inbounds i8, ptr %arrayinit.element241, i32 1
+ store i8 1, ptr %arrayinit.element242, align 1
+ %arrayinit.element243 = getelementptr inbounds i8, ptr %arrayinit.element242, i32 1
+ store i8 2, ptr %arrayinit.element243, align 1
+ %arrayinit.element244 = getelementptr inbounds i8, ptr %arrayinit.element243, i32 1
+ store i8 92, ptr %arrayinit.element244, align 1
+ %arrayinit.element245 = getelementptr inbounds i8, ptr %arrayinit.element244, i32 1
+ store i8 4, ptr %arrayinit.element245, align 1
+ %arrayinit.element246 = getelementptr inbounds i8, ptr %arrayinit.element245, i32 1
+ store i8 64, ptr %arrayinit.element246, align 1
+ %arrayinit.element247 = getelementptr inbounds i8, ptr %arrayinit.element246, i32 1
+ store i8 65, ptr %arrayinit.element247, align 1
+ %arrayinit.element248 = getelementptr inbounds i8, ptr %arrayinit.element247, i32 1
+ store i8 0, ptr %arrayinit.element248, align 1
+ %arrayinit.element249 = getelementptr inbounds i8, ptr %arrayinit.element248, i32 1
+ store i8 15, ptr %arrayinit.element249, align 1
+ %arrayinit.element250 = getelementptr inbounds i8, ptr %arrayinit.element249, i32 1
+ store i8 11, ptr %arrayinit.element250, align 1
+ %arrayinit.element251 = getelementptr inbounds i8, ptr %arrayinit.element250, i32 1
+ store i8 32, ptr %arrayinit.element251, align 1
+ %arrayinit.element252 = getelementptr inbounds i8, ptr %arrayinit.element251, i32 1
+ %a46 = load i8, ptr %new_val, align 1
+ store i8 %a46, ptr %arrayinit.element252, align 1
+ %arrayinit.element253 = getelementptr inbounds i8, ptr %arrayinit.element252, i32 1
+ store i8 32, ptr %arrayinit.element253, align 1
+ %arrayinit.element254 = getelementptr inbounds i8, ptr %arrayinit.element253, i32 1
+ %a47 = load i8, ptr %simd, align 1
+ store i8 %a47, ptr %arrayinit.element254, align 1
+ %arrayinit.element255 = getelementptr inbounds i8, ptr %arrayinit.element254, i32 1
+ store i8 -3, ptr %arrayinit.element255, align 1
+ %arrayinit.element256 = getelementptr inbounds i8, ptr %arrayinit.element255, i32 1
+ store i8 1, ptr %arrayinit.element256, align 1
+ %arrayinit.element257 = getelementptr inbounds i8, ptr %arrayinit.element256, i32 1
+ store i8 3, ptr %arrayinit.element257, align 1
+ %arrayinit.element258 = getelementptr inbounds i8, ptr %arrayinit.element257, i32 1
+ store i8 92, ptr %arrayinit.element258, align 1
+ %arrayinit.element259 = getelementptr inbounds i8, ptr %arrayinit.element258, i32 1
+ store i8 4, ptr %arrayinit.element259, align 1
+ %arrayinit.element260 = getelementptr inbounds i8, ptr %arrayinit.element259, i32 1
+ store i8 64, ptr %arrayinit.element260, align 1
+ %arrayinit.element261 = getelementptr inbounds i8, ptr %arrayinit.element260, i32 1
+ store i8 65, ptr %arrayinit.element261, align 1
+ %arrayinit.element262 = getelementptr inbounds i8, ptr %arrayinit.element261, i32 1
+ store i8 0, ptr %arrayinit.element262, align 1
+ %arrayinit.element263 = getelementptr inbounds i8, ptr %arrayinit.element262, i32 1
+ store i8 15, ptr %arrayinit.element263, align 1
+ %arrayinit.element264 = getelementptr inbounds i8, ptr %arrayinit.element263, i32 1
+ store i8 11, ptr %arrayinit.element264, align 1
+ %arrayinit.element265 = getelementptr inbounds i8, ptr %arrayinit.element264, i32 1
+ store i8 65, ptr %arrayinit.element265, align 1
+ %arrayinit.element266 = getelementptr inbounds i8, ptr %arrayinit.element265, i32 1
+ store i8 1, ptr %arrayinit.element266, align 1
+ %arrayinit.element267 = getelementptr inbounds i8, ptr %arrayinit.element266, i32 1
+ store i8 15, ptr %arrayinit.element267, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %code, i32 269
+ call void @g(ptr %code, ptr %add.ptr)
ret void
}
-declare void @g(i8*, i8*)
+declare void @g(ptr, ptr)
attributes #1 = { noinline nounwind optnone ssp uwtable }
; This test was generated with bugpoint from
; MultiSource/Applications/JM/lencod/me_fullsearch.c
-%struct.SubImageContainer = type { i16****, [2 x i16****] }
+%struct.SubImageContainer = type { ptr, [2 x ptr] }
%struct.storable_picture = type { i32, i32, i32, i32, i32, i32,
[6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]],
i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i16**, i16****, i16****, i16*****, i16***,
- i8*, i8***, i64***, i64***, i16****, i8**, i8**, %struct.storable_picture*,
- %struct.storable_picture*, %struct.storable_picture*,
+ i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr,
+ ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
+ ptr, ptr,
i32, i32, i32, i32, i32, i32, i32 }
@img_height = external global i16, align 2
@width_pad = external global i32, align 4
@height_pad = external global i32, align 4
-@mvbits = external global i32*, align 4
+@mvbits = external global ptr, align 4
@ref_pic1_sub = external global %struct.SubImageContainer, align 4
@ref_pic2_sub = external global %struct.SubImageContainer, align 4
-@wbp_weight = external global i32****, align 4
+@wbp_weight = external global ptr, align 4
@weight1 = external global i16, align 2
@weight2 = external global i16, align 2
@offsetBi = external global i16, align 2
-@computeBiPred2 = external global [3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*], align 4
-@computeBiPred = external global i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*, align 4
+@computeBiPred2 = external global [3 x ptr], align 4
+@computeBiPred = external global ptr, align 4
@bipred2_access_method = external global i32, align 4
@start_me_refinement_hp = external global i32, align 4
-declare i32 @foobar(i16*, i32 signext , i32 signext , i32 signext ,
+declare i32 @foobar(ptr, i32 signext , i32 signext , i32 signext ,
i32 signext , i32 signext , i32 signext , i32 signext ) #1
-define void @SubPelBlockSearchBiPred(i16* %orig_pic, i16 signext %ref,
+define void @SubPelBlockSearchBiPred(ptr %orig_pic, i16 signext %ref,
i32 signext %pic_pix_x, i32 signext %pic_pix_y, i16 signext %pred_mv_y,
- i16* nocapture %mv_x, i16* nocapture %mv_y, i16* nocapture readonly %s_mv_x,
+ ptr nocapture %mv_x, ptr nocapture %mv_y, ptr nocapture readonly %s_mv_x,
i32 signext %search_pos2, i32 signext %min_mcost) #0 {
; CHECK-LABEL: SubPelBlockSearchBiPred:
entry:
%add40 = shl i32 %pic_pix_x, 2
%shl = add i32 %add40, 80
%add41 = shl i32 %pic_pix_y, 2
- %0 = load i32, i32* @start_me_refinement_hp, align 4, !tbaa !1
+ %0 = load i32, ptr @start_me_refinement_hp, align 4, !tbaa !1
%cond47 = select i1 undef, i32 1, i32 %search_pos2
- %1 = load i16, i16* %s_mv_x, align 2, !tbaa !5
+ %1 = load i16, ptr %s_mv_x, align 2, !tbaa !5
%conv48 = sext i16 %1 to i32
%add49 = add nsw i32 %conv48, %shl
%idxprom52 = sext i16 %ref to i32
- %2 = load i32, i32* null, align 4, !tbaa !1
- store i32 undef, i32* bitcast (%struct.SubImageContainer* @ref_pic1_sub to i32*), align 4, !tbaa !7
- %3 = load i32, i32* undef, align 4, !tbaa !10
- store i32 %3, i32* bitcast (%struct.SubImageContainer* @ref_pic2_sub to i32*), align 4, !tbaa !7
- store i16 0, i16* @img_height, align 2, !tbaa !5
- %size_x_pad = getelementptr inbounds %struct.storable_picture, %struct.storable_picture* null, i32 0, i32 22
- %4 = load i32, i32* %size_x_pad, align 4, !tbaa !12
- store i32 %4, i32* @width_pad, align 4, !tbaa !1
- %5 = load i32, i32* undef, align 4, !tbaa !13
- store i32 %5, i32* @height_pad, align 4, !tbaa !1
- %6 = load i32****, i32***** @wbp_weight, align 4, !tbaa !14
- %arrayidx75 = getelementptr inbounds i32***, i32**** %6, i32 undef
- %7 = load i32***, i32**** %arrayidx75, align 4, !tbaa !14
- %arrayidx76 = getelementptr inbounds i32**, i32*** %7, i32 %idxprom52
- %8 = load i32**, i32*** %arrayidx76, align 4, !tbaa !14
- %cond87.in671 = load i32*, i32** %8, align 4
- %cond87672 = load i32, i32* %cond87.in671, align 4
+ %2 = load i32, ptr null, align 4, !tbaa !1
+ store i32 undef, ptr @ref_pic1_sub, align 4, !tbaa !7
+ %3 = load i32, ptr undef, align 4, !tbaa !10
+ store i32 %3, ptr @ref_pic2_sub, align 4, !tbaa !7
+ store i16 0, ptr @img_height, align 2, !tbaa !5
+ %size_x_pad = getelementptr inbounds %struct.storable_picture, ptr null, i32 0, i32 22
+ %4 = load i32, ptr %size_x_pad, align 4, !tbaa !12
+ store i32 %4, ptr @width_pad, align 4, !tbaa !1
+ %5 = load i32, ptr undef, align 4, !tbaa !13
+ store i32 %5, ptr @height_pad, align 4, !tbaa !1
+ %6 = load ptr, ptr @wbp_weight, align 4, !tbaa !14
+ %arrayidx75 = getelementptr inbounds ptr, ptr %6, i32 undef
+ %7 = load ptr, ptr %arrayidx75, align 4, !tbaa !14
+ %arrayidx76 = getelementptr inbounds ptr, ptr %7, i32 %idxprom52
+ %8 = load ptr, ptr %arrayidx76, align 4, !tbaa !14
+ %cond87.in671 = load ptr, ptr %8, align 4
+ %cond87672 = load i32, ptr %cond87.in671, align 4
%conv88673 = trunc i32 %cond87672 to i16
- store i16 %conv88673, i16* @weight1, align 2, !tbaa !5
- %cond105 = load i32, i32* undef, align 4
+ store i16 %conv88673, ptr @weight1, align 2, !tbaa !5
+ %cond105 = load i32, ptr undef, align 4
%conv106 = trunc i32 %cond105 to i16
- store i16 %conv106, i16* @weight2, align 2, !tbaa !5
- store i16 0, i16* @offsetBi, align 2, !tbaa !5
- %storemerge655 = load i32, i32* bitcast (i32 (i16*, i32, i32, i32, i32, i32, i32, i32)** getelementptr inbounds ([3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*], [3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*]* @computeBiPred2, i32 0, i32 1) to i32*), align 4
- store i32 %storemerge655, i32* bitcast (i32 (i16*, i32, i32, i32, i32, i32, i32, i32)** @computeBiPred to i32*), align 4, !tbaa !14
- %9 = load i16, i16* %mv_x, align 2, !tbaa !5
+ store i16 %conv106, ptr @weight2, align 2, !tbaa !5
+ store i16 0, ptr @offsetBi, align 2, !tbaa !5
+ %storemerge655 = load i32, ptr getelementptr inbounds ([3 x ptr], ptr @computeBiPred2, i32 0, i32 1), align 4
+ store i32 %storemerge655, ptr @computeBiPred, align 4, !tbaa !14
+ %9 = load i16, ptr %mv_x, align 2, !tbaa !5
%cmp270 = icmp sgt i32 undef, 1
%or.cond = and i1 %cmp270, false
br i1 %or.cond, label %land.lhs.true277, label %if.else289
land.lhs.true277: ; preds = %entry
- %10 = load i16, i16* %mv_y, align 2, !tbaa !5
+ %10 = load i16, ptr %mv_y, align 2, !tbaa !5
%conv278 = sext i16 %10 to i32
%add279 = add nsw i32 %conv278, 0
%cmp280 = icmp sgt i32 %add279, 1
if.end290: ; preds = %if.else289, %land.lhs.true277
%storemerge = phi i32 [ 1, %if.else289 ], [ 0, %land.lhs.true277 ]
- store i32 %storemerge, i32* @bipred2_access_method, align 4, !tbaa !1
+ store i32 %storemerge, ptr @bipred2_access_method, align 4, !tbaa !1
%cmp315698 = icmp slt i32 %0, %cond47
br i1 %cmp315698, label %for.body.lr.ph, label %if.end358
%best_pos.0699 = phi i32 [ 0, %for.body.lr.ph ], [ %best_pos.1, %for.inc ]
%conv317 = sext i16 %11 to i32
%add320 = add nsw i32 0, %conv317
- %12 = load i16, i16* %mv_y, align 2, !tbaa !5
+ %12 = load i16, ptr %mv_y, align 2, !tbaa !5
%conv321 = sext i16 %12 to i32
%add324 = add nsw i32 0, %conv321
- %13 = load i32*, i32** @mvbits, align 4, !tbaa !14
- %14 = load i32, i32* undef, align 4, !tbaa !1
+ %13 = load ptr, ptr @mvbits, align 4, !tbaa !14
+ %14 = load i32, ptr undef, align 4, !tbaa !1
%sub329 = sub nsw i32 %add324, %conv328
- %arrayidx330 = getelementptr inbounds i32, i32* %13, i32 %sub329
- %15 = load i32, i32* %arrayidx330, align 4, !tbaa !1
+ %arrayidx330 = getelementptr inbounds i32, ptr %13, i32 %sub329
+ %15 = load i32, ptr %arrayidx330, align 4, !tbaa !1
%add331 = add nsw i32 %15, %14
%mul = mul nsw i32 %add331, %2
%shr332 = ashr i32 %mul, 16
; CHECK: j $BB{{.*}}
%add337 = add nsw i32 %add320, %shl
%add338 = add nsw i32 %add324, 0
- %call340 = tail call i32 undef(i16* %orig_pic, i32 signext undef, i32 signext
+ %call340 = tail call i32 undef(ptr %orig_pic, i32 signext undef, i32 signext
undef, i32 signext 0, i32 signext %add49,
i32 signext undef, i32 signext %add337,
i32 signext %add338) #1
for.inc: ; preds = %if.end336, %for.body
%best_pos.1 = phi i32 [ %best_pos.0699, %for.body ], [ %pos.0.best_pos.0, %if.end336 ]
- %.pre = load i16, i16* %mv_x, align 2, !tbaa !5
+ %.pre = load i16, ptr %mv_x, align 2, !tbaa !5
br label %for.body
if.end358: ; preds = %if.end290
br i1 undef, label %for.body415.lr.ph, label %if.end461
for.body415.lr.ph: ; preds = %if.end358
- %16 = load i16, i16* %mv_y, align 2, !tbaa !5
+ %16 = load i16, ptr %mv_y, align 2, !tbaa !5
%conv420 = sext i16 %16 to i32
%add423 = add nsw i32 0, %conv420
%cmp433 = icmp sgt i32 %.min_mcost.addr.0, 0
if.end436: ; preds = %for.body415.lr.ph
%add438 = add nsw i32 %add423, 0
- %call440 = tail call i32 @foobar(i16* %orig_pic, i32 signext undef, i32 signext undef,
+ %call440 = tail call i32 @foobar(ptr %orig_pic, i32 signext undef, i32 signext undef,
i32 signext 0, i32 signext %add49, i32 signext undef,
i32 signext undef, i32 signext %add438) #1
br label %if.end461
entry:
%bufptr.sroa.0 = alloca i64, align 8
%bufptr.sroa.4 = alloca i64, align 8
- store i64 %bufptr.coerce0, i64* %bufptr.sroa.0, align 8
- store i64 %bufptr.coerce1, i64* %bufptr.sroa.4, align 8
- %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load = load volatile i64, i64* %bufptr.sroa.0, align 8
+ store i64 %bufptr.coerce0, ptr %bufptr.sroa.0, align 8
+ store i64 %bufptr.coerce1, ptr %bufptr.sroa.4, align 8
+ %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load = load volatile i64, ptr %bufptr.sroa.0, align 8
%bf.clear = and i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load, 134217727
%bf.set = or i64 %bf.clear, 16508780544
- store volatile i64 %bf.set, i64* %bufptr.sroa.0, align 8
- %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2 = load volatile i64, i64* %bufptr.sroa.4, align 8
+ store volatile i64 %bf.set, ptr %bufptr.sroa.0, align 8
+ %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2 = load volatile i64, ptr %bufptr.sroa.4, align 8
%bf.clear3 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2, -16911433729
%bf.set4 = or i64 %bf.clear3, 1073741824
- store volatile i64 %bf.set4, i64* %bufptr.sroa.4, align 8
- %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6 = load volatile i64, i64* %bufptr.sroa.4, align 8
+ store volatile i64 %bf.set4, ptr %bufptr.sroa.4, align 8
+ %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6 = load volatile i64, ptr %bufptr.sroa.4, align 8
%bf.clear7 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6, 1125899906842623
%bf.set8 = or i64 %bf.clear7, 5629499534213120
- store volatile i64 %bf.set8, i64* %bufptr.sroa.4, align 8
- %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11 = load volatile i64, i64* %bufptr.sroa.4, align 8
+ store volatile i64 %bf.set8, ptr %bufptr.sroa.4, align 8
+ %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11 = load volatile i64, ptr %bufptr.sroa.4, align 8
%bf.lshr = lshr i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11, 50
- %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13 = load volatile i64, i64* %bufptr.sroa.4, align 8
+ %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13 = load volatile i64, ptr %bufptr.sroa.4, align 8
%bf.shl = shl nuw nsw i64 %bf.lshr, 34
%bf.clear14 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13, -1125882726973441
%bf.set15 = or i64 %bf.clear14, %bf.shl
- store volatile i64 %bf.set15, i64* %bufptr.sroa.4, align 8
- %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17 = load volatile i64, i64* %bufptr.sroa.0, align 8
+ store volatile i64 %bf.set15, ptr %bufptr.sroa.4, align 8
+ %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17 = load volatile i64, ptr %bufptr.sroa.0, align 8
%bf.lshr18 = lshr i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17, 27
ret i64 %bf.lshr18
}
define i32 @foo(i32 signext %x) {
entry:
%x.addr = alloca i32, align 4
- store volatile i32 %x, i32* %x.addr, align 4
- %x.addr.0.x.addr.0. = load volatile i32, i32* %x.addr, align 4
+ store volatile i32 %x, ptr %x.addr, align 4
+ %x.addr.0.x.addr.0. = load volatile i32, ptr %x.addr, align 4
%and = and i32 %x.addr.0.x.addr.0., -4
%or = or i32 %and, 8
- store volatile i32 %or, i32* %x.addr, align 4
+ store volatile i32 %or, ptr %x.addr, align 4
ret i32 %and
}
define i32 @test1(i32 %a) {
entry:
%tobool = icmp eq i32 %a, 0
- %0 = load i32, i32* @g0, align 4
+ %0 = load i32, ptr @g0, align 4
br i1 %tobool, label %if.else, label %if.then
if.then:
%add = add nsw i32 %0, 1
- store i32 %add, i32* @g0, align 4
- %1 = load i32, i32* @g1, align 4
+ store i32 %add, ptr @g0, align 4
+ %1 = load i32, ptr @g1, align 4
%add1 = add nsw i32 %1, 23
br label %if.end
if.else:
%add2 = add nsw i32 %0, 11
- store i32 %add2, i32* @g0, align 4
- %2 = load i32, i32* @g1, align 4
+ store i32 %add2, ptr @g0, align 4
+ %2 = load i32, ptr @g1, align 4
%add3 = add nsw i32 %2, 23
br label %if.end
if.end:
%storemerge = phi i32 [ %add3, %if.else ], [ %add1, %if.then ]
- store i32 %storemerge, i32* @g1, align 4
+ store i32 %storemerge, ptr @g1, align 4
ret i32 %storemerge
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%div = sdiv i32 %0, %1
; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mflo ${{[0-9]+}}
- store i32 %div, i32* @kkkk, align 4
+ store i32 %div, ptr @kkkk, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%div = sdiv i32 %0, %1
- store i32 %div, i32* @kkkk, align 4
+ store i32 %div, ptr @kkkk, align 4
%rem = srem i32 %0, %1
; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mflo ${{[0-9]+}}
; 16: mfhi ${{[0-9]+}}
- store i32 %rem, i32* @llll, align 4
+ store i32 %rem, ptr @llll, align 4
ret void
}
ret i32 %rem
}
-define i32 @sdivrem1(i32 signext %a0, i32 signext %a1, i32* nocapture %r) nounwind {
+define i32 @sdivrem1(i32 signext %a0, i32 signext %a1, ptr nocapture %r) nounwind {
entry:
; ALL-LABEL: sdivrem1:
; ALL: .end sdivrem1
%rem = srem i32 %a0, %a1
- store i32 %rem, i32* %r, align 4
+ store i32 %rem, ptr %r, align 4
%div = sdiv i32 %a0, %a1
ret i32 %div
}
-define i32 @udivrem1(i32 signext %a0, i32 signext %a1, i32* nocapture %r) nounwind {
+define i32 @udivrem1(i32 signext %a0, i32 signext %a1, ptr nocapture %r) nounwind {
entry:
; ALL-LABEL: udivrem1:
; ALL: .end udivrem1
%rem = urem i32 %a0, %a1
- store i32 %rem, i32* %r, align 4
+ store i32 %rem, ptr %r, align 4
%div = udiv i32 %a0, %a1
ret i32 %div
}
; FIXME: It's not clear what this is supposed to test.
define i32 @killFlags() {
entry:
- %0 = load i32, i32* @g0, align 4
- %1 = load i32, i32* @g1, align 4
+ %0 = load i32, ptr @g0, align 4
+ %1 = load i32, ptr @g1, align 4
%div = sdiv i32 %0, %1
ret i32 %div
}
ret i64 %rem
}
-define i64 @sdivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+define i64 @sdivrem2(i64 %a0, i64 %a1, ptr nocapture %r) nounwind {
entry:
; ALL-LABEL: sdivrem2:
; ALL: .end sdivrem2
%rem = srem i64 %a0, %a1
- store i64 %rem, i64* %r, align 8
+ store i64 %rem, ptr %r, align 8
%div = sdiv i64 %a0, %a1
ret i64 %div
}
-define i64 @udivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+define i64 @udivrem2(i64 %a0, i64 %a1, ptr nocapture %r) nounwind {
entry:
; ALL-LABEL: udivrem2:
; ALL: .end udivrem2
%rem = urem i64 %a0, %a1
- store i64 %rem, i64* %r, align 8
+ store i64 %rem, ptr %r, align 8
%div = udiv i64 %a0, %a1
ret i64 %div
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%div = udiv i32 %0, %1
; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mflo ${{[0-9]+}}
- store i32 %div, i32* @kkkk, align 4
+ store i32 %div, ptr @kkkk, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%div = udiv i32 %0, %1
- store i32 %div, i32* @kkkk, align 4
+ store i32 %div, ptr @kkkk, align 4
%rem = urem i32 %0, %1
; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mflo ${{[0-9]+}}
; 16: mfhi ${{[0-9]+}}
- store i32 %rem, i32* @llll, align 4
+ store i32 %rem, ptr @llll, align 4
ret void
}
; R1-LABEL: test_lbux:
; R1: lbux ${{[0-9]+}}
-define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
+define zeroext i8 @test_lbux(ptr nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i
- %0 = load i8, i8* %add.ptr, align 1
+ %add.ptr = getelementptr inbounds i8, ptr %b, i32 %i
+ %0 = load i8, ptr %add.ptr, align 1
ret i8 %0
}
; R1-LABEL: test_lhx:
; R1: lhx ${{[0-9]+}}
-define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
+define signext i16 @test_lhx(ptr nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i
- %0 = load i16, i16* %add.ptr, align 2
+ %add.ptr = getelementptr inbounds i16, ptr %b, i32 %i
+ %0 = load i16, ptr %add.ptr, align 2
ret i16 %0
}
; R1-LABEL: test_lwx:
; R1: lwx ${{[0-9]+}}
-define i32 @test_lwx(i32* nocapture %b, i32 %i) {
+define i32 @test_lwx(ptr nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i
- %0 = load i32, i32* %add.ptr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %b, i32 %i
+ %0 = load i32, ptr %add.ptr, align 4
ret i32 %0
}
declare i32 @llvm.mips.bitrev(i32) nounwind readnone
-define i32 @test__builtin_mips_lbux1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lbux1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
entry:
; CHECK: lbux ${{[0-9]+}}
- %0 = tail call i32 @llvm.mips.lbux(i8* %a0, i32 %a1)
+ %0 = tail call i32 @llvm.mips.lbux(ptr %a0, i32 %a1)
ret i32 %0
}
-declare i32 @llvm.mips.lbux(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lbux(ptr, i32) nounwind readonly
-define i32 @test__builtin_mips_lhx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lhx1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
entry:
; CHECK: lhx ${{[0-9]+}}
- %0 = tail call i32 @llvm.mips.lhx(i8* %a0, i32 %a1)
+ %0 = tail call i32 @llvm.mips.lhx(ptr %a0, i32 %a1)
ret i32 %0
}
-declare i32 @llvm.mips.lhx(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lhx(ptr, i32) nounwind readonly
-define i32 @test__builtin_mips_lwx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lwx1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
entry:
; CHECK: lwx ${{[0-9]+}}
- %0 = tail call i32 @llvm.mips.lwx(i8* %a0, i32 %a1)
+ %0 = tail call i32 @llvm.mips.lwx(ptr %a0, i32 %a1)
ret i32 %0
}
-declare i32 @llvm.mips.lwx(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lwx(ptr, i32) nounwind readonly
define i32 @test__builtin_mips_wrdsp1(i32 %i0, i32 %a0) nounwind {
entry:
define void @extend_load_trunc_store_v2i8() {
entry:
- %0 = load <2 x i8>, <2 x i8>* @g1, align 2
- store <2 x i8> %0, <2 x i8>* @g0, align 2
+ %0 = load <2 x i8>, ptr @g1, align 2
+ store <2 x i8> %0, ptr @g0, align 2
ret void
}
; Check dynamic stack realignment in functions without variable-sized objects.
-declare void @helper_01(i32, i32, i32, i32, i32*)
+declare void @helper_01(i32, i32, i32, i32, ptr)
; O32 ABI
define void @func_01() {
; GP32-MMR6: addiu $sp, $sp, 1024
%a = alloca i32, align 512
- call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a)
ret void
}
declare void @helper_02(i32, i32, i32, i32,
- i32, i32, i32, i32, i32*)
+ i32, i32, i32, i32, ptr)
; N32/N64 ABIs
define void @func_02() {
%a = alloca i32, align 512
call void @helper_02(i32 0, i32 0, i32 0, i32 0,
- i32 0, i32 0, i32 0, i32 0, i32* %a)
+ i32 0, i32 0, i32 0, i32 0, ptr %a)
ret void
}
; Verify that we use $fp for referencing incoming arguments.
-declare void @helper_03(i32, i32, i32, i32, i32*, i32*)
+declare void @helper_03(i32, i32, i32, i32, ptr, ptr)
; O32 ABI
-define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32* %b) {
+define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, ptr %b) {
entry:
; GP32-LABEL: func_03:
; GP32-MM-DAG: sw16 $[[T1]], 20(${{[0-9]+}})
%a = alloca i32, align 512
- call void @helper_03(i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ call void @helper_03(i32 0, i32 0, i32 0, i32 0, ptr %a, ptr %b)
ret void
}
declare void @helper_04(i32, i32, i32, i32,
- i32, i32, i32, i32, i32*, i32*)
+ i32, i32, i32, i32, ptr, ptr)
; N32/N64 ABIs
define void @func_04(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
i32 %p4, i32 %p5, i32 %p6, i32 %p7,
- i32* %b) {
+ ptr %b) {
entry:
; GP64-LABEL: func_04:
%a = alloca i32, align 512
call void @helper_04(i32 0, i32 0, i32 0, i32 0,
- i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ i32 0, i32 0, i32 0, i32 0, ptr %a, ptr %b)
ret void
}
%a0 = alloca i32, i32 %sz, align 512
%a1 = alloca i32, align 4
- store volatile i32 111, i32* %a0, align 512
- store volatile i32 222, i32* %a1, align 4
+ store volatile i32 111, ptr %a0, align 512
+ store volatile i32 222, ptr %a1, align 4
ret void
}
%a0 = alloca i32, i32 %sz, align 512
%a1 = alloca i32, align 4
- store volatile i32 111, i32* %a0, align 512
- store volatile i32 222, i32* %a1, align 4
+ store volatile i32 111, ptr %a0, align 512
+ store volatile i32 222, ptr %a1, align 4
ret void
}
%a0 = alloca i32, i32 %sz, align 512
%a1 = alloca i32, align 4
- store volatile i32 111, i32* %a0, align 512
- store volatile i32 222, i32* %a1, align 4
+ store volatile i32 111, ptr %a0, align 512
+ store volatile i32 222, ptr %a1, align 4
- call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a1)
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a1)
ret void
}
%a0 = alloca i32, i32 %sz, align 512
%a1 = alloca i32, align 4
- store volatile i32 111, i32* %a0, align 512
- store volatile i32 222, i32* %a1, align 4
+ store volatile i32 111, ptr %a0, align 512
+ store volatile i32 222, ptr %a1, align 4
call void @helper_02(i32 0, i32 0, i32 0, i32 0,
- i32 0, i32 0, i32 0, i32 0, i32* %a1)
+ i32 0, i32 0, i32 0, i32 0, ptr %a1)
ret void
}
; ALL-NOT: and $sp, $sp, $[[T0:[0-9]+|ra|gp]]
%a = alloca i32, align 512
- call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a)
ret void
}
%a0 = alloca i32, i32 %sz, align 512
%a1 = alloca i32, align 4
- store volatile i32 111, i32* %a0, align 512
- store volatile i32 222, i32* %a1, align 4
+ store volatile i32 111, ptr %a0, align 512
+ store volatile i32 222, ptr %a1, align 4
ret void
}
; RUN: llc -march=mips64el -mcpu=mips64 < %s | \
; RUN: FileCheck %s -check-prefix=CHECK-MIPS64
-declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.eh.dwarf.cfa(i32) nounwind
+declare ptr @llvm.frameaddress(i32) nounwind readnone
-define i8* @f1() nounwind {
+define ptr @f1() nounwind {
entry:
%x = alloca [32 x i8], align 1
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+ ret ptr %0
; CHECK-LABEL: f1:
}
-define i8* @f2() nounwind {
+define ptr @f2() nounwind {
entry:
%x = alloca [65536 x i8], align 1
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+ ret ptr %0
; CHECK-LABEL: f2:
define i32 @f3() nounwind {
entry:
%x = alloca [32 x i8], align 1
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
- %1 = ptrtoint i8* %0 to i32
- %2 = call i8* @llvm.frameaddress(i32 0)
- %3 = ptrtoint i8* %2 to i32
+ %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+ %1 = ptrtoint ptr %0 to i32
+ %2 = call ptr @llvm.frameaddress(i32 0)
+ %3 = ptrtoint ptr %2 to i32
%add = add i32 %1, %3
ret i32 %add
}
-define i8* @f4() nounwind {
+define ptr @f4() nounwind {
entry:
%x = alloca [32 x i8], align 1
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+ ret ptr %0
; CHECK-LABEL: f4:
; RUN: llc -march=mipsel -mcpu=mips32r2 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,NOT-R6
; RUN: llc -march=mipsel -mcpu=mips32r6 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,R6
-declare void @llvm.eh.return.i32(i32, i8*)
+declare void @llvm.eh.return.i32(i32, ptr)
declare void @foo(...)
-define i8* @f1(i32 %offset, i8* %handler) {
+define ptr @f1(i32 %offset, ptr %handler) {
entry:
call void (...) @foo()
- call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+ call void @llvm.eh.return.i32(i32 %offset, ptr %handler)
unreachable
; CHECK: f1:
; CHECK: addu $sp, $sp, $3
}
-define i8* @f2(i32 %offset, i8* %handler) {
+define ptr @f2(i32 %offset, ptr %handler) {
entry:
- call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+ call void @llvm.eh.return.i32(i32 %offset, ptr %handler)
unreachable
; CHECK: f2:
; RUN: llc -march=mips64el -mcpu=mips64r2 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,NOT-R6
; RUN: llc -march=mips64el -mcpu=mips64r6 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,R6
-declare void @llvm.eh.return.i64(i64, i8*)
+declare void @llvm.eh.return.i64(i64, ptr)
declare void @foo(...)
-define void @f1(i64 %offset, i8* %handler) {
+define void @f1(i64 %offset, ptr %handler) {
entry:
call void (...) @foo()
- call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
+ call void @llvm.eh.return.i64(i64 %offset, ptr %handler)
unreachable
; CHECK: f1:
; CHECK: daddu $sp, $sp, $3
}
-define void @f2(i64 %offset, i8* %handler) {
+define void @f2(i64 %offset, ptr %handler) {
entry:
- call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
+ call void @llvm.eh.return.i64(i64 %offset, ptr %handler)
unreachable
; CHECK: f2:
; RUN: llc < %s -march=mips | FileCheck %s -check-prefix=CHECK-EB
@g1 = global double 0.000000e+00, align 8
-@_ZTId = external constant i8*
+@_ZTId = external constant ptr
-define void @_Z1fd(double %i2) personality i32 (...)* @__gxx_personality_v0 {
+define void @_Z1fd(double %i2) personality ptr @__gxx_personality_v0 {
entry:
; CHECK-EL: addiu $sp, $sp
; CHECK-EL: .cfi_def_cfa_offset
; CHECK-EB: .cfi_offset 52, -4
; CHECK-EL: .cfi_offset 31, -12
- %exception = tail call i8* @__cxa_allocate_exception(i32 8) nounwind
- %0 = bitcast i8* %exception to double*
- store double 3.200000e+00, double* %0, align 8
- invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTId to i8*), i8* null) noreturn
+ %exception = tail call ptr @__cxa_allocate_exception(i32 8) nounwind
+ store double 3.200000e+00, ptr %exception, align 8
+ invoke void @__cxa_throw(ptr %exception, ptr @_ZTId, ptr null) noreturn
to label %unreachable unwind label %lpad
lpad: ; preds = %entry
; CHECK-EL: # %lpad
; CHECK-EL: bne $5
- %exn.val = landingpad { i8*, i32 }
+ %exn.val = landingpad { ptr, i32 }
cleanup
- catch i8* bitcast (i8** @_ZTId to i8*)
- %exn = extractvalue { i8*, i32 } %exn.val, 0
- %sel = extractvalue { i8*, i32 } %exn.val, 1
- %1 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*)) nounwind
- %2 = icmp eq i32 %sel, %1
- br i1 %2, label %catch, label %eh.resume
+ catch ptr @_ZTId
+ %exn = extractvalue { ptr, i32 } %exn.val, 0
+ %sel = extractvalue { ptr, i32 } %exn.val, 1
+ %0 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTId) nounwind
+ %1 = icmp eq i32 %sel, %0
+ br i1 %1, label %catch, label %eh.resume
catch: ; preds = %lpad
- %3 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind
- %4 = bitcast i8* %3 to double*
- %exn.scalar = load double, double* %4, align 8
+ %2 = tail call ptr @__cxa_begin_catch(ptr %exn) nounwind
+ %exn.scalar = load double, ptr %2, align 8
%add = fadd double %exn.scalar, %i2
- store double %add, double* @g1, align 8
+ store double %add, ptr @g1, align 8
tail call void @__cxa_end_catch() nounwind
ret void
eh.resume: ; preds = %lpad
- resume { i8*, i32 } %exn.val
+ resume { ptr, i32 } %exn.val
unreachable: ; preds = %entry
unreachable
}
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
declare i32 @__gxx_personality_v0(...)
-declare i32 @llvm.eh.typeid.for(i8*) nounwind
+declare i32 @llvm.eh.typeid.for(ptr) nounwind
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
; RUN: llc -mtriple=mips64-unknown-freebsd11.0 < %s -asm-verbose -relocation-model=pic | \
; RUN: FileCheck -check-prefixes=ALL,FREEBSD,FREEBSD-NEW,N64 %s
-@_ZTISt9exception = external constant i8*
+@_ZTISt9exception = external constant ptr
-define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @main() personality ptr @__gxx_personality_v0 {
; ALL: .cfi_startproc
; Linux must rely on the assembler/linker converting the encodings.
; ALL: jalr
lpad:
- %0 = landingpad { i8*, i32 }
- catch i8* null
- catch i8* bitcast (i8** @_ZTISt9exception to i8*)
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
+ catch ptr @_ZTISt9exception
ret i32 0
cont:
; emergency spill slot. Filed PR48301.
; XFAIL: *
@var = external global i32
-@ptrvar = external global i8*
+@ptrvar = external global ptr
; CHECK-LABEL: func:
define void @func() {
%stackspace = alloca[16384 x i32], align 4
; ensure stackspace is not optimized out
- %stackspace_casted = bitcast [16384 x i32]* %stackspace to i8*
- store volatile i8* %stackspace_casted, i8** @ptrvar
+ store volatile ptr %stackspace, ptr @ptrvar
; Load values to increase register pressure.
- %v0 = load volatile i32, i32* @var
- %v1 = load volatile i32, i32* @var
- %v2 = load volatile i32, i32* @var
- %v3 = load volatile i32, i32* @var
- %v4 = load volatile i32, i32* @var
- %v5 = load volatile i32, i32* @var
- %v6 = load volatile i32, i32* @var
- %v7 = load volatile i32, i32* @var
- %v8 = load volatile i32, i32* @var
- %v9 = load volatile i32, i32* @var
- %v10 = load volatile i32, i32* @var
- %v11 = load volatile i32, i32* @var
- %v12 = load volatile i32, i32* @var
- %v13 = load volatile i32, i32* @var
- %v14 = load volatile i32, i32* @var
- %v15 = load volatile i32, i32* @var
- %v16 = load volatile i32, i32* @var
+ %v0 = load volatile i32, ptr @var
+ %v1 = load volatile i32, ptr @var
+ %v2 = load volatile i32, ptr @var
+ %v3 = load volatile i32, ptr @var
+ %v4 = load volatile i32, ptr @var
+ %v5 = load volatile i32, ptr @var
+ %v6 = load volatile i32, ptr @var
+ %v7 = load volatile i32, ptr @var
+ %v8 = load volatile i32, ptr @var
+ %v9 = load volatile i32, ptr @var
+ %v10 = load volatile i32, ptr @var
+ %v11 = load volatile i32, ptr @var
+ %v12 = load volatile i32, ptr @var
+ %v13 = load volatile i32, ptr @var
+ %v14 = load volatile i32, ptr @var
+ %v15 = load volatile i32, ptr @var
+ %v16 = load volatile i32, ptr @var
; Computing a stack-relative value needs an additional register.
; We should get an emergency spill/reload for this.
; CHECK: sw ${{.*}}, 0($sp)
; CHECK: lw ${{.*}}, 0($sp)
- store volatile i32 %v0, i32* %space
+ store volatile i32 %v0, ptr %space
; store values so they are used.
- store volatile i32 %v0, i32* @var
- store volatile i32 %v1, i32* @var
- store volatile i32 %v2, i32* @var
- store volatile i32 %v3, i32* @var
- store volatile i32 %v4, i32* @var
- store volatile i32 %v5, i32* @var
- store volatile i32 %v6, i32* @var
- store volatile i32 %v7, i32* @var
- store volatile i32 %v8, i32* @var
- store volatile i32 %v9, i32* @var
- store volatile i32 %v10, i32* @var
- store volatile i32 %v11, i32* @var
- store volatile i32 %v12, i32* @var
- store volatile i32 %v13, i32* @var
- store volatile i32 %v14, i32* @var
- store volatile i32 %v15, i32* @var
- store volatile i32 %v16, i32* @var
+ store volatile i32 %v0, ptr @var
+ store volatile i32 %v1, ptr @var
+ store volatile i32 %v2, ptr @var
+ store volatile i32 %v3, ptr @var
+ store volatile i32 %v4, ptr @var
+ store volatile i32 %v5, ptr @var
+ store volatile i32 %v6, ptr @var
+ store volatile i32 %v7, ptr @var
+ store volatile i32 %v8, ptr @var
+ store volatile i32 %v9, ptr @var
+ store volatile i32 %v10, ptr @var
+ store volatile i32 %v11, ptr @var
+ store volatile i32 %v12, ptr @var
+ store volatile i32 %v13, ptr @var
+ store volatile i32 %v14, ptr @var
+ store volatile i32 %v15, ptr @var
+ store volatile i32 %v16, ptr @var
ret void
}
@bigCst = internal constant i82 483673642326615442599424
-define void @accessBig(i64* %storage) {
- %addr = bitcast i64* %storage to i82*
- %bigLoadedCst = load volatile i82, i82* @bigCst
+define void @accessBig(ptr %storage) {
+ %bigLoadedCst = load volatile i82, ptr @bigCst
%tmp = add i82 %bigLoadedCst, 1
- store i82 %tmp, i82* %addr
+ store i82 %tmp, ptr %storage
ret void
}
@notSoBigCst = internal constant i57 72057594037927935
-define void @accessNotSoBig(i64* %storage) {
- %addr = bitcast i64* %storage to i57*
- %bigLoadedCst = load volatile i57, i57* @notSoBigCst
+define void @accessNotSoBig(ptr %storage) {
+ %bigLoadedCst = load volatile i57, ptr @notSoBigCst
%tmp = add i57 %bigLoadedCst, 1
- store i57 %tmp, i57* %addr
+ store i57 %tmp, ptr %storage
ret void
}
@external_y = thread_local global i8 7, align 2
@internal_y = internal thread_local global i64 9, align 16
-define i32* @get_external_x() {
+define ptr @get_external_x() {
entry:
- ret i32* @external_x
+ ret ptr @external_x
}
-define i8* @get_external_y() {
+define ptr @get_external_y() {
entry:
- ret i8* @external_y
+ ret ptr @external_y
}
-define i64* @get_internal_y() {
+define ptr @get_internal_y() {
entry:
- ret i64* @internal_y
+ ret ptr @internal_y
}
; MIPS_32-LABEL: get_external_y:
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
@.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
-@_ZTIPKc = external constant i8*
+@_ZTIPKc = external constant ptr
define i32 @main() {
; 16-LABEL: main:
; 16: .cfi_endproc
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
- %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
- %0 = bitcast i8* %exception to i8**
- store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** %0
- call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) noreturn
+ store i32 0, ptr %retval
+ %exception = call ptr @__cxa_allocate_exception(i32 4) nounwind
+ store ptr @.str, ptr %exception
+ call void @__cxa_throw(ptr %exception, ptr @_ZTIPKc, ptr null) noreturn
unreachable
return: ; No predecessors!
- %1 = load i32, i32* %retval
- ret i32 %1
+ %0 = load i32, ptr %retval
+ ret i32 %0
}
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
ret i32 %and
}
-define void @ins2_5_9(i32 %s, i32* nocapture %d) nounwind {
+define void @ins2_5_9(i32 %s, ptr nocapture %d) nounwind {
entry:
; 32R2: ins ${{[0-9]+}}, $4, 5, 9
; 16-NOT: ins ${{[0-9]+}}
%and = shl i32 %s, 5
%shl = and i32 %and, 16352
- %tmp3 = load i32, i32* %d, align 4
+ %tmp3 = load i32, ptr %d, align 4
%and5 = and i32 %tmp3, -16353
%or = or i32 %and5, %shl
- store i32 %or, i32* %d, align 4
+ store i32 %or, ptr %d, align 4
ret void
}
; Function Attrs: nounwind optsize
define i32 @main() #0 {
entry:
- %0 = load double, double* @y, align 8
+ %0 = load double, ptr @y, align 8
%call = tail call double @fabs(double %0) #2
- store double %call, double* @x, align 8
+ store double %call, ptr @x, align 8
; static-NOT: .ent __call_stub_fp_fabs
; static-NOT: jal fabs
- %1 = load float, float* @y1, align 4
+ %1 = load float, ptr @y1, align 4
%call2 = tail call float @fabsf(float %1) #2
- store float %call2, float* @x1, align 4
+ store float %call2, ptr @x1, align 4
; static-NOT: .ent __call_stub_fp_fabsf
; static-NOT: jal fabsf
ret i32 0
; This test casts a 32-bit float to a 64-bit int. This would cause a crash due
; to LLVM incorrectly lowering the float on single-float platforms.
-define void @foo(float* %in, i64* %out) {
+define void @foo(ptr %in, ptr %out) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: daddiu $sp, $sp, -16
; CHECK-NEXT: jr $ra
; CHECK-NEXT: daddiu $sp, $sp, 16
entry:
- %in.addr = alloca float*, align 8
- %out.addr = alloca i64*, align 8
- store float* %in, float** %in.addr, align 8
- store i64* %out, i64** %out.addr, align 8
- %0 = load float*, float** %in.addr, align 8
- %1 = load float, float* %0, align 4
+ %in.addr = alloca ptr, align 8
+ %out.addr = alloca ptr, align 8
+ store ptr %in, ptr %in.addr, align 8
+ store ptr %out, ptr %out.addr, align 8
+ %0 = load ptr, ptr %in.addr, align 8
+ %1 = load float, ptr %0, align 4
%conv = fptosi float %1 to i64
- %2 = load i64*, i64** %out.addr, align 8
- store i64 %conv, i64* %2, align 8
+ %2 = load ptr, ptr %out.addr, align 8
+ store i64 %conv, ptr %2, align 8
ret void
}
; CHECK-NACL-NOT: lw $15
; CHECK-NACL-NOT: lw $24
- %0 = load i32, i32* @gi0, align 4
- %1 = load i32, i32* @gi1, align 4
- %2 = load i32, i32* @gi2, align 4
- %3 = load i32, i32* @gi3, align 4
- %4 = load i32, i32* @gi4, align 4
- %5 = load i32, i32* @gi5, align 4
- %6 = load i32, i32* @gi6, align 4
- %7 = load i32, i32* @gi7, align 4
- %8 = load i32, i32* @gi8, align 4
- %9 = load i32, i32* @gi9, align 4
- %10 = load i32, i32* @gi10, align 4
- %11 = load i32, i32* @gi11, align 4
- %12 = load i32, i32* @gi12, align 4
- %13 = load i32, i32* @gi13, align 4
- %14 = load i32, i32* @gi14, align 4
- %15 = load i32, i32* @gi15, align 4
- %16 = load i32, i32* @gi16, align 4
+ %0 = load i32, ptr @gi0, align 4
+ %1 = load i32, ptr @gi1, align 4
+ %2 = load i32, ptr @gi2, align 4
+ %3 = load i32, ptr @gi3, align 4
+ %4 = load i32, ptr @gi4, align 4
+ %5 = load i32, ptr @gi5, align 4
+ %6 = load i32, ptr @gi6, align 4
+ %7 = load i32, ptr @gi7, align 4
+ %8 = load i32, ptr @gi8, align 4
+ %9 = load i32, ptr @gi9, align 4
+ %10 = load i32, ptr @gi10, align 4
+ %11 = load i32, ptr @gi11, align 4
+ %12 = load i32, ptr @gi12, align 4
+ %13 = load i32, ptr @gi13, align 4
+ %14 = load i32, ptr @gi14, align 4
+ %15 = load i32, ptr @gi15, align 4
+ %16 = load i32, ptr @gi16, align 4
tail call fastcc void @callee0(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16)
ret void
}
; CHECK-NACL-NOT: sw $15
; CHECK-NACL-NOT: sw $24
- store i32 %a0, i32* @g0, align 4
- store i32 %a1, i32* @g1, align 4
- store i32 %a2, i32* @g2, align 4
- store i32 %a3, i32* @g3, align 4
- store i32 %a4, i32* @g4, align 4
- store i32 %a5, i32* @g5, align 4
- store i32 %a6, i32* @g6, align 4
- store i32 %a7, i32* @g7, align 4
- store i32 %a8, i32* @g8, align 4
- store i32 %a9, i32* @g9, align 4
- store i32 %a10, i32* @g10, align 4
- store i32 %a11, i32* @g11, align 4
- store i32 %a12, i32* @g12, align 4
- store i32 %a13, i32* @g13, align 4
- store i32 %a14, i32* @g14, align 4
- store i32 %a15, i32* @g15, align 4
- store i32 %a16, i32* @g16, align 4
+ store i32 %a0, ptr @g0, align 4
+ store i32 %a1, ptr @g1, align 4
+ store i32 %a2, ptr @g2, align 4
+ store i32 %a3, ptr @g3, align 4
+ store i32 %a4, ptr @g4, align 4
+ store i32 %a5, ptr @g5, align 4
+ store i32 %a6, ptr @g6, align 4
+ store i32 %a7, ptr @g7, align 4
+ store i32 %a8, ptr @g8, align 4
+ store i32 %a9, ptr @g9, align 4
+ store i32 %a10, ptr @g10, align 4
+ store i32 %a11, ptr @g11, align 4
+ store i32 %a12, ptr @g12, align 4
+ store i32 %a13, ptr @g13, align 4
+ store i32 %a14, ptr @g14, align 4
+ store i32 %a15, ptr @g15, align 4
+ store i32 %a16, ptr @g16, align 4
ret void
}
; CHECK: lwc1 $f1
; CHECK: lwc1 $f0
- %0 = load float, float* @gfa0, align 4
- %1 = load float, float* @gfa1, align 4
- %2 = load float, float* @gfa2, align 4
- %3 = load float, float* @gfa3, align 4
- %4 = load float, float* @gfa4, align 4
- %5 = load float, float* @gfa5, align 4
- %6 = load float, float* @gfa6, align 4
- %7 = load float, float* @gfa7, align 4
- %8 = load float, float* @gfa8, align 4
- %9 = load float, float* @gfa9, align 4
- %10 = load float, float* @gfa10, align 4
- %11 = load float, float* @gfa11, align 4
- %12 = load float, float* @gfa12, align 4
- %13 = load float, float* @gfa13, align 4
- %14 = load float, float* @gfa14, align 4
- %15 = load float, float* @gfa15, align 4
- %16 = load float, float* @gfa16, align 4
- %17 = load float, float* @gfa17, align 4
- %18 = load float, float* @gfa18, align 4
- %19 = load float, float* @gfa19, align 4
- %20 = load float, float* @gfa20, align 4
+ %0 = load float, ptr @gfa0, align 4
+ %1 = load float, ptr @gfa1, align 4
+ %2 = load float, ptr @gfa2, align 4
+ %3 = load float, ptr @gfa3, align 4
+ %4 = load float, ptr @gfa4, align 4
+ %5 = load float, ptr @gfa5, align 4
+ %6 = load float, ptr @gfa6, align 4
+ %7 = load float, ptr @gfa7, align 4
+ %8 = load float, ptr @gfa8, align 4
+ %9 = load float, ptr @gfa9, align 4
+ %10 = load float, ptr @gfa10, align 4
+ %11 = load float, ptr @gfa11, align 4
+ %12 = load float, ptr @gfa12, align 4
+ %13 = load float, ptr @gfa13, align 4
+ %14 = load float, ptr @gfa14, align 4
+ %15 = load float, ptr @gfa15, align 4
+ %16 = load float, ptr @gfa16, align 4
+ %17 = load float, ptr @gfa17, align 4
+ %18 = load float, ptr @gfa18, align 4
+ %19 = load float, ptr @gfa19, align 4
+ %20 = load float, ptr @gfa20, align 4
tail call fastcc void @callee1(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20)
ret void
}
; CHECK-DAG: swc1 $f18
; CHECK-DAG: swc1 $f19
- store float %a0, float* @gf0, align 4
- store float %a1, float* @gf1, align 4
- store float %a2, float* @gf2, align 4
- store float %a3, float* @gf3, align 4
- store float %a4, float* @gf4, align 4
- store float %a5, float* @gf5, align 4
- store float %a6, float* @gf6, align 4
- store float %a7, float* @gf7, align 4
- store float %a8, float* @gf8, align 4
- store float %a9, float* @gf9, align 4
- store float %a10, float* @gf10, align 4
- store float %a11, float* @gf11, align 4
- store float %a12, float* @gf12, align 4
- store float %a13, float* @gf13, align 4
- store float %a14, float* @gf14, align 4
- store float %a15, float* @gf15, align 4
- store float %a16, float* @gf16, align 4
- store float %a17, float* @gf17, align 4
- store float %a18, float* @gf18, align 4
- store float %a19, float* @gf19, align 4
- store float %a20, float* @gf20, align 4
+ store float %a0, ptr @gf0, align 4
+ store float %a1, ptr @gf1, align 4
+ store float %a2, ptr @gf2, align 4
+ store float %a3, ptr @gf3, align 4
+ store float %a4, ptr @gf4, align 4
+ store float %a5, ptr @gf5, align 4
+ store float %a6, ptr @gf6, align 4
+ store float %a7, ptr @gf7, align 4
+ store float %a8, ptr @gf8, align 4
+ store float %a9, ptr @gf9, align 4
+ store float %a10, ptr @gf10, align 4
+ store float %a11, ptr @gf11, align 4
+ store float %a12, ptr @gf12, align 4
+ store float %a13, ptr @gf13, align 4
+ store float %a14, ptr @gf14, align 4
+ store float %a15, ptr @gf15, align 4
+ store float %a16, ptr @gf16, align 4
+ store float %a17, ptr @gf17, align 4
+ store float %a18, ptr @gf18, align 4
+ store float %a19, ptr @gf19, align 4
+ store float %a20, ptr @gf20, align 4
ret void
}
; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]])
; NOODDSPREG-DAG: swc1 $[[F0]], 0($sp)
- %0 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4
- %1 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4
- %2 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4
- %3 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4
- %4 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4
- %5 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4
- %6 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4
- %7 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4
- %8 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4
- %9 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4
- %10 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4
+ %0 = load float, ptr @fa, align 4
+ %1 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 1), align 4
+ %2 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 2), align 4
+ %3 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 3), align 4
+ %4 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 4), align 4
+ %5 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 5), align 4
+ %6 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 6), align 4
+ %7 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 7), align 4
+ %8 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 8), align 4
+ %9 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 9), align 4
+ %10 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 10), align 4
tail call fastcc void @callee2(float %0, float %1, float %2, float %3,
float %4, float %5, float %6, float %7,
float %8, float %9, float %10)
; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], {{[0-9]+}}($sp)
; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]])
- store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4
- store float %a1, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4
- store float %a2, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4
- store float %a3, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4
- store float %a4, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4
- store float %a5, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4
- store float %a6, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4
- store float %a7, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4
- store float %a8, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4
- store float %a9, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4
- store float %a10, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4
+ store float %a0, ptr @fa, align 4
+ store float %a1, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 1), align 4
+ store float %a2, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 2), align 4
+ store float %a3, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 3), align 4
+ store float %a4, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 4), align 4
+ store float %a5, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 5), align 4
+ store float %a6, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 6), align 4
+ store float %a7, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 7), align 4
+ store float %a8, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 8), align 4
+ store float %a9, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 9), align 4
+ store float %a10, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 10), align 4
ret void
}
; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 80($[[R0]])
; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 0($sp)
- %0 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8
- %1 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8
- %2 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8
- %3 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8
- %4 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8
- %5 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8
- %6 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8
- %7 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8
- %8 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8
- %9 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 9), align 8
- %10 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8
+ %0 = load double, ptr @da, align 8
+ %1 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 1), align 8
+ %2 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 2), align 8
+ %3 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 3), align 8
+ %4 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 4), align 8
+ %5 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 5), align 8
+ %6 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 6), align 8
+ %7 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 7), align 8
+ %8 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 8), align 8
+ %9 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 9), align 8
+ %10 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 10), align 8
tail call fastcc void @callee3(double %0, double %1, double %2, double %3,
double %4, double %5, double %6, double %7,
double %8, double %9, double %10)
; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 0($sp)
; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 80($[[R0]])
- store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8
- store double %a1, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8
- store double %a2, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8
- store double %a3, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8
- store double %a4, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8
- store double %a5, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8
- store double %a6, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8
- store double %a7, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8
- store double %a8, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8
- store double %a9, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 9), align 8
- store double %a10, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8
+ store double %a0, ptr @da, align 8
+ store double %a1, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 1), align 8
+ store double %a2, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 2), align 8
+ store double %a3, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 3), align 8
+ store double %a4, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 4), align 8
+ store double %a5, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 5), align 8
+ store double %a6, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 6), align 8
+ store double %a7, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 7), align 8
+ store double %a8, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 8), align 8
+ store double %a9, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 9), align 8
+ store double %a10, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 10), align 8
ret void
}
; Test that a load comes after a store to the same memory location when passing
; a byVal parameter to a function which has a fastcc function call
-%struct.str = type { i32, i32, [3 x i32*] }
+%struct.str = type { i32, i32, [3 x ptr] }
-declare fastcc void @_Z1F3str(%struct.str* noalias nocapture sret(%struct.str) %agg.result, %struct.str* byval(%struct.str) nocapture readonly align 4 %s)
+declare fastcc void @_Z1F3str(ptr noalias nocapture sret(%struct.str) %agg.result, ptr byval(%struct.str) nocapture readonly align 4 %s)
-define i32 @_Z1g3str(%struct.str* byval(%struct.str) nocapture readonly align 4 %s) {
+define i32 @_Z1g3str(ptr byval(%struct.str) nocapture readonly align 4 %s) {
; CHECK-LABEL: _Z1g3str:
; CHECK: sw $7, [[OFFSET:[0-9]+]]($sp)
; CHECK: lw ${{[0-9]+}}, [[OFFSET]]($sp)
entry:
%ref.tmp = alloca %struct.str, align 4
- %0 = bitcast %struct.str* %ref.tmp to i8*
- call void @llvm.lifetime.start.p0i8(i64 20, i8* nonnull %0)
- call fastcc void @_Z1F3str(%struct.str* nonnull sret(%struct.str) %ref.tmp, %struct.str* byval(%struct.str) nonnull align 4 %s)
- %cl.sroa.3.0..sroa_idx2 = getelementptr inbounds %struct.str, %struct.str* %ref.tmp, i32 0, i32 1
- %cl.sroa.3.0.copyload = load i32, i32* %cl.sroa.3.0..sroa_idx2, align 4
- call void @llvm.lifetime.end.p0i8(i64 20, i8* nonnull %0)
+ call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %ref.tmp)
+ call fastcc void @_Z1F3str(ptr nonnull sret(%struct.str) %ref.tmp, ptr byval(%struct.str) nonnull align 4 %s)
+ %cl.sroa.3.0..sroa_idx2 = getelementptr inbounds %struct.str, ptr %ref.tmp, i32 0, i32 1
+ %cl.sroa.3.0.copyload = load i32, ptr %cl.sroa.3.0..sroa_idx2, align 4
+ call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %ref.tmp)
ret i32 %cl.sroa.3.0.copyload
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; Function Attrs: nounwind optsize
define void @foo() {
entry:
- %0 = load double, double* @x, align 8
+ %0 = load double, ptr @x, align 8
%conv = fptoui double %0 to i32
- store i32 %conv, i32* @y, align 4
+ store i32 %conv, ptr @y, align 4
; pic1: lw ${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}})
; pic2: lw ${{[0-9]+}}, %got(__mips16_call_stub_2)(${{[0-9]+}})
ret void
declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
-define void @foo(<4 x float>* %agg.result, <4 x float>* %acc, <4 x float>* %a, <4 x float>* %b) {
+define void @foo(ptr %agg.result, ptr %acc, ptr %a, ptr %b) {
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
- %3 = load <4 x float>, <4 x float>* %acc, align 16
+ %3 = load <4 x float>, ptr %acc, align 16
%4 = call <4 x float> @llvm.mips.fadd.w(<4 x float> %3, <4 x float> %2)
- store <4 x float> %4, <4 x float>* %agg.result, align 16
+ store <4 x float> %4, ptr %agg.result, align 16
ret void
; CHECK-CONTRACT-OFF: fmul.w
; CHECK-CONTRACT-OFF: fadd.w
; CHECK-CONTRACT-FAST: fmadd.w
}
-define void @boo(<4 x float>* %agg.result, <4 x float>* %acc, <4 x float>* %a, <4 x float>* %b) {
+define void @boo(ptr %agg.result, ptr %acc, ptr %a, ptr %b) {
entry:
- %0 = load <4 x float>, <4 x float>* %a, align 16
- %1 = load <4 x float>, <4 x float>* %b, align 16
+ %0 = load <4 x float>, ptr %a, align 16
+ %1 = load <4 x float>, ptr %b, align 16
%2 = call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
- %3 = load <4 x float>, <4 x float>* %acc, align 16
+ %3 = load <4 x float>, ptr %acc, align 16
%4 = call <4 x float> @llvm.mips.fsub.w(<4 x float> %3, <4 x float> %2)
- store <4 x float> %4, <4 x float>* %agg.result, align 16
+ store <4 x float> %4, ptr %agg.result, align 16
ret void
; CHECK-CONTRACT-OFF: fmul.w
; CHECK-CONTRACT-OFF: fsub.w
@s2 = external global [4 x %struct.S2]
@s3 = external global %struct.S3
-define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
+define float @foo0(ptr nocapture %b, i32 %o) nounwind readonly {
entry:
; ALL-LABEL: foo0:
; CHECK-NACL-NOT: lwxc1
- %arrayidx = getelementptr inbounds float, float* %b, i32 %o
- %0 = load float, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %b, i32 %o
+ %0 = load float, ptr %arrayidx, align 4
ret float %0
}
-define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
+define double @foo1(ptr nocapture %b, i32 %o) nounwind readonly {
entry:
; ALL-LABEL: foo1:
; CHECK-NACL-NOT: ldxc1
- %arrayidx = getelementptr inbounds double, double* %b, i32 %o
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %b, i32 %o
+ %0 = load double, ptr %arrayidx, align 8
ret double %0
}
; luxc1 was removed in MIPS64r6
; MIPS64R6-NOT: luxc1
- %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
- %0 = load float, float* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S], ptr @s, i32 0, i32 %b, i32 0, i32 %c
+ %0 = load float, ptr %arrayidx1, align 1
ret float %0
}
-define void @foo3(float* nocapture %b, i32 %o) nounwind {
+define void @foo3(ptr nocapture %b, i32 %o) nounwind {
entry:
; ALL-LABEL: foo3:
; CHECK-NACL-NOT: swxc1
- %0 = load float, float* @gf, align 4
- %arrayidx = getelementptr inbounds float, float* %b, i32 %o
- store float %0, float* %arrayidx, align 4
+ %0 = load float, ptr @gf, align 4
+ %arrayidx = getelementptr inbounds float, ptr %b, i32 %o
+ store float %0, ptr %arrayidx, align 4
ret void
}
-define void @foo4(double* nocapture %b, i32 %o) nounwind {
+define void @foo4(ptr nocapture %b, i32 %o) nounwind {
entry:
; ALL-LABEL: foo4:
; CHECK-NACL-NOT: sdxc1
- %0 = load double, double* @gd, align 8
- %arrayidx = getelementptr inbounds double, double* %b, i32 %o
- store double %0, double* %arrayidx, align 8
+ %0 = load double, ptr @gd, align 8
+ %arrayidx = getelementptr inbounds double, ptr %b, i32 %o
+ store double %0, ptr %arrayidx, align 8
ret void
}
; MIPS64R6-NOT: suxc1
- %0 = load float, float* @gf, align 4
- %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
- store float %0, float* %arrayidx1, align 1
+ %0 = load float, ptr @gf, align 4
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S], ptr @s, i32 0, i32 %b, i32 0, i32 %c
+ store float %0, ptr %arrayidx1, align 1
ret void
}
; MIPS64R6-NOT: luxc1
- %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
- %0 = load double, double* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S2], ptr @s2, i32 0, i32 %b, i32 0, i32 %c
+ %0 = load double, ptr %arrayidx1, align 1
ret double %0
}
; MIPS64R6-NOT: suxc1
- %0 = load double, double* @gd, align 8
- %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
- store double %0, double* %arrayidx1, align 1
+ %0 = load double, ptr @gd, align 8
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S2], ptr @s2, i32 0, i32 %b, i32 0, i32 %c
+ store double %0, ptr %arrayidx1, align 1
ret void
}
; MIPS64R6-NOT: luxc1
- %0 = load float, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1
+ %0 = load float, ptr getelementptr inbounds (%struct.S3, ptr @s3, i32 0, i32 1), align 1
ret float %0
}
; MIPS64R6-NOT: suxc1
- store float %f, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1
+ store float %f, ptr getelementptr inbounds (%struct.S3, ptr @s3, i32 0, i32 1), align 1
ret void
}
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s
; check that $fp is not reserved.
-define void @foo0(i32* nocapture %b) nounwind {
+define void @foo0(ptr nocapture %b) nounwind {
entry:
; CHECK: sw $fp
; CHECK: lw $fp
- %0 = load i32, i32* %b, align 4
- %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1
- %1 = load i32, i32* %arrayidx.1, align 4
+ %0 = load i32, ptr %b, align 4
+ %arrayidx.1 = getelementptr inbounds i32, ptr %b, i32 1
+ %1 = load i32, ptr %arrayidx.1, align 4
%add.1 = add nsw i32 %1, 1
- %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2
- %2 = load i32, i32* %arrayidx.2, align 4
+ %arrayidx.2 = getelementptr inbounds i32, ptr %b, i32 2
+ %2 = load i32, ptr %arrayidx.2, align 4
%add.2 = add nsw i32 %2, 2
- %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3
- %3 = load i32, i32* %arrayidx.3, align 4
+ %arrayidx.3 = getelementptr inbounds i32, ptr %b, i32 3
+ %3 = load i32, ptr %arrayidx.3, align 4
%add.3 = add nsw i32 %3, 3
- %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4
- %4 = load i32, i32* %arrayidx.4, align 4
+ %arrayidx.4 = getelementptr inbounds i32, ptr %b, i32 4
+ %4 = load i32, ptr %arrayidx.4, align 4
%add.4 = add nsw i32 %4, 4
- %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5
- %5 = load i32, i32* %arrayidx.5, align 4
+ %arrayidx.5 = getelementptr inbounds i32, ptr %b, i32 5
+ %5 = load i32, ptr %arrayidx.5, align 4
%add.5 = add nsw i32 %5, 5
- %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 6
- %6 = load i32, i32* %arrayidx.6, align 4
+ %arrayidx.6 = getelementptr inbounds i32, ptr %b, i32 6
+ %6 = load i32, ptr %arrayidx.6, align 4
%add.6 = add nsw i32 %6, 6
- %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7
- %7 = load i32, i32* %arrayidx.7, align 4
+ %arrayidx.7 = getelementptr inbounds i32, ptr %b, i32 7
+ %7 = load i32, ptr %arrayidx.7, align 4
%add.7 = add nsw i32 %7, 7
call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
- call void bitcast (void (...)* @foo1 to void ()*)() nounwind
+ call void @foo1() nounwind
call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
ret void
}
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
; CHECK-LIBCALL-DAG: add.s
; CHECK-LIBCALL-DAG: %call16(__gnu_f2h_ieee)
-define void @test_fadd(half* %p, half* %q) #0 {
- %a = load half, half* %p, align 2
- %b = load half, half* %q, align 2
+define void @test_fadd(ptr %p, ptr %q) #0 {
+ %a = load half, ptr %p, align 2
+ %b = load half, ptr %q, align 2
%r = fadd half %a, %b
- store half %r, half* %p
+ store half %r, ptr %p
ret void
}
; CHECK-LIBCALL-LABEL: test_fpext_float:
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
-define float @test_fpext_float(half* %p) {
- %a = load half, half* %p, align 2
+define float @test_fpext_float(ptr %p) {
+ %a = load half, ptr %p, align 2
%r = fpext half %a to float
ret float %r
}
; CHECK-LIBCALL-LABEL: test_fpext_double:
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
; CHECK-LIBCALL: cvt.d.s
-define double @test_fpext_double(half* %p) {
- %a = load half, half* %p, align 2
+define double @test_fpext_double(ptr %p) {
+ %a = load half, ptr %p, align 2
%r = fpext half %a to double
ret double %r
}
; CHECK-LIBCALL-LABEL: test_fptrunc_float:
; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
-define void @test_fptrunc_float(float %f, half* %p) #0 {
+define void @test_fptrunc_float(float %f, ptr %p) #0 {
%a = fptrunc float %f to half
- store half %a, half* %p
+ store half %a, ptr %p
ret void
}
; CHECK-LIBCALL-LABEL: test_fptrunc_double:
; CHECK-LIBCALL: %call16(__truncdfhf2)
-define void @test_fptrunc_double(double %d, half* %p) #0 {
+define void @test_fptrunc_double(double %d, ptr %p) #0 {
%a = fptrunc double %d to half
- store half %a, half* %p
+ store half %a, ptr %p
ret void
}
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
-define <4 x float> @test_vec_fpext_float(<4 x half>* %p) #0 {
- %a = load <4 x half>, <4 x half>* %p, align 8
+define <4 x float> @test_vec_fpext_float(ptr %p) #0 {
+ %a = load <4 x half>, ptr %p, align 8
%b = fpext <4 x half> %a to <4 x float>
ret <4 x float> %b
}
; CHECK-LIBCALL: cvt.d.s
; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
; CHECK-LIBCALL: cvt.d.s
-define <4 x double> @test_vec_fpext_double(<4 x half>* %p) #0 {
- %a = load <4 x half>, <4 x half>* %p, align 8
+define <4 x double> @test_vec_fpext_double(ptr %p) #0 {
+ %a = load <4 x half>, ptr %p, align 8
%b = fpext <4 x half> %a to <4 x double>
ret <4 x double> %b
}
; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
-define void @test_vec_fptrunc_float(<4 x float> %a, <4 x half>* %p) #0 {
+define void @test_vec_fptrunc_float(<4 x float> %a, ptr %p) #0 {
%b = fptrunc <4 x float> %a to <4 x half>
- store <4 x half> %b, <4 x half>* %p, align 8
+ store <4 x half> %b, ptr %p, align 8
ret void
}
; CHECK-LIBCALL: %call16(__truncdfhf2)
; CHECK-LIBCALL: %call16(__truncdfhf2)
; CHECK-LIBCALL: %call16(__truncdfhf2)
-define void @test_vec_fptrunc_double(<4 x double> %a, <4 x half>* %p) #0 {
+define void @test_vec_fptrunc_double(<4 x double> %a, ptr %p) #0 {
%b = fptrunc <4 x double> %a to <4 x half>
- store <4 x half> %b, <4 x half>* %p, align 8
+ store <4 x half> %b, ptr %p, align 8
ret void
}
; fmask: .set reorder
; fmask: .end foo1
entry:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @one, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @one, align 4
%call = call float @copysignf(float %0, float %1) #2
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; fmask: save {{.*}}
; fmask: .end foo2
entry:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @negone, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @negone, align 4
%call = call float @copysignf(float %0, float %1) #2
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; fmask: .set macro
; fmask: .set reorder
; fmask: .end foo3
- %0 = load double, double* @xd, align 8
- %1 = load float, float* @oned, align 4
+ %0 = load double, ptr @xd, align 8
+ %1 = load float, ptr @oned, align 4
%conv = fpext float %1 to double
%call = call double @copysign(double %0, double %conv) #2
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; fmask: .ent foo4
; fmask: save {{.*}}
; fmask: .end foo4
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @negoned, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @negoned, align 8
%call = call double @copysign(double %0, double %1) #2
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo5() #0 {
entry:
- %0 = load float, float* @xn, align 4
+ %0 = load float, ptr @xn, align 4
%call = call float @fabsf(float %0) #2
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo6() #0 {
entry:
- %0 = load double, double* @xdn, align 8
+ %0 = load double, ptr @xdn, align 8
%call = call double @fabs(double %0) #2
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo7() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @sinf(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(sinf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo8() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @sin(double %0) #3
;pic: lw ${{[0-9]+}}, %call16(sin)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo9() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @cosf(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(cosf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo10() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @cos(double %0) #3
;pic: lw ${{[0-9]+}}, %call16(cos)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo11() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @sqrtf(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo12() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @sqrt(double %0) #3
;pic: lw ${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo13() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @floorf(float %0) #2
;pic: lw ${{[0-9]+}}, %call16(floorf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo14() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @floor(double %0) #2
;pic: lw ${{[0-9]+}}, %call16(floor)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo15() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @nearbyintf(float %0) #2
;pic: lw ${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo16() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @nearbyint(double %0) #2
;pic: lw ${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo17() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @ceilf(float %0) #2
;pic: lw ${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo18() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @ceil(double %0) #2
;pic: lw ${{[0-9]+}}, %call16(ceil)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo19() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @rintf(float %0) #2
;pic: lw ${{[0-9]+}}, %call16(rintf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo20() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @rint(double %0) #2
;pic: lw ${{[0-9]+}}, %call16(rint)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo21() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @truncf(float %0) #2
;pic: lw ${{[0-9]+}}, %call16(truncf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo22() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @trunc(double %0) #2
;pic: lw ${{[0-9]+}}, %call16(trunc)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo23() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @log2f(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(log2f)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo24() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @log2(double %0) #3
;pic: lw ${{[0-9]+}}, %call16(log2)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define void @foo25() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @exp2f(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
- store float %call, float* @y, align 4
+ store float %call, ptr @y, align 4
ret void
}
; Function Attrs: nounwind
define void @foo26() #0 {
entry:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%call = call double @exp2(double %0) #3
;pic: lw ${{[0-9]+}}, %call16(exp2)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
- store double %call, double* @yd, align 8
+ store double %call, ptr @yd, align 8
ret void
}
define void @foo() nounwind {
entry:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @x, align 4
%mul = fmul float %0, %1
- store float %mul, float* @x, align 4
+ store float %mul, ptr @x, align 4
; CHECK-STATIC16: jal __mips16_mulsf3
ret void
}
define void @vf(float %x) #0 {
entry:
%x.addr = alloca float, align 4
- store float %x, float* %x.addr, align 4
+ store float %x, ptr %x.addr, align 4
ret void
}
define void @vd(double %x) #0 {
entry:
%x.addr = alloca double, align 8
- store double %x, double* %x.addr, align 8
+ store double %x, ptr %x.addr, align 8
ret void
}
define void @foo1() #0 {
entry:
- store float 1.000000e+00, float* @zz, align 4
- %0 = load float, float* @y, align 4
- %1 = load float, float* @x, align 4
+ store float 1.000000e+00, ptr @zz, align 4
+ %0 = load float, ptr @y, align 4
+ %1 = load float, ptr @x, align 4
%add = fadd float %0, %1
- store float %add, float* @z, align 4
+ store float %add, ptr @z, align 4
ret void
}
define void @foo2() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
call void @vf(float %0)
ret void
}
define void @foo3() #0 {
entry:
%call = call float @fv()
- store float %call, float* @x, align 4
+ store float %call, ptr @x, align 4
ret void
}
define i32 @iv() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
ret i32 %0
}
entry:
%i.addr = alloca i32, align 4
%f.addr = alloca float, align 4
- store i32 %i, i32* %i.addr, align 4
- store float %f, float* %f.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
+ store float %f, ptr %f.addr, align 4
ret void
}
define void @foo() #0 {
entry:
- store float 2.000000e+00, float* @f, align 4
+ store float 2.000000e+00, ptr @f, align 4
ret void
}
; RUN: not llc -march=mips < %s 2>&1 | FileCheck %s
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
-define i8* @f() nounwind {
+define ptr @f() nounwind {
entry:
- %0 = call i8* @llvm.frameaddress(i32 1)
- ret i8* %0
+ %0 = call ptr @llvm.frameaddress(i32 1)
+ ret ptr %0
; CHECK: error: return address can be determined only for current frame
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=mipsel < %s | FileCheck %s
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
-define i8* @f() nounwind uwtable {
+define ptr @f() nounwind uwtable {
; CHECK-LABEL: f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addiu $sp, $sp, -8
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 8
entry:
- %0 = call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.frameaddress(i32 0)
+ ret ptr %0
}
define i32 @k() {
entry:
%h = alloca i32, align 4
- %call = call i32 @g(i32* %h)
+ %call = call i32 @g(ptr %h)
ret i32 %call
}
-declare i32 @g(i32*)
+declare i32 @g(ptr)
; STATIC-N64: daddiu $[[R3]], $[[R3]], %hi(g1)
; STATIC-N64: lw ${{[0-9]+}}, %lo(g1)($[[R3]])
- %0 = load i32, i32* @s1, align 4
+ %0 = load i32, ptr @s1, align 4
tail call void @foo1(i32 %0) nounwind
- %1 = load i32, i32* @g1, align 4
- store i32 %1, i32* @s1, align 4
+ %1 = load i32, ptr @g1, align 4
+ store i32 %1, ptr @s1, align 4
%add = add nsw i32 %1, 2
- store i32 %add, i32* @g1, align 4
+ store i32 %add, ptr @g1, align 4
ret void
}
; CHECK: addu $[[GP:[0-9]+]], $[[R1]], $25
; CHECK: lw ${{[0-9]+}}, %call16(foo2)($[[GP]])
- tail call void @foo2(i32* @g0) nounwind
- tail call void @foo2(i32* @g1) nounwind
- tail call void @foo2(i32* @g2) nounwind
+ tail call void @foo2(ptr @g0) nounwind
+ tail call void @foo2(ptr @g1) nounwind
+ tail call void @foo2(ptr @g2) nounwind
ret void
}
-declare void @foo2(i32*)
+declare void @foo2(ptr)
define i32 @g() {
entry:
- %0 = load i32, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @a, i32 0, i32 0), align 4
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
define i32 @f() {
entry:
- %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i32 0), align 4
+ %0 = load i32, ptr @b, align 4
ret i32 %0
}
define i32 @h() {
entry:
- %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i32 0), align 4
+ %0 = load i32, ptr @c, align 4
ret i32 %0
}
define internal fastcc void @internalFunc() nounwind noinline {
entry:
- %0 = load i32, i32* @g, align 4
+ %0 = load i32, ptr @g, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @g, align 4
+ store i32 %inc, ptr @g, align 4
ret void
}
-define void @no_lazy(void (i32)* %pf) {
+define void @no_lazy(ptr %pf) {
; CHECK-LABEL: no_lazy
; CHECK-NOT: gp_disp
; O3N32-NEXT: addiu $sp, $sp, 32
entry:
tail call void @f1() nounwind
- %tmp = load i32, i32* @p, align 4
+ %tmp = load i32, ptr @p, align 4
tail call void @f2(i32 %tmp) nounwind
- %tmp1 = load i32, i32* @q, align 4
+ %tmp1 = load i32, ptr @q, align 4
tail call void @f2(i32 %tmp1) nounwind
- %tmp2 = load i32, i32* @r, align 4
+ %tmp2 = load i32, ptr @r, align 4
tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind
ret void
}
define i32 @main() nounwind {
entry:
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0))
+ %call = call i32 (ptr, ...) @printf(ptr @.str)
ret i32 0
; SR: .set mips16
; SR32: .set reorder
; SR: .end main
; SR32: .end main
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define void @foo() nounwind {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
call void @v_sf(float %0)
- %1 = load double, double* @xd, align 8
+ %1 = load double, ptr @xd, align 8
call void @v_df(double %1)
- %2 = load float, float* @x, align 4
- %3 = load float, float* @y, align 4
+ %2 = load float, ptr @x, align 4
+ %3 = load float, ptr @y, align 4
call void @v_sf_sf(float %2, float %3)
- %4 = load double, double* @xd, align 8
- %5 = load float, float* @x, align 4
+ %4 = load double, ptr @xd, align 8
+ %5 = load float, ptr @x, align 4
call void @v_df_sf(double %4, float %5)
- %6 = load double, double* @xd, align 8
- %7 = load double, double* @yd, align 8
+ %6 = load double, ptr @xd, align 8
+ %7 = load double, ptr @yd, align 8
call void @v_df_df(double %6, double %7)
%call = call float @sf_v()
- %8 = load float, float* @x, align 4
+ %8 = load float, ptr @x, align 4
%call1 = call float @sf_sf(float %8)
- %9 = load double, double* @xd, align 8
+ %9 = load double, ptr @xd, align 8
%call2 = call float @sf_df(double %9)
- %10 = load float, float* @x, align 4
- %11 = load float, float* @y, align 4
+ %10 = load float, ptr @x, align 4
+ %11 = load float, ptr @y, align 4
%call3 = call float @sf_sf_sf(float %10, float %11)
- %12 = load double, double* @xd, align 8
- %13 = load float, float* @x, align 4
+ %12 = load double, ptr @xd, align 8
+ %13 = load float, ptr @x, align 4
%call4 = call float @sf_df_sf(double %12, float %13)
- %14 = load double, double* @xd, align 8
- %15 = load double, double* @yd, align 8
+ %14 = load double, ptr @xd, align 8
+ %15 = load double, ptr @yd, align 8
%call5 = call float @sf_df_df(double %14, double %15)
%call6 = call double @df_v()
- %16 = load float, float* @x, align 4
+ %16 = load float, ptr @x, align 4
%call7 = call double @df_sf(float %16)
- %17 = load double, double* @xd, align 8
+ %17 = load double, ptr @xd, align 8
%call8 = call double @df_df(double %17)
- %18 = load float, float* @x, align 4
- %19 = load float, float* @y, align 4
+ %18 = load float, ptr @x, align 4
+ %19 = load float, ptr @y, align 4
%call9 = call double @df_sf_sf(float %18, float %19)
- %20 = load double, double* @xd, align 8
- %21 = load float, float* @x, align 4
+ %20 = load double, ptr @xd, align 8
+ %21 = load float, ptr @x, align 4
%call10 = call double @df_df_sf(double %20, float %21)
- %22 = load double, double* @xd, align 8
- %23 = load double, double* @yd, align 8
+ %22 = load double, ptr @xd, align 8
+ %23 = load double, ptr @yd, align 8
%call11 = call double @df_df_df(double %22, double %23)
%call12 = call { float, float } @sc_v()
%24 = extractvalue { float, float } %call12, 0
%25 = extractvalue { float, float } %call12, 1
- %26 = load float, float* @x, align 4
+ %26 = load float, ptr @x, align 4
%call13 = call { float, float } @sc_sf(float %26)
%27 = extractvalue { float, float } %call13, 0
%28 = extractvalue { float, float } %call13, 1
- %29 = load double, double* @xd, align 8
+ %29 = load double, ptr @xd, align 8
%call14 = call { float, float } @sc_df(double %29)
%30 = extractvalue { float, float } %call14, 0
%31 = extractvalue { float, float } %call14, 1
- %32 = load float, float* @x, align 4
- %33 = load float, float* @y, align 4
+ %32 = load float, ptr @x, align 4
+ %33 = load float, ptr @y, align 4
%call15 = call { float, float } @sc_sf_sf(float %32, float %33)
%34 = extractvalue { float, float } %call15, 0
%35 = extractvalue { float, float } %call15, 1
- %36 = load double, double* @xd, align 8
- %37 = load float, float* @x, align 4
+ %36 = load double, ptr @xd, align 8
+ %37 = load float, ptr @x, align 4
%call16 = call { float, float } @sc_df_sf(double %36, float %37)
%38 = extractvalue { float, float } %call16, 0
%39 = extractvalue { float, float } %call16, 1
- %40 = load double, double* @xd, align 8
- %41 = load double, double* @yd, align 8
+ %40 = load double, ptr @xd, align 8
+ %41 = load double, ptr @yd, align 8
%call17 = call { float, float } @sc_df_df(double %40, double %41)
%42 = extractvalue { float, float } %call17, 0
%43 = extractvalue { float, float } %call17, 1
%call18 = call { double, double } @dc_v()
%44 = extractvalue { double, double } %call18, 0
%45 = extractvalue { double, double } %call18, 1
- %46 = load float, float* @x, align 4
+ %46 = load float, ptr @x, align 4
%call19 = call { double, double } @dc_sf(float %46)
%47 = extractvalue { double, double } %call19, 0
%48 = extractvalue { double, double } %call19, 1
- %49 = load double, double* @xd, align 8
+ %49 = load double, ptr @xd, align 8
%call20 = call { double, double } @dc_df(double %49)
%50 = extractvalue { double, double } %call20, 0
%51 = extractvalue { double, double } %call20, 1
- %52 = load float, float* @x, align 4
- %53 = load float, float* @y, align 4
+ %52 = load float, ptr @x, align 4
+ %53 = load float, ptr @y, align 4
%call21 = call { double, double } @dc_sf_sf(float %52, float %53)
%54 = extractvalue { double, double } %call21, 0
%55 = extractvalue { double, double } %call21, 1
- %56 = load double, double* @xd, align 8
- %57 = load float, float* @x, align 4
+ %56 = load double, ptr @xd, align 8
+ %57 = load float, ptr @x, align 4
%call22 = call { double, double } @dc_df_sf(double %56, float %57)
%58 = extractvalue { double, double } %call22, 0
%59 = extractvalue { double, double } %call22, 1
- %60 = load double, double* @xd, align 8
- %61 = load double, double* @yd, align 8
+ %60 = load double, ptr @xd, align 8
+ %61 = load double, ptr @yd, align 8
%call23 = call { double, double } @dc_df_df(double %60, double %61)
%62 = extractvalue { double, double } %call23, 0
%63 = extractvalue { double, double } %call23, 1
; Function Attrs: nounwind
define void @clear() #0 {
entry:
- store float 1.000000e+00, float* @x, align 4
- store float 1.000000e+00, float* @y, align 4
- store double 1.000000e+00, double* @xd, align 8
- store double 1.000000e+00, double* @yd, align 8
- store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
- store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
- store float 1.000000e+00, float* @ret_sf, align 4
- store double 1.000000e+00, double* @ret_df, align 8
- store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
- store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- store float 0.000000e+00, float* @lx, align 4
- store float 0.000000e+00, float* @ly, align 4
- store double 0.000000e+00, double* @lxd, align 8
- store double 0.000000e+00, double* @lyd, align 8
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 0)
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 1)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 0)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 1)
- store float 0.000000e+00, float* @lret_sf, align 4
- store double 0.000000e+00, double* @lret_df, align 8
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ store float 1.000000e+00, ptr @x, align 4
+ store float 1.000000e+00, ptr @y, align 4
+ store double 1.000000e+00, ptr @xd, align 8
+ store double 1.000000e+00, ptr @yd, align 8
+ store float 1.000000e+00, ptr @xy
+ store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
+ store double 1.000000e+00, ptr @xyd
+ store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+ store float 1.000000e+00, ptr @ret_sf, align 4
+ store double 1.000000e+00, ptr @ret_df, align 8
+ store float 1.000000e+00, ptr @ret_sc
+ store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+ store double 1.000000e+00, ptr @ret_dc
+ store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ store float 0.000000e+00, ptr @lx, align 4
+ store float 0.000000e+00, ptr @ly, align 4
+ store double 0.000000e+00, ptr @lxd, align 8
+ store double 0.000000e+00, ptr @lyd, align 8
+ store float 0.000000e+00, ptr @lxy
+ store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lxy, i32 0, i32 1)
+ store double 0.000000e+00, ptr @lxyd
+ store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lxyd, i32 0, i32 1)
+ store float 0.000000e+00, ptr @lret_sf, align 4
+ store double 0.000000e+00, ptr @lret_df, align 8
+ store float 0.000000e+00, ptr @lret_sc
+ store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+ store double 0.000000e+00, ptr @lret_dc
+ store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
ret void
}
define i32 @main() #0 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
call void @clear()
- store float 1.500000e+00, float* @lx, align 4
- %0 = load float, float* @lx, align 4
+ store float 1.500000e+00, ptr @lx, align 4
+ %0 = load float, ptr @lx, align 4
call void @v_sf(float %0)
- %1 = load float, float* @x, align 4
+ %1 = load float, ptr @x, align 4
%conv = fpext float %1 to double
- %2 = load float, float* @lx, align 4
+ %2 = load float, ptr @lx, align 4
%conv1 = fpext float %2 to double
- %3 = load float, float* @x, align 4
- %4 = load float, float* @lx, align 4
+ %3 = load float, ptr @x, align 4
+ %4 = load float, ptr @lx, align 4
%cmp = fcmp oeq float %3, %4
%conv2 = zext i1 %cmp to i32
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, double %conv, double %conv1, i32 %conv2)
call void @clear()
- store double 0x41678C29C0000000, double* @lxd, align 8
- %5 = load double, double* @lxd, align 8
+ store double 0x41678C29C0000000, ptr @lxd, align 8
+ %5 = load double, ptr @lxd, align 8
call void @v_df(double %5)
- %6 = load double, double* @xd, align 8
- %7 = load double, double* @lxd, align 8
- %8 = load double, double* @xd, align 8
- %9 = load double, double* @lxd, align 8
+ %6 = load double, ptr @xd, align 8
+ %7 = load double, ptr @lxd, align 8
+ %8 = load double, ptr @xd, align 8
+ %9 = load double, ptr @lxd, align 8
%cmp3 = fcmp oeq double %8, %9
%conv4 = zext i1 %cmp3 to i32
- %call5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4)
+ %call5 = call i32 (ptr, ...) @printf(ptr @.str, double %6, double %7, i32 %conv4)
call void @clear()
- store float 9.000000e+00, float* @lx, align 4
- store float 1.000000e+01, float* @ly, align 4
- %10 = load float, float* @lx, align 4
- %11 = load float, float* @ly, align 4
+ store float 9.000000e+00, ptr @lx, align 4
+ store float 1.000000e+01, ptr @ly, align 4
+ %10 = load float, ptr @lx, align 4
+ %11 = load float, ptr @ly, align 4
call void @v_sf_sf(float %10, float %11)
- %12 = load float, float* @x, align 4
+ %12 = load float, ptr @x, align 4
%conv6 = fpext float %12 to double
- %13 = load float, float* @lx, align 4
+ %13 = load float, ptr @lx, align 4
%conv7 = fpext float %13 to double
- %14 = load float, float* @y, align 4
+ %14 = load float, ptr @y, align 4
%conv8 = fpext float %14 to double
- %15 = load float, float* @ly, align 4
+ %15 = load float, ptr @ly, align 4
%conv9 = fpext float %15 to double
- %16 = load float, float* @x, align 4
- %17 = load float, float* @lx, align 4
+ %16 = load float, ptr @x, align 4
+ %17 = load float, ptr @lx, align 4
%cmp10 = fcmp oeq float %16, %17
br i1 %cmp10, label %land.rhs, label %land.end
land.rhs: ; preds = %entry
- %18 = load float, float* @y, align 4
- %19 = load float, float* @ly, align 4
+ %18 = load float, ptr @y, align 4
+ %19 = load float, ptr @ly, align 4
%cmp12 = fcmp oeq float %18, %19
br label %land.end
land.end: ; preds = %land.rhs, %entry
%20 = phi i1 [ false, %entry ], [ %cmp12, %land.rhs ]
%land.ext = zext i1 %20 to i32
- %call14 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext)
+ %call14 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext)
call void @clear()
- store float 0x3FFE666660000000, float* @lx, align 4
- store double 0x4007E613249FF279, double* @lyd, align 8
- %21 = load float, float* @lx, align 4
- %22 = load double, double* @lyd, align 8
+ store float 0x3FFE666660000000, ptr @lx, align 4
+ store double 0x4007E613249FF279, ptr @lyd, align 8
+ %21 = load float, ptr @lx, align 4
+ %22 = load double, ptr @lyd, align 8
call void @v_sf_df(float %21, double %22)
- %23 = load float, float* @x, align 4
+ %23 = load float, ptr @x, align 4
%conv15 = fpext float %23 to double
- %24 = load float, float* @lx, align 4
+ %24 = load float, ptr @lx, align 4
%conv16 = fpext float %24 to double
- %25 = load double, double* @yd, align 8
- %26 = load double, double* @lyd, align 8
- %27 = load float, float* @x, align 4
- %28 = load float, float* @lx, align 4
+ %25 = load double, ptr @yd, align 8
+ %26 = load double, ptr @lyd, align 8
+ %27 = load float, ptr @x, align 4
+ %28 = load float, ptr @lx, align 4
%cmp17 = fcmp oeq float %27, %28
%conv18 = zext i1 %cmp17 to i32
- %29 = load double, double* @yd, align 8
- %30 = load double, double* @lyd, align 8
+ %29 = load double, ptr @yd, align 8
+ %30 = load double, ptr @lyd, align 8
%cmp19 = fcmp oeq double %29, %30
%conv20 = zext i1 %cmp19 to i32
%and = and i32 %conv18, %conv20
- %call21 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv15, double %conv16, double %25, double %26, i32 %and)
+ %call21 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv15, double %conv16, double %25, double %26, i32 %and)
call void @clear()
- store double 0x4194E54F94000000, double* @lxd, align 8
- store float 7.600000e+01, float* @ly, align 4
- %31 = load double, double* @lxd, align 8
- %32 = load float, float* @ly, align 4
+ store double 0x4194E54F94000000, ptr @lxd, align 8
+ store float 7.600000e+01, ptr @ly, align 4
+ %31 = load double, ptr @lxd, align 8
+ %32 = load float, ptr @ly, align 4
call void @v_df_sf(double %31, float %32)
- %33 = load double, double* @xd, align 8
- %34 = load double, double* @lxd, align 8
- %35 = load float, float* @y, align 4
+ %33 = load double, ptr @xd, align 8
+ %34 = load double, ptr @lxd, align 8
+ %35 = load float, ptr @y, align 4
%conv22 = fpext float %35 to double
- %36 = load float, float* @ly, align 4
+ %36 = load float, ptr @ly, align 4
%conv23 = fpext float %36 to double
- %37 = load double, double* @xd, align 8
- %38 = load double, double* @lxd, align 8
+ %37 = load double, ptr @xd, align 8
+ %38 = load double, ptr @lxd, align 8
%cmp24 = fcmp oeq double %37, %38
%conv25 = zext i1 %cmp24 to i32
- %39 = load float, float* @y, align 4
- %40 = load float, float* @ly, align 4
+ %39 = load float, ptr @y, align 4
+ %40 = load float, ptr @ly, align 4
%cmp26 = fcmp oeq float %39, %40
%conv27 = zext i1 %cmp26 to i32
%and28 = and i32 %conv25, %conv27
- %call29 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %33, double %34, double %conv22, double %conv23, i32 %and28)
+ %call29 = call i32 (ptr, ...) @printf(ptr @.str1, double %33, double %34, double %conv22, double %conv23, i32 %and28)
call void @clear()
- store double 7.365198e+07, double* @lxd, align 8
- store double 0x416536CD80000000, double* @lyd, align 8
- %41 = load double, double* @lxd, align 8
- %42 = load double, double* @lyd, align 8
+ store double 7.365198e+07, ptr @lxd, align 8
+ store double 0x416536CD80000000, ptr @lyd, align 8
+ %41 = load double, ptr @lxd, align 8
+ %42 = load double, ptr @lyd, align 8
call void @v_df_df(double %41, double %42)
- %43 = load double, double* @xd, align 8
- %44 = load double, double* @lxd, align 8
- %45 = load double, double* @yd, align 8
- %46 = load double, double* @lyd, align 8
- %47 = load double, double* @xd, align 8
- %48 = load double, double* @lxd, align 8
+ %43 = load double, ptr @xd, align 8
+ %44 = load double, ptr @lxd, align 8
+ %45 = load double, ptr @yd, align 8
+ %46 = load double, ptr @lyd, align 8
+ %47 = load double, ptr @xd, align 8
+ %48 = load double, ptr @lxd, align 8
%cmp30 = fcmp oeq double %47, %48
%conv31 = zext i1 %cmp30 to i32
- %49 = load double, double* @yd, align 8
- %50 = load double, double* @lyd, align 8
+ %49 = load double, ptr @yd, align 8
+ %50 = load double, ptr @lyd, align 8
%cmp32 = fcmp oeq double %49, %50
%conv33 = zext i1 %cmp32 to i32
%and34 = and i32 %conv31, %conv33
- %call35 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %43, double %44, double %45, double %46, i32 %and34)
+ %call35 = call i32 (ptr, ...) @printf(ptr @.str1, double %43, double %44, double %45, double %46, i32 %and34)
call void @clear()
- store float 0x4016666660000000, float* @ret_sf, align 4
+ store float 0x4016666660000000, ptr @ret_sf, align 4
%call36 = call float @sf_v()
- store float %call36, float* @lret_sf, align 4
- %51 = load float, float* @ret_sf, align 4
+ store float %call36, ptr @lret_sf, align 4
+ %51 = load float, ptr @ret_sf, align 4
%conv37 = fpext float %51 to double
- %52 = load float, float* @lret_sf, align 4
+ %52 = load float, ptr @lret_sf, align 4
%conv38 = fpext float %52 to double
- %53 = load float, float* @ret_sf, align 4
- %54 = load float, float* @lret_sf, align 4
+ %53 = load float, ptr @ret_sf, align 4
+ %54 = load float, ptr @lret_sf, align 4
%cmp39 = fcmp oeq float %53, %54
%conv40 = zext i1 %cmp39 to i32
- %call41 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40)
+ %call41 = call i32 (ptr, ...) @printf(ptr @.str, double %conv37, double %conv38, i32 %conv40)
call void @clear()
- store float 4.587300e+06, float* @ret_sf, align 4
- store float 3.420000e+02, float* @lx, align 4
- %55 = load float, float* @lx, align 4
+ store float 4.587300e+06, ptr @ret_sf, align 4
+ store float 3.420000e+02, ptr @lx, align 4
+ %55 = load float, ptr @lx, align 4
%call42 = call float @sf_sf(float %55)
- store float %call42, float* @lret_sf, align 4
- %56 = load float, float* @ret_sf, align 4
+ store float %call42, ptr @lret_sf, align 4
+ %56 = load float, ptr @ret_sf, align 4
%conv43 = fpext float %56 to double
- %57 = load float, float* @lret_sf, align 4
+ %57 = load float, ptr @lret_sf, align 4
%conv44 = fpext float %57 to double
- %58 = load float, float* @x, align 4
+ %58 = load float, ptr @x, align 4
%conv45 = fpext float %58 to double
- %59 = load float, float* @lx, align 4
+ %59 = load float, ptr @lx, align 4
%conv46 = fpext float %59 to double
- %60 = load float, float* @ret_sf, align 4
- %61 = load float, float* @lret_sf, align 4
+ %60 = load float, ptr @ret_sf, align 4
+ %61 = load float, ptr @lret_sf, align 4
%cmp47 = fcmp oeq float %60, %61
%conv48 = zext i1 %cmp47 to i32
- %62 = load float, float* @x, align 4
- %63 = load float, float* @lx, align 4
+ %62 = load float, ptr @x, align 4
+ %63 = load float, ptr @lx, align 4
%cmp49 = fcmp oeq float %62, %63
%conv50 = zext i1 %cmp49 to i32
%and51 = and i32 %conv48, %conv50
- %call52 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51)
+ %call52 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51)
call void @clear()
- store float 4.445910e+06, float* @ret_sf, align 4
- store double 0x419A7DB294000000, double* @lxd, align 8
- %64 = load double, double* @lxd, align 8
+ store float 4.445910e+06, ptr @ret_sf, align 4
+ store double 0x419A7DB294000000, ptr @lxd, align 8
+ %64 = load double, ptr @lxd, align 8
%call53 = call float @sf_df(double %64)
- store float %call53, float* @lret_sf, align 4
- %65 = load float, float* @ret_sf, align 4
+ store float %call53, ptr @lret_sf, align 4
+ %65 = load float, ptr @ret_sf, align 4
%conv54 = fpext float %65 to double
- %66 = load float, float* @lret_sf, align 4
+ %66 = load float, ptr @lret_sf, align 4
%conv55 = fpext float %66 to double
- %67 = load double, double* @xd, align 8
- %68 = load double, double* @lxd, align 8
- %69 = load float, float* @ret_sf, align 4
- %70 = load float, float* @lret_sf, align 4
+ %67 = load double, ptr @xd, align 8
+ %68 = load double, ptr @lxd, align 8
+ %69 = load float, ptr @ret_sf, align 4
+ %70 = load float, ptr @lret_sf, align 4
%cmp56 = fcmp oeq float %69, %70
%conv57 = zext i1 %cmp56 to i32
- %71 = load double, double* @xd, align 8
- %72 = load double, double* @lxd, align 8
+ %71 = load double, ptr @xd, align 8
+ %72 = load double, ptr @lxd, align 8
%cmp58 = fcmp oeq double %71, %72
%conv59 = zext i1 %cmp58 to i32
%and60 = and i32 %conv57, %conv59
- %call61 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv54, double %conv55, double %67, double %68, i32 %and60)
+ %call61 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv54, double %conv55, double %67, double %68, i32 %and60)
call void @clear()
- store float 0x3FFF4BC6A0000000, float* @ret_sf, align 4
- store float 4.445500e+03, float* @lx, align 4
- store float 0x4068ACCCC0000000, float* @ly, align 4
- %73 = load float, float* @lx, align 4
- %74 = load float, float* @ly, align 4
+ store float 0x3FFF4BC6A0000000, ptr @ret_sf, align 4
+ store float 4.445500e+03, ptr @lx, align 4
+ store float 0x4068ACCCC0000000, ptr @ly, align 4
+ %73 = load float, ptr @lx, align 4
+ %74 = load float, ptr @ly, align 4
%call62 = call float @sf_sf_sf(float %73, float %74)
- store float %call62, float* @lret_sf, align 4
- %75 = load float, float* @ret_sf, align 4
+ store float %call62, ptr @lret_sf, align 4
+ %75 = load float, ptr @ret_sf, align 4
%conv63 = fpext float %75 to double
- %76 = load float, float* @lret_sf, align 4
+ %76 = load float, ptr @lret_sf, align 4
%conv64 = fpext float %76 to double
- %77 = load float, float* @x, align 4
+ %77 = load float, ptr @x, align 4
%conv65 = fpext float %77 to double
- %78 = load float, float* @lx, align 4
+ %78 = load float, ptr @lx, align 4
%conv66 = fpext float %78 to double
- %79 = load float, float* @y, align 4
+ %79 = load float, ptr @y, align 4
%conv67 = fpext float %79 to double
- %80 = load float, float* @ly, align 4
+ %80 = load float, ptr @ly, align 4
%conv68 = fpext float %80 to double
- %81 = load float, float* @ret_sf, align 4
- %82 = load float, float* @lret_sf, align 4
+ %81 = load float, ptr @ret_sf, align 4
+ %82 = load float, ptr @lret_sf, align 4
%cmp69 = fcmp oeq float %81, %82
br i1 %cmp69, label %land.lhs.true, label %land.end76
land.lhs.true: ; preds = %land.end
- %83 = load float, float* @x, align 4
- %84 = load float, float* @lx, align 4
+ %83 = load float, ptr @x, align 4
+ %84 = load float, ptr @lx, align 4
%cmp71 = fcmp oeq float %83, %84
br i1 %cmp71, label %land.rhs73, label %land.end76
land.rhs73: ; preds = %land.lhs.true
- %85 = load float, float* @y, align 4
- %86 = load float, float* @ly, align 4
+ %85 = load float, ptr @y, align 4
+ %86 = load float, ptr @ly, align 4
%cmp74 = fcmp oeq float %85, %86
br label %land.end76
land.end76: ; preds = %land.rhs73, %land.lhs.true, %land.end
%87 = phi i1 [ false, %land.lhs.true ], [ false, %land.end ], [ %cmp74, %land.rhs73 ]
%land.ext77 = zext i1 %87 to i32
- %call78 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77)
+ %call78 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77)
call void @clear()
- store float 9.991300e+04, float* @ret_sf, align 4
- store float 1.114500e+04, float* @lx, align 4
- store double 9.994445e+07, double* @lyd, align 8
- %88 = load float, float* @lx, align 4
- %89 = load double, double* @lyd, align 8
+ store float 9.991300e+04, ptr @ret_sf, align 4
+ store float 1.114500e+04, ptr @lx, align 4
+ store double 9.994445e+07, ptr @lyd, align 8
+ %88 = load float, ptr @lx, align 4
+ %89 = load double, ptr @lyd, align 8
%call79 = call float @sf_sf_df(float %88, double %89)
- store float %call79, float* @lret_sf, align 4
- %90 = load float, float* @ret_sf, align 4
+ store float %call79, ptr @lret_sf, align 4
+ %90 = load float, ptr @ret_sf, align 4
%conv80 = fpext float %90 to double
- %91 = load float, float* @lret_sf, align 4
+ %91 = load float, ptr @lret_sf, align 4
%conv81 = fpext float %91 to double
- %92 = load float, float* @x, align 4
+ %92 = load float, ptr @x, align 4
%conv82 = fpext float %92 to double
- %93 = load float, float* @lx, align 4
+ %93 = load float, ptr @lx, align 4
%conv83 = fpext float %93 to double
- %94 = load double, double* @yd, align 8
- %95 = load double, double* @lyd, align 8
- %96 = load float, float* @ret_sf, align 4
- %97 = load float, float* @lret_sf, align 4
+ %94 = load double, ptr @yd, align 8
+ %95 = load double, ptr @lyd, align 8
+ %96 = load float, ptr @ret_sf, align 4
+ %97 = load float, ptr @lret_sf, align 4
%cmp84 = fcmp oeq float %96, %97
br i1 %cmp84, label %land.lhs.true86, label %land.end92
land.lhs.true86: ; preds = %land.end76
- %98 = load float, float* @x, align 4
- %99 = load float, float* @lx, align 4
+ %98 = load float, ptr @x, align 4
+ %99 = load float, ptr @lx, align 4
%cmp87 = fcmp oeq float %98, %99
br i1 %cmp87, label %land.rhs89, label %land.end92
land.rhs89: ; preds = %land.lhs.true86
- %100 = load double, double* @yd, align 8
- %101 = load double, double* @lyd, align 8
+ %100 = load double, ptr @yd, align 8
+ %101 = load double, ptr @lyd, align 8
%cmp90 = fcmp oeq double %100, %101
br label %land.end92
land.end92: ; preds = %land.rhs89, %land.lhs.true86, %land.end76
%102 = phi i1 [ false, %land.lhs.true86 ], [ false, %land.end76 ], [ %cmp90, %land.rhs89 ]
%land.ext93 = zext i1 %102 to i32
- %call94 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93)
+ %call94 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93)
call void @clear()
- store float 0x417CCC7A00000000, float* @ret_sf, align 4
- store double 0x4172034530000000, double* @lxd, align 8
- store float 4.456200e+04, float* @ly, align 4
- %103 = load double, double* @lxd, align 8
- %104 = load float, float* @ly, align 4
+ store float 0x417CCC7A00000000, ptr @ret_sf, align 4
+ store double 0x4172034530000000, ptr @lxd, align 8
+ store float 4.456200e+04, ptr @ly, align 4
+ %103 = load double, ptr @lxd, align 8
+ %104 = load float, ptr @ly, align 4
%call95 = call float @sf_df_sf(double %103, float %104)
- store float %call95, float* @lret_sf, align 4
- %105 = load float, float* @ret_sf, align 4
+ store float %call95, ptr @lret_sf, align 4
+ %105 = load float, ptr @ret_sf, align 4
%conv96 = fpext float %105 to double
- %106 = load float, float* @lret_sf, align 4
+ %106 = load float, ptr @lret_sf, align 4
%conv97 = fpext float %106 to double
- %107 = load double, double* @xd, align 8
- %108 = load double, double* @lxd, align 8
- %109 = load float, float* @y, align 4
+ %107 = load double, ptr @xd, align 8
+ %108 = load double, ptr @lxd, align 8
+ %109 = load float, ptr @y, align 4
%conv98 = fpext float %109 to double
- %110 = load float, float* @ly, align 4
+ %110 = load float, ptr @ly, align 4
%conv99 = fpext float %110 to double
- %111 = load float, float* @ret_sf, align 4
- %112 = load float, float* @lret_sf, align 4
+ %111 = load float, ptr @ret_sf, align 4
+ %112 = load float, ptr @lret_sf, align 4
%cmp100 = fcmp oeq float %111, %112
br i1 %cmp100, label %land.lhs.true102, label %land.end108
land.lhs.true102: ; preds = %land.end92
- %113 = load double, double* @xd, align 8
- %114 = load double, double* @lxd, align 8
+ %113 = load double, ptr @xd, align 8
+ %114 = load double, ptr @lxd, align 8
%cmp103 = fcmp oeq double %113, %114
br i1 %cmp103, label %land.rhs105, label %land.end108
land.rhs105: ; preds = %land.lhs.true102
- %115 = load float, float* @y, align 4
- %116 = load float, float* @ly, align 4
+ %115 = load float, ptr @y, align 4
+ %116 = load float, ptr @ly, align 4
%cmp106 = fcmp oeq float %115, %116
br label %land.end108
land.end108: ; preds = %land.rhs105, %land.lhs.true102, %land.end92
%117 = phi i1 [ false, %land.lhs.true102 ], [ false, %land.end92 ], [ %cmp106, %land.rhs105 ]
%land.ext109 = zext i1 %117 to i32
- %call110 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109)
+ %call110 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109)
call void @clear()
- store float 3.987721e+06, float* @ret_sf, align 4
- store double 0x3FF1F49F6DDDC2D8, double* @lxd, align 8
- store double 0x409129F306A2B170, double* @lyd, align 8
- %118 = load double, double* @lxd, align 8
- %119 = load double, double* @lyd, align 8
+ store float 3.987721e+06, ptr @ret_sf, align 4
+ store double 0x3FF1F49F6DDDC2D8, ptr @lxd, align 8
+ store double 0x409129F306A2B170, ptr @lyd, align 8
+ %118 = load double, ptr @lxd, align 8
+ %119 = load double, ptr @lyd, align 8
%call111 = call float @sf_df_df(double %118, double %119)
- store float %call111, float* @lret_sf, align 4
- %120 = load float, float* @ret_sf, align 4
+ store float %call111, ptr @lret_sf, align 4
+ %120 = load float, ptr @ret_sf, align 4
%conv112 = fpext float %120 to double
- %121 = load float, float* @lret_sf, align 4
+ %121 = load float, ptr @lret_sf, align 4
%conv113 = fpext float %121 to double
- %122 = load double, double* @xd, align 8
- %123 = load double, double* @lxd, align 8
- %124 = load double, double* @yd, align 8
- %125 = load double, double* @lyd, align 8
- %126 = load float, float* @ret_sf, align 4
- %127 = load float, float* @lret_sf, align 4
+ %122 = load double, ptr @xd, align 8
+ %123 = load double, ptr @lxd, align 8
+ %124 = load double, ptr @yd, align 8
+ %125 = load double, ptr @lyd, align 8
+ %126 = load float, ptr @ret_sf, align 4
+ %127 = load float, ptr @lret_sf, align 4
%cmp114 = fcmp oeq float %126, %127
br i1 %cmp114, label %land.lhs.true116, label %land.end122
land.lhs.true116: ; preds = %land.end108
- %128 = load double, double* @xd, align 8
- %129 = load double, double* @lxd, align 8
+ %128 = load double, ptr @xd, align 8
+ %129 = load double, ptr @lxd, align 8
%cmp117 = fcmp oeq double %128, %129
br i1 %cmp117, label %land.rhs119, label %land.end122
land.rhs119: ; preds = %land.lhs.true116
- %130 = load double, double* @yd, align 8
- %131 = load double, double* @lyd, align 8
+ %130 = load double, ptr @yd, align 8
+ %131 = load double, ptr @lyd, align 8
%cmp120 = fcmp oeq double %130, %131
br label %land.end122
land.end122: ; preds = %land.rhs119, %land.lhs.true116, %land.end108
%132 = phi i1 [ false, %land.lhs.true116 ], [ false, %land.end108 ], [ %cmp120, %land.rhs119 ]
%land.ext123 = zext i1 %132 to i32
- %call124 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123)
+ %call124 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123)
call void @clear()
- store double 1.561234e+01, double* @ret_df, align 8
+ store double 1.561234e+01, ptr @ret_df, align 8
%call125 = call double @df_v()
- store double %call125, double* @lret_df, align 8
- %133 = load double, double* @ret_df, align 8
- %134 = load double, double* @lret_df, align 8
- %135 = load double, double* @ret_df, align 8
- %136 = load double, double* @lret_df, align 8
+ store double %call125, ptr @lret_df, align 8
+ %133 = load double, ptr @ret_df, align 8
+ %134 = load double, ptr @lret_df, align 8
+ %135 = load double, ptr @ret_df, align 8
+ %136 = load double, ptr @lret_df, align 8
%cmp126 = fcmp oeq double %135, %136
%conv127 = zext i1 %cmp126 to i32
- %call128 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127)
+ %call128 = call i32 (ptr, ...) @printf(ptr @.str, double %133, double %134, i32 %conv127)
call void @clear()
- store double 1.345873e+01, double* @ret_df, align 8
- store float 3.434520e+05, float* @lx, align 4
- %137 = load float, float* @lx, align 4
+ store double 1.345873e+01, ptr @ret_df, align 8
+ store float 3.434520e+05, ptr @lx, align 4
+ %137 = load float, ptr @lx, align 4
%call129 = call double @df_sf(float %137)
- store double %call129, double* @lret_df, align 8
- %138 = load double, double* @ret_df, align 8
- %139 = load double, double* @lret_df, align 8
- %140 = load float, float* @x, align 4
+ store double %call129, ptr @lret_df, align 8
+ %138 = load double, ptr @ret_df, align 8
+ %139 = load double, ptr @lret_df, align 8
+ %140 = load float, ptr @x, align 4
%conv130 = fpext float %140 to double
- %141 = load float, float* @lx, align 4
+ %141 = load float, ptr @lx, align 4
%conv131 = fpext float %141 to double
- %142 = load double, double* @ret_df, align 8
- %143 = load double, double* @lret_df, align 8
+ %142 = load double, ptr @ret_df, align 8
+ %143 = load double, ptr @lret_df, align 8
%cmp132 = fcmp oeq double %142, %143
%conv133 = zext i1 %cmp132 to i32
- %144 = load float, float* @x, align 4
- %145 = load float, float* @lx, align 4
+ %144 = load float, ptr @x, align 4
+ %145 = load float, ptr @lx, align 4
%cmp134 = fcmp oeq float %144, %145
%conv135 = zext i1 %cmp134 to i32
%and136 = and i32 %conv133, %conv135
- %call137 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %138, double %139, double %conv130, double %conv131, i32 %and136)
+ %call137 = call i32 (ptr, ...) @printf(ptr @.str1, double %138, double %139, double %conv130, double %conv131, i32 %and136)
call void @clear()
- store double 0x4084F3AB7AA25D8D, double* @ret_df, align 8
- store double 0x4114F671D2F1A9FC, double* @lxd, align 8
- %146 = load double, double* @lxd, align 8
+ store double 0x4084F3AB7AA25D8D, ptr @ret_df, align 8
+ store double 0x4114F671D2F1A9FC, ptr @lxd, align 8
+ %146 = load double, ptr @lxd, align 8
%call138 = call double @df_df(double %146)
- store double %call138, double* @lret_df, align 8
- %147 = load double, double* @ret_df, align 8
- %148 = load double, double* @lret_df, align 8
- %149 = load double, double* @xd, align 8
- %150 = load double, double* @lxd, align 8
- %151 = load double, double* @ret_df, align 8
- %152 = load double, double* @lret_df, align 8
+ store double %call138, ptr @lret_df, align 8
+ %147 = load double, ptr @ret_df, align 8
+ %148 = load double, ptr @lret_df, align 8
+ %149 = load double, ptr @xd, align 8
+ %150 = load double, ptr @lxd, align 8
+ %151 = load double, ptr @ret_df, align 8
+ %152 = load double, ptr @lret_df, align 8
%cmp139 = fcmp oeq double %151, %152
%conv140 = zext i1 %cmp139 to i32
- %153 = load double, double* @xd, align 8
- %154 = load double, double* @lxd, align 8
+ %153 = load double, ptr @xd, align 8
+ %154 = load double, ptr @lxd, align 8
%cmp141 = fcmp oeq double %153, %154
%conv142 = zext i1 %cmp141 to i32
%and143 = and i32 %conv140, %conv142
- %call144 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %147, double %148, double %149, double %150, i32 %and143)
+ %call144 = call i32 (ptr, ...) @printf(ptr @.str1, double %147, double %148, double %149, double %150, i32 %and143)
call void @clear()
- store double 6.781956e+03, double* @ret_df, align 8
- store float 4.445500e+03, float* @lx, align 4
- store float 0x4068ACCCC0000000, float* @ly, align 4
- %155 = load float, float* @lx, align 4
- %156 = load float, float* @ly, align 4
+ store double 6.781956e+03, ptr @ret_df, align 8
+ store float 4.445500e+03, ptr @lx, align 4
+ store float 0x4068ACCCC0000000, ptr @ly, align 4
+ %155 = load float, ptr @lx, align 4
+ %156 = load float, ptr @ly, align 4
%call145 = call double @df_sf_sf(float %155, float %156)
- store double %call145, double* @lret_df, align 8
- %157 = load double, double* @ret_df, align 8
- %158 = load double, double* @lret_df, align 8
- %159 = load float, float* @x, align 4
+ store double %call145, ptr @lret_df, align 8
+ %157 = load double, ptr @ret_df, align 8
+ %158 = load double, ptr @lret_df, align 8
+ %159 = load float, ptr @x, align 4
%conv146 = fpext float %159 to double
- %160 = load float, float* @lx, align 4
+ %160 = load float, ptr @lx, align 4
%conv147 = fpext float %160 to double
- %161 = load float, float* @y, align 4
+ %161 = load float, ptr @y, align 4
%conv148 = fpext float %161 to double
- %162 = load float, float* @ly, align 4
+ %162 = load float, ptr @ly, align 4
%conv149 = fpext float %162 to double
- %163 = load double, double* @ret_df, align 8
- %164 = load double, double* @lret_df, align 8
+ %163 = load double, ptr @ret_df, align 8
+ %164 = load double, ptr @lret_df, align 8
%cmp150 = fcmp oeq double %163, %164
br i1 %cmp150, label %land.lhs.true152, label %land.end158
land.lhs.true152: ; preds = %land.end122
- %165 = load float, float* @x, align 4
- %166 = load float, float* @lx, align 4
+ %165 = load float, ptr @x, align 4
+ %166 = load float, ptr @lx, align 4
%cmp153 = fcmp oeq float %165, %166
br i1 %cmp153, label %land.rhs155, label %land.end158
land.rhs155: ; preds = %land.lhs.true152
- %167 = load float, float* @y, align 4
- %168 = load float, float* @ly, align 4
+ %167 = load float, ptr @y, align 4
+ %168 = load float, ptr @ly, align 4
%cmp156 = fcmp oeq float %167, %168
br label %land.end158
land.end158: ; preds = %land.rhs155, %land.lhs.true152, %land.end122
%169 = phi i1 [ false, %land.lhs.true152 ], [ false, %land.end122 ], [ %cmp156, %land.rhs155 ]
%land.ext159 = zext i1 %169 to i32
- %call160 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
+ %call160 = call i32 (ptr, ...) @printf(ptr @.str2, double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
call void @clear()
- store double 1.889130e+05, double* @ret_df, align 8
- store float 9.111450e+05, float* @lx, align 4
- store double 0x4185320A58000000, double* @lyd, align 8
- %170 = load float, float* @lx, align 4
- %171 = load double, double* @lyd, align 8
+ store double 1.889130e+05, ptr @ret_df, align 8
+ store float 9.111450e+05, ptr @lx, align 4
+ store double 0x4185320A58000000, ptr @lyd, align 8
+ %170 = load float, ptr @lx, align 4
+ %171 = load double, ptr @lyd, align 8
%call161 = call double @df_sf_df(float %170, double %171)
- store double %call161, double* @lret_df, align 8
- %172 = load double, double* @ret_df, align 8
- %173 = load double, double* @lret_df, align 8
- %174 = load float, float* @x, align 4
+ store double %call161, ptr @lret_df, align 8
+ %172 = load double, ptr @ret_df, align 8
+ %173 = load double, ptr @lret_df, align 8
+ %174 = load float, ptr @x, align 4
%conv162 = fpext float %174 to double
- %175 = load float, float* @lx, align 4
+ %175 = load float, ptr @lx, align 4
%conv163 = fpext float %175 to double
- %176 = load double, double* @yd, align 8
- %177 = load double, double* @lyd, align 8
- %178 = load double, double* @ret_df, align 8
- %179 = load double, double* @lret_df, align 8
+ %176 = load double, ptr @yd, align 8
+ %177 = load double, ptr @lyd, align 8
+ %178 = load double, ptr @ret_df, align 8
+ %179 = load double, ptr @lret_df, align 8
%cmp164 = fcmp oeq double %178, %179
br i1 %cmp164, label %land.lhs.true166, label %land.end172
land.lhs.true166: ; preds = %land.end158
- %180 = load float, float* @x, align 4
- %181 = load float, float* @lx, align 4
+ %180 = load float, ptr @x, align 4
+ %181 = load float, ptr @lx, align 4
%cmp167 = fcmp oeq float %180, %181
br i1 %cmp167, label %land.rhs169, label %land.end172
land.rhs169: ; preds = %land.lhs.true166
- %182 = load double, double* @yd, align 8
- %183 = load double, double* @lyd, align 8
+ %182 = load double, ptr @yd, align 8
+ %183 = load double, ptr @lyd, align 8
%cmp170 = fcmp oeq double %182, %183
br label %land.end172
land.end172: ; preds = %land.rhs169, %land.lhs.true166, %land.end158
%184 = phi i1 [ false, %land.lhs.true166 ], [ false, %land.end158 ], [ %cmp170, %land.rhs169 ]
%land.ext173 = zext i1 %184 to i32
- %call174 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
+ %call174 = call i32 (ptr, ...) @printf(ptr @.str2, double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
call void @clear()
- store double 0x418B2DB900000000, double* @ret_df, align 8
- store double 0x41B1EF2ED3000000, double* @lxd, align 8
- store float 1.244562e+06, float* @ly, align 4
- %185 = load double, double* @lxd, align 8
- %186 = load float, float* @ly, align 4
+ store double 0x418B2DB900000000, ptr @ret_df, align 8
+ store double 0x41B1EF2ED3000000, ptr @lxd, align 8
+ store float 1.244562e+06, ptr @ly, align 4
+ %185 = load double, ptr @lxd, align 8
+ %186 = load float, ptr @ly, align 4
%call175 = call double @df_df_sf(double %185, float %186)
- store double %call175, double* @lret_df, align 8
- %187 = load double, double* @ret_df, align 8
- %188 = load double, double* @lret_df, align 8
- %189 = load double, double* @xd, align 8
- %190 = load double, double* @lxd, align 8
- %191 = load float, float* @y, align 4
+ store double %call175, ptr @lret_df, align 8
+ %187 = load double, ptr @ret_df, align 8
+ %188 = load double, ptr @lret_df, align 8
+ %189 = load double, ptr @xd, align 8
+ %190 = load double, ptr @lxd, align 8
+ %191 = load float, ptr @y, align 4
%conv176 = fpext float %191 to double
- %192 = load float, float* @ly, align 4
+ %192 = load float, ptr @ly, align 4
%conv177 = fpext float %192 to double
- %193 = load double, double* @ret_df, align 8
- %194 = load double, double* @lret_df, align 8
+ %193 = load double, ptr @ret_df, align 8
+ %194 = load double, ptr @lret_df, align 8
%cmp178 = fcmp oeq double %193, %194
br i1 %cmp178, label %land.lhs.true180, label %land.end186
land.lhs.true180: ; preds = %land.end172
- %195 = load double, double* @xd, align 8
- %196 = load double, double* @lxd, align 8
+ %195 = load double, ptr @xd, align 8
+ %196 = load double, ptr @lxd, align 8
%cmp181 = fcmp oeq double %195, %196
br i1 %cmp181, label %land.rhs183, label %land.end186
land.rhs183: ; preds = %land.lhs.true180
- %197 = load float, float* @y, align 4
- %198 = load float, float* @ly, align 4
+ %197 = load float, ptr @y, align 4
+ %198 = load float, ptr @ly, align 4
%cmp184 = fcmp oeq float %197, %198
br label %land.end186
land.end186: ; preds = %land.rhs183, %land.lhs.true180, %land.end172
%199 = phi i1 [ false, %land.lhs.true180 ], [ false, %land.end172 ], [ %cmp184, %land.rhs183 ]
%land.ext187 = zext i1 %199 to i32
- %call188 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
+ %call188 = call i32 (ptr, ...) @printf(ptr @.str2, double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
call void @clear()
- store double 3.987721e+06, double* @ret_df, align 8
- store double 5.223560e+00, double* @lxd, align 8
- store double 0x40B7D37CC1A8AC5C, double* @lyd, align 8
- %200 = load double, double* @lxd, align 8
- %201 = load double, double* @lyd, align 8
+ store double 3.987721e+06, ptr @ret_df, align 8
+ store double 5.223560e+00, ptr @lxd, align 8
+ store double 0x40B7D37CC1A8AC5C, ptr @lyd, align 8
+ %200 = load double, ptr @lxd, align 8
+ %201 = load double, ptr @lyd, align 8
%call189 = call double @df_df_df(double %200, double %201)
- store double %call189, double* @lret_df, align 8
- %202 = load double, double* @ret_df, align 8
- %203 = load double, double* @lret_df, align 8
- %204 = load double, double* @xd, align 8
- %205 = load double, double* @lxd, align 8
- %206 = load double, double* @yd, align 8
- %207 = load double, double* @lyd, align 8
- %208 = load double, double* @ret_df, align 8
- %209 = load double, double* @lret_df, align 8
+ store double %call189, ptr @lret_df, align 8
+ %202 = load double, ptr @ret_df, align 8
+ %203 = load double, ptr @lret_df, align 8
+ %204 = load double, ptr @xd, align 8
+ %205 = load double, ptr @lxd, align 8
+ %206 = load double, ptr @yd, align 8
+ %207 = load double, ptr @lyd, align 8
+ %208 = load double, ptr @ret_df, align 8
+ %209 = load double, ptr @lret_df, align 8
%cmp190 = fcmp oeq double %208, %209
br i1 %cmp190, label %land.lhs.true192, label %land.end198
land.lhs.true192: ; preds = %land.end186
- %210 = load double, double* @xd, align 8
- %211 = load double, double* @lxd, align 8
+ %210 = load double, ptr @xd, align 8
+ %211 = load double, ptr @lxd, align 8
%cmp193 = fcmp oeq double %210, %211
br i1 %cmp193, label %land.rhs195, label %land.end198
land.rhs195: ; preds = %land.lhs.true192
- %212 = load double, double* @yd, align 8
- %213 = load double, double* @lyd, align 8
+ %212 = load double, ptr @yd, align 8
+ %213 = load double, ptr @lyd, align 8
%cmp196 = fcmp oeq double %212, %213
br label %land.end198
land.end198: ; preds = %land.rhs195, %land.lhs.true192, %land.end186
%214 = phi i1 [ false, %land.lhs.true192 ], [ false, %land.end186 ], [ %cmp196, %land.rhs195 ]
%land.ext199 = zext i1 %214 to i32
- %call200 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
+ %call200 = call i32 (ptr, ...) @printf(ptr @.str2, double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
call void @clear()
- store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
%call201 = call { float, float } @sc_v()
%215 = extractvalue { float, float } %call201, 0
%216 = extractvalue { float, float } %call201, 1
- store float %215, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- store float %216, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
- %ret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ store float %215, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ store float %216, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+ %ret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
%conv202 = fpext float %ret_sc.real to double
%conv203 = fpext float %ret_sc.imag to double
- %ret_sc.real204 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag205 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ %ret_sc.real204 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag205 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
%conv206 = fpext float %ret_sc.real204 to double
%conv207 = fpext float %ret_sc.imag205 to double
- %lret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%conv208 = fpext float %lret_sc.real to double
%conv209 = fpext float %lret_sc.imag to double
- %lret_sc.real210 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag211 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real210 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag211 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%conv212 = fpext float %lret_sc.real210 to double
%conv213 = fpext float %lret_sc.imag211 to double
- %ret_sc.real214 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag215 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
- %lret_sc.real216 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag217 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real214 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag215 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+ %lret_sc.real216 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag217 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216
%cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217
%and.ri = and i1 %cmp.r, %cmp.i
%conv218 = zext i1 %and.ri to i32
- %call219 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
+ %call219 = call i32 (ptr, ...) @printf(ptr @.str3, double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
call void @clear()
- store float 0x3FF7A99300000000, float* @lx, align 4
- store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
- %217 = load float, float* @lx, align 4
+ store float 0x3FF7A99300000000, ptr @lx, align 4
+ store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+ %217 = load float, ptr @lx, align 4
%call220 = call { float, float } @sc_sf(float %217)
%218 = extractvalue { float, float } %call220, 0
%219 = extractvalue { float, float } %call220, 1
- store float %218, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- store float %219, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
- %ret_sc.real221 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag222 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ store float %218, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ store float %219, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+ %ret_sc.real221 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag222 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
%conv223 = fpext float %ret_sc.real221 to double
%conv224 = fpext float %ret_sc.imag222 to double
- %ret_sc.real225 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag226 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+ %ret_sc.real225 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag226 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
%conv227 = fpext float %ret_sc.real225 to double
%conv228 = fpext float %ret_sc.imag226 to double
- %lret_sc.real229 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag230 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real229 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag230 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%conv231 = fpext float %lret_sc.real229 to double
%conv232 = fpext float %lret_sc.imag230 to double
- %lret_sc.real233 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag234 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %lret_sc.real233 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag234 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%conv235 = fpext float %lret_sc.real233 to double
%conv236 = fpext float %lret_sc.imag234 to double
- %220 = load float, float* @x, align 4
+ %220 = load float, ptr @x, align 4
%conv237 = fpext float %220 to double
- %221 = load float, float* @lx, align 4
+ %221 = load float, ptr @lx, align 4
%conv238 = fpext float %221 to double
- %ret_sc.real239 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
- %ret_sc.imag240 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
- %lret_sc.real241 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
- %lret_sc.imag242 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real239 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+ %ret_sc.imag240 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+ %lret_sc.real241 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+ %lret_sc.imag242 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
%cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241
%cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242
%and.ri245 = and i1 %cmp.r243, %cmp.i244
br i1 %and.ri245, label %land.rhs247, label %land.end250
land.rhs247: ; preds = %land.end198
- %222 = load float, float* @x, align 4
- %223 = load float, float* @lx, align 4
+ %222 = load float, ptr @x, align 4
+ %223 = load float, ptr @lx, align 4
%cmp248 = fcmp oeq float %222, %223
br label %land.end250
land.end250: ; preds = %land.rhs247, %land.end198
%224 = phi i1 [ false, %land.end198 ], [ %cmp248, %land.rhs247 ]
%land.ext251 = zext i1 %224 to i32
- %call252 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
+ %call252 = call i32 (ptr, ...) @printf(ptr @.str4, double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
call void @clear()
- store double 1.234500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- store double 7.677000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+ store double 1.234500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ store double 7.677000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
%call253 = call { double, double } @dc_v()
%225 = extractvalue { double, double } %call253, 0
%226 = extractvalue { double, double } %call253, 1
- store double %225, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- store double %226, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %ret_dc.real254 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag255 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %lret_dc.real256 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag257 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real258 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag259 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real260 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag261 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ store double %225, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ store double %226, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %ret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %ret_dc.real254 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag255 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %lret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %lret_dc.real256 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag257 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %ret_dc.real258 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag259 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %lret_dc.real260 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag261 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
%cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260
%cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261
%and.ri264 = and i1 %cmp.r262, %cmp.i263
%conv265 = zext i1 %and.ri264 to i32
- %call266 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
+ %call266 = call i32 (ptr, ...) @printf(ptr @.str3, double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
call void @clear()
- store double 0x40AAF6F532617C1C, double* @lxd, align 8
- store double 4.444500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- store double 7.888000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %227 = load float, float* @lx, align 4
+ store double 0x40AAF6F532617C1C, ptr @lxd, align 8
+ store double 4.444500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ store double 7.888000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %227 = load float, ptr @lx, align 4
%call267 = call { double, double } @dc_sf(float %227)
%228 = extractvalue { double, double } %call267, 0
%229 = extractvalue { double, double } %call267, 1
- store double %228, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- store double %229, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %ret_dc.real268 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag269 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %ret_dc.real270 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag271 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real272 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag273 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %lret_dc.real274 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag275 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
- %230 = load float, float* @x, align 4
+ store double %228, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ store double %229, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %ret_dc.real268 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag269 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %ret_dc.real270 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag271 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %lret_dc.real272 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag273 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %lret_dc.real274 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag275 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+ %230 = load float, ptr @x, align 4
%conv276 = fpext float %230 to double
- %231 = load float, float* @lx, align 4
+ %231 = load float, ptr @lx, align 4
%conv277 = fpext float %231 to double
- %ret_dc.real278 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
- %ret_dc.imag279 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
- %lret_dc.real280 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
- %lret_dc.imag281 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real278 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+ %ret_dc.imag279 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+ %lret_dc.real280 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+ %lret_dc.imag281 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
%cmp.r282 = fcmp oeq double %ret_dc.real278, %lret_dc.real280
%cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281
%and.ri284 = and i1 %cmp.r282, %cmp.i283
br i1 %and.ri284, label %land.rhs286, label %land.end289
land.rhs286: ; preds = %land.end250
- %232 = load float, float* @x, align 4
- %233 = load float, float* @lx, align 4
+ %232 = load float, ptr @x, align 4
+ %233 = load float, ptr @lx, align 4
%cmp287 = fcmp oeq float %232, %233
br label %land.end289
land.end289: ; preds = %land.rhs286, %land.end250
%234 = phi i1 [ false, %land.end250 ], [ %cmp287, %land.rhs286 ]
%land.ext290 = zext i1 %234 to i32
- %call291 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
- %235 = load i32, i32* %retval
+ %call291 = call i32 (ptr, ...) @printf(ptr @.str4, double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
+ %235 = load i32, ptr %retval
ret i32 %235
}
; stel: jr $25
; stel: .end __call_stub_fp_v_sf
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
declare void @v_df(double) #1
; stel: .section .mips16.call.fp.v_df,"ax",@progbits
define void @v_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
- store float %p, float* %p.addr, align 4
- %0 = load float, float* %p.addr, align 4
- store float %0, float* @x, align 4
+ store float %p, ptr %p.addr, align 4
+ %0 = load float, ptr %p.addr, align 4
+ store float %0, ptr @x, align 4
ret void
}
; stel: .section .mips16.fn.v_sf,"ax",@progbits
; stel: .set $__fn_local_v_sf, v_sf
; stel: .end __fn_stub_v_sf
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
; Function Attrs: nounwind
define void @v_df(double %p) #0 {
entry:
%p.addr = alloca double, align 8
- store double %p, double* %p.addr, align 8
- %0 = load double, double* %p.addr, align 8
- store double %0, double* @xd, align 8
+ store double %p, ptr %p.addr, align 8
+ %0 = load double, ptr %p.addr, align 8
+ store double %0, ptr @xd, align 8
ret void
}
entry:
%p1.addr = alloca float, align 4
%p2.addr = alloca float, align 4
- store float %p1, float* %p1.addr, align 4
- store float %p2, float* %p2.addr, align 4
- %0 = load float, float* %p1.addr, align 4
- store float %0, float* @x, align 4
- %1 = load float, float* %p2.addr, align 4
- store float %1, float* @y, align 4
+ store float %p1, ptr %p1.addr, align 4
+ store float %p2, ptr %p2.addr, align 4
+ %0 = load float, ptr %p1.addr, align 4
+ store float %0, ptr @x, align 4
+ %1 = load float, ptr %p2.addr, align 4
+ store float %1, ptr @y, align 4
ret void
}
entry:
%p1.addr = alloca float, align 4
%p2.addr = alloca double, align 8
- store float %p1, float* %p1.addr, align 4
- store double %p2, double* %p2.addr, align 8
- %0 = load float, float* %p1.addr, align 4
- store float %0, float* @x, align 4
- %1 = load double, double* %p2.addr, align 8
- store double %1, double* @yd, align 8
+ store float %p1, ptr %p1.addr, align 4
+ store double %p2, ptr %p2.addr, align 8
+ %0 = load float, ptr %p1.addr, align 4
+ store float %0, ptr @x, align 4
+ %1 = load double, ptr %p2.addr, align 8
+ store double %1, ptr @yd, align 8
ret void
}
entry:
%p1.addr = alloca double, align 8
%p2.addr = alloca float, align 4
- store double %p1, double* %p1.addr, align 8
- store float %p2, float* %p2.addr, align 4
- %0 = load double, double* %p1.addr, align 8
- store double %0, double* @xd, align 8
- %1 = load float, float* %p2.addr, align 4
- store float %1, float* @y, align 4
+ store double %p1, ptr %p1.addr, align 8
+ store float %p2, ptr %p2.addr, align 4
+ %0 = load double, ptr %p1.addr, align 8
+ store double %0, ptr @xd, align 8
+ %1 = load float, ptr %p2.addr, align 4
+ store float %1, ptr @y, align 4
ret void
}
entry:
%p1.addr = alloca double, align 8
%p2.addr = alloca double, align 8
- store double %p1, double* %p1.addr, align 8
- store double %p2, double* %p2.addr, align 8
- %0 = load double, double* %p1.addr, align 8
- store double %0, double* @xd, align 8
- %1 = load double, double* %p2.addr, align 8
- store double %1, double* @yd, align 8
+ store double %p1, ptr %p1.addr, align 8
+ store double %p2, ptr %p2.addr, align 8
+ %0 = load double, ptr %p1.addr, align 8
+ store double %0, ptr @xd, align 8
+ %1 = load double, ptr %p2.addr, align 8
+ store double %1, ptr @yd, align 8
ret void
}
; Function Attrs: nounwind
define float @sf_v() #0 {
entry:
- %0 = load float, float* @ret_sf, align 4
+ %0 = load float, ptr @ret_sf, align 4
ret float %0
}
define float @sf_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
- store float %p, float* %p.addr, align 4
- %0 = load float, float* %p.addr, align 4
- store float %0, float* @x, align 4
- %1 = load float, float* @ret_sf, align 4
+ store float %p, ptr %p.addr, align 4
+ %0 = load float, ptr %p.addr, align 4
+ store float %0, ptr @x, align 4
+ %1 = load float, ptr @ret_sf, align 4
ret float %1
}
define float @sf_df(double %p) #0 {
entry:
%p.addr = alloca double, align 8
- store double %p, double* %p.addr, align 8
- %0 = load double, double* %p.addr, align 8
- store double %0, double* @xd, align 8
- %1 = load float, float* @ret_sf, align 4
+ store double %p, ptr %p.addr, align 8
+ %0 = load double, ptr %p.addr, align 8
+ store double %0, ptr @xd, align 8
+ %1 = load float, ptr @ret_sf, align 4
ret float %1
}
entry:
%p1.addr = alloca float, align 4
%p2.addr = alloca float, align 4
- store float %p1, float* %p1.addr, align 4
- store float %p2, float* %p2.addr, align 4
- %0 = load float, float* %p1.addr, align 4
- store float %0, float* @x, align 4
- %1 = load float, float* %p2.addr, align 4
- store float %1, float* @y, align 4
- %2 = load float, float* @ret_sf, align 4
+ store float %p1, ptr %p1.addr, align 4
+ store float %p2, ptr %p2.addr, align 4
+ %0 = load float, ptr %p1.addr, align 4
+ store float %0, ptr @x, align 4
+ %1 = load float, ptr %p2.addr, align 4
+ store float %1, ptr @y, align 4
+ %2 = load float, ptr @ret_sf, align 4
ret float %2
}
entry:
%p1.addr = alloca float, align 4
%p2.addr = alloca double, align 8
- store float %p1, float* %p1.addr, align 4
- store double %p2, double* %p2.addr, align 8
- %0 = load float, float* %p1.addr, align 4
- store float %0, float* @x, align 4
- %1 = load double, double* %p2.addr, align 8
- store double %1, double* @yd, align 8
- %2 = load float, float* @ret_sf, align 4
+ store float %p1, ptr %p1.addr, align 4
+ store double %p2, ptr %p2.addr, align 8
+ %0 = load float, ptr %p1.addr, align 4
+ store float %0, ptr @x, align 4
+ %1 = load double, ptr %p2.addr, align 8
+ store double %1, ptr @yd, align 8
+ %2 = load float, ptr @ret_sf, align 4
ret float %2
}
entry:
%p1.addr = alloca double, align 8
%p2.addr = alloca float, align 4
- store double %p1, double* %p1.addr, align 8
- store float %p2, float* %p2.addr, align 4
- %0 = load double, double* %p1.addr, align 8
- store double %0, double* @xd, align 8
- %1 = load float, float* %p2.addr, align 4
- store float %1, float* @y, align 4
- %2 = load float, float* @ret_sf, align 4
+ store double %p1, ptr %p1.addr, align 8
+ store float %p2, ptr %p2.addr, align 4
+ %0 = load double, ptr %p1.addr, align 8
+ store double %0, ptr @xd, align 8
+ %1 = load float, ptr %p2.addr, align 4
+ store float %1, ptr @y, align 4
+ %2 = load float, ptr @ret_sf, align 4
ret float %2
}
entry:
%p1.addr = alloca double, align 8
%p2.addr = alloca double, align 8
- store double %p1, double* %p1.addr, align 8
- store double %p2, double* %p2.addr, align 8
- %0 = load double, double* %p1.addr, align 8
- store double %0, double* @xd, align 8
- %1 = load double, double* %p2.addr, align 8
- store double %1, double* @yd, align 8
- %2 = load float, float* @ret_sf, align 4
+ store double %p1, ptr %p1.addr, align 8
+ store double %p2, ptr %p2.addr, align 8
+ %0 = load double, ptr %p1.addr, align 8
+ store double %0, ptr @xd, align 8
+ %1 = load double, ptr %p2.addr, align 8
+ store double %1, ptr @yd, align 8
+ %2 = load float, ptr @ret_sf, align 4
ret float %2
}
define void @v_sf(float %p) #0 {
entry:
%p.addr = alloca float, align 4
- store float %p, float* %p.addr, align 4
- %0 = load float, float* %p.addr, align 4
- store float %0, float* @x, align 4
+ store float %p, ptr %p.addr, align 4
+ %0 = load float, ptr %p.addr, align 4
+ store float %0, ptr @x, align 4
ret void
}
; ALL-LABEL: .ent __fn_stub_v_sf
; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=picel
-@ptrsv = global float ()* @sv, align 4
-@ptrdv = global double ()* @dv, align 4
-@ptrscv = global { float, float } ()* @scv, align 4
-@ptrdcv = global { double, double } ()* @dcv, align 4
+@ptrsv = global ptr @sv, align 4
+@ptrdv = global ptr @dv, align 4
+@ptrscv = global ptr @scv, align 4
+@ptrdcv = global ptr @dcv, align 4
@x = common global float 0.000000e+00, align 4
@.str = private unnamed_addr constant [4 x i8] c"%f\0A\00", align 1
@xd = common global double 0.000000e+00, align 8
define { float, float } @scv() #0 {
entry:
%retval = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
- store float 5.000000e+00, float* %real
- store float 9.900000e+01, float* %imag
- %0 = load { float, float }, { float, float }* %retval
+ %real = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+ store float 5.000000e+00, ptr %real
+ store float 9.900000e+01, ptr %imag
+ %0 = load { float, float }, ptr %retval
ret { float, float } %0
}
define { double, double } @dcv() #0 {
entry:
%retval = alloca { double, double }, align 8
- %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
- store double 0x416BC8B0A0000000, double* %real
- store double 0x41CDCCB763800000, double* %imag
- %0 = load { double, double }, { double, double }* %retval
+ %real = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1
+ store double 0x416BC8B0A0000000, ptr %real
+ store double 0x41CDCCB763800000, ptr %imag
+ %0 = load { double, double }, ptr %retval
ret { double, double } %0
}
; Function Attrs: nounwind
define i32 @main() #0 {
entry:
- %0 = load float ()*, float ()** @ptrsv, align 4
+ %0 = load ptr, ptr @ptrsv, align 4
%call = call float %0()
- store float %call, float* @x, align 4
- %1 = load float, float* @x, align 4
+ store float %call, ptr @x, align 4
+ %1 = load float, ptr @x, align 4
%conv = fpext float %1 to double
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %conv)
- %2 = load double ()*, double ()** @ptrdv, align 4
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str, double %conv)
+ %2 = load ptr, ptr @ptrdv, align 4
%call2 = call double %2()
- store double %call2, double* @xd, align 8
- %3 = load double, double* @xd, align 8
- %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %3)
- %4 = load { float, float } ()*, { float, float } ()** @ptrscv, align 4
+ store double %call2, ptr @xd, align 8
+ %3 = load double, ptr @xd, align 8
+ %call3 = call i32 (ptr, ...) @printf(ptr @.str, double %3)
+ %4 = load ptr, ptr @ptrscv, align 4
%call4 = call { float, float } %4()
%5 = extractvalue { float, float } %call4, 0
%6 = extractvalue { float, float } %call4, 1
- store float %5, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
- store float %6, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
- %xy.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
- %xy.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
+ store float %5, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+ store float %6, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
+ %xy.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+ %xy.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
%conv5 = fpext float %xy.real to double
%conv6 = fpext float %xy.imag to double
- %xy.real7 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
- %xy.imag8 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
+ %xy.real7 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+ %xy.imag8 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
%conv9 = fpext float %xy.real7 to double
%conv10 = fpext float %xy.imag8 to double
- %call11 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10)
- %7 = load { double, double } ()*, { double, double } ()** @ptrdcv, align 4
+ %call11 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv5, double %conv10)
+ %7 = load ptr, ptr @ptrdcv, align 4
%call12 = call { double, double } %7()
%8 = extractvalue { double, double } %call12, 0
%9 = extractvalue { double, double } %call12, 1
- store double %8, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
- store double %9, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
- %xyd.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
- %xyd.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
- %xyd.real13 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
- %xyd.imag14 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
- %call15 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14)
+ store double %8, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+ store double %9, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+ %xyd.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+ %xyd.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+ %xyd.real13 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+ %xyd.imag14 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+ %call15 = call i32 (ptr, ...) @printf(ptr @.str1, double %xyd.real, double %xyd.imag14)
ret i32 0
}
; picel: lw ${{[0-9]+}}, %got(__mips16_call_stub_dc_0)(${{[0-9]+}})
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
define i32 @main() nounwind {
entry:
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 1075344593) nounwind
; 16: lw ${{[0-9]+}}, 1f
; 16: b 2f
; 16: .align 2
; 16: 1: .word 1075344593
; 16: 2:
- %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
+ %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 -1075344593) nounwind
; 16: lw ${{[0-9]+}}, 1f
; 16: b 2f
ret i32 0
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
; RUN: -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R6
-define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
+define void @fooNonTail(ptr nocapture %f1) nounwind {
; MIPS32R2-LABEL: fooNonTail:
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: addiu $sp, $sp, -24
ret void
}
-define i32 @fooTail(i32 (i32)* nocapture %f1) nounwind {
+define i32 @fooTail(ptr nocapture %f1) nounwind {
; MIPS32R2-LABEL: fooTail:
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: move $25, $4
@.str.6 = private unnamed_addr constant [2 x i8] c"G\00", align 1
@.str.7 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
-define i8* @_Z3fooi(i32 signext %Letter) {
+define ptr @_Z3fooi(i32 signext %Letter) {
; MIPS32R2-LABEL: _Z3fooi:
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: addiu $sp, $sp, -16
; PIC-MIPS64R6-NEXT: jr $ra
; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, 16
entry:
- %retval = alloca i8*, align 8
+ %retval = alloca ptr, align 8
%Letter.addr = alloca i32, align 4
- store i32 %Letter, i32* %Letter.addr, align 4
- %0 = load i32, i32* %Letter.addr, align 4
+ store i32 %Letter, ptr %Letter.addr, align 4
+ %0 = load i32, ptr %Letter.addr, align 4
switch i32 %0, label %sw.epilog [
i32 0, label %sw.bb
i32 1, label %sw.bb1
]
sw.bb:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str, ptr %retval, align 8
br label %return
sw.bb1:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.1, ptr %retval, align 8
br label %return
sw.bb2:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.2, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.2, ptr %retval, align 8
br label %return
sw.bb3:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.3, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.3, ptr %retval, align 8
br label %return
sw.bb4:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.4, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.4, ptr %retval, align 8
br label %return
sw.bb5:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.5, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.5, ptr %retval, align 8
br label %return
sw.bb6:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.6, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.6, ptr %retval, align 8
br label %return
sw.epilog:
- store i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str.7, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.7, ptr %retval, align 8
br label %return
return:
- %1 = load i8*, i8** %retval, align 8
- ret i8* %1
+ %1 = load ptr, ptr %retval, align 8
+ ret ptr %1
}
br i1 %cmp, label %end, label %then
then:
- store i32 1, i32* @x, align 4
+ store i32 1, ptr @x, align 4
br label %end
end:
; RUN: -verify-machineinstrs | FileCheck -check-prefix=N64 %s
declare void @callee()
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
@val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
; N64-NEXT: jr $ra
; N64-NEXT: daddiu $sp, $sp, 16
call void @callee()
- call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i1 false)
+ call void @llvm.memset.p0.i32(ptr align 4 @val, i8 0, i32 80, i1 false)
ret void
}
; RUN: llc < %s -mtriple=mipsel -relocation-model=static -mips-tail-calls=1 | FileCheck %s
-define void @foo0(void (i32)* nocapture %f1) nounwind {
+define void @foo0(ptr nocapture %f1) nounwind {
entry:
; CHECK: jr $25
tail call void %f1(i32 13) nounwind
target triple = "mipsel-unknown-linux"
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @test, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @test, ptr null }]
; CHECK: .section
; CHECK: .init_array
; CHECK-NOT: .ctors
; CHECK-NEXT: #NO_APP
%a = alloca i32, align 4
%b = alloca i32, align 4
- store i32 20, i32* %a, align 4
- %0 = load i32, i32* %a, align 4
+ store i32 20, ptr %a, align 4
+ %0 = load i32, ptr %a, align 4
%1 = call i32 asm sideeffect "addi $$9, $1, 8\0A\09ori $0, $$9, 6", "=r,r,~{$1}"(i32 %0)
- store i32 %1, i32* %b, align 4
+ store i32 %1, ptr %b, align 4
ret void
}
@data = global [8193 x i32] zeroinitializer
-define void @R(i32 *%p) nounwind {
+define void @R(ptr %p) nounwind {
entry:
; CHECK-LABEL: R:
- call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+ call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) @data)
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
-define void @R_offset_4(i32 *%p) nounwind {
+define void @R_offset_4(ptr %p) nounwind {
entry:
; CHECK-LABEL: R_offset_4:
- call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+ call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
-define void @R_offset_254(i32 *%p) nounwind {
+define void @R_offset_254(ptr %p) nounwind {
entry:
; CHECK-LABEL: R_offset_254:
- call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+ call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 63))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
-define void @R_offset_256(i32 *%p) nounwind {
+define void @R_offset_256(ptr %p) nounwind {
entry:
; CHECK-LABEL: R_offset_256:
- call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+ call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 64))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 256
@data = global [8193 x i32] zeroinitializer
-define void @ZC(i32 *%p) nounwind {
+define void @ZC(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) @data)
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
; ALL: #APP
ret void
}
-define void @ZC_offset_n4(i32 *%p) nounwind {
+define void @ZC_offset_n4(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_n4:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 -1))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 -1))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
; ALL: #APP
ret void
}
-define void @ZC_offset_4(i32 *%p) nounwind {
+define void @ZC_offset_4(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_4:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
; ALL: #APP
ret void
}
-define void @ZC_offset_252(i32 *%p) nounwind {
+define void @ZC_offset_252(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_252:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 63))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
; ALL: #APP
ret void
}
-define void @ZC_offset_256(i32 *%p) nounwind {
+define void @ZC_offset_256(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_256:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 64))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
ret void
}
-define void @ZC_offset_2044(i32 *%p) nounwind {
+define void @ZC_offset_2044(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_2044:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 511))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 511))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
ret void
}
-define void @ZC_offset_2048(i32 *%p) nounwind {
+define void @ZC_offset_2048(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_2048:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 512))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 512))
; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
ret void
}
-define void @ZC_offset_32764(i32 *%p) nounwind {
+define void @ZC_offset_32764(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_32764:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
ret void
}
-define void @ZC_offset_32768(i32 *%p) nounwind {
+define void @ZC_offset_32768(ptr %p) nounwind {
entry:
; ALL-LABEL: ZC_offset_32768:
- call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+ call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; ALL-DAG: ori $[[T0:[0-9]+]], $zero, 32768
; CHECK-LABEL: Atomic:
entry:
%s = alloca %struct.anon, align 4
- %0 = bitcast %struct.anon* %s to i8*
- %count = getelementptr inbounds %struct.anon, %struct.anon* %s, i64 0, i32 1
- store i32 0, i32* %count, align 4
+ %count = getelementptr inbounds %struct.anon, ptr %s, i64 0, i32 1
+ store i32 0, ptr %count, align 4
; R6: addiu $[[R0:[0-9a-z]+]], $sp, {{[0-9]+}}
; ALL: #APP
; ALL: #NO_APP
- %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* elementtype(i32) %count, i32 10, i32* elementtype(i32) %count)
- %asmresult1.i = extractvalue { i32, i32 } %1, 1
+ %0 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(ptr elementtype(i32) %count, i32 10, ptr elementtype(i32) %count)
+ %asmresult1.i = extractvalue { i32, i32 } %0, 1
%cmp = icmp ne i32 %asmresult1.i, 10
%conv = zext i1 %cmp to i32
%call2 = call i32 @f(i32 signext %conv)
@data = global [8193 x i32] zeroinitializer
; Inline-asm "*m" (memory) constraint with a zero-offset global operand.
; The opaque-pointer form folds `gep @data, 0, 0` into a direct use of @data.
-define void @m(i32 *%p) nounwind {
+define void @m(ptr %p) nounwind {
entry:
; CHECK-LABEL: m:
- call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) @data)
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
; Same "*m" constraint test with element index 1 (byte offset 4); the
; getelementptr is kept, only the pointer types become opaque `ptr`.
-define void @m_offset_4(i32 *%p) nounwind {
+define void @m_offset_4(ptr %p) nounwind {
entry:
; CHECK-LABEL: m_offset_4:
- call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
; "*m" constraint at element index 8191 (byte offset 32764). Per the function
; name this is the boundary case just inside the signed-16-bit offset range;
; the sibling @m_offset_32768 test below covers the first out-of-range offset.
-define void @m_offset_32764(i32 *%p) nounwind {
+define void @m_offset_32764(ptr %p) nounwind {
entry:
; CHECK-LABEL: m_offset_32764:
- call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
-define void @m_offset_32768(i32 *%p) nounwind {
+define void @m_offset_32768(ptr %p) nounwind {
entry:
; CHECK-LABEL: m_offset_32768:
- call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+ call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768
; CHECK: sw $[[T3]], 0($[[T1]])
%l1 = alloca i32, align 4
- call void asm "sw $1, $0", "=*m,r"(i32* elementtype(i32) %l1, i32 %x) nounwind
- %0 = call i32 asm "lw $0, $1", "=r,*m"(i32* elementtype(i32) %l1) nounwind
- store i32 %0, i32* @g1, align 4
+ call void asm "sw $1, $0", "=*m,r"(ptr elementtype(i32) %l1, i32 %x) nounwind
+ %0 = call i32 asm "lw $0, $1", "=r,*m"(ptr elementtype(i32) %l1) nounwind
+ store i32 %0, ptr @g1, align 4
ret i32 %0
}
; Exercises MIPS inline-asm operand modifiers on the same memory operand
; (@b element 3): ${1:D} second word, bare ${1} first word, ${1:M} high-order
; part, ${1:L} low-order part. Only the pointer spellings change in the diff.
define void @main() {
entry:
; Second word:
- tail call void asm sideeffect " lw $0, ${1:D}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1:D}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
; First word. Notice, no 'D':
- tail call void asm sideeffect " lw $0, ${1}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
; High-order part.
- tail call void asm sideeffect " lw $0, ${1:M}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1:M}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
; Low-order part.
- tail call void asm sideeffect " lw $0, ${1:L}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0, ${1:L}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
ret void
}
@data = global [8193 x i32] zeroinitializer
; Inline-asm "*o" (offsettable memory) constraint, zero-offset case; the
; element-0 getelementptr folds to a direct @data reference under opaque ptrs.
-define void @o(i32 *%p) nounwind {
+define void @o(ptr %p) nounwind {
entry:
; CHECK-LABEL: o:
- call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+ call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) @data)
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
; "*o" constraint with element index 1 (byte offset 4); gep retained,
; pointer types migrated to opaque `ptr`.
-define void @o_offset_4(i32 *%p) nounwind {
+define void @o_offset_4(ptr %p) nounwind {
entry:
; CHECK-LABEL: o_offset_4:
- call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+ call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
; "*o" constraint at element index 8191 (byte offset 32764) — per the name,
; the last offset representable in a signed 16-bit displacement.
-define void @o_offset_32764(i32 *%p) nounwind {
+define void @o_offset_32764(ptr %p) nounwind {
entry:
; CHECK-LABEL: o_offset_32764:
- call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+ call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK: #APP
ret void
}
-define void @o_offset_32768(i32 *%p) nounwind {
+define void @o_offset_32768(ptr %p) nounwind {
entry:
; CHECK-LABEL: o_offset_32768:
- call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+ call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768
define void @b() {
entry:
- %0 = load i8, i8* @a, align 1
+ %0 = load i8, ptr @a, align 1
%tobool = trunc i8 %0 to i1
call void asm sideeffect "", "Jr,~{$1}"(i1 %tobool)
ret void
; CHECK-NEXT: mflo ${{[0-9]+}}
%bosco = alloca i32, align 4
call i32 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
- store volatile i32 %4, i32* %bosco, align 4
+ store volatile i32 %4, ptr %bosco, align 4
; Check the 'l' constraint for 16-bit type.
; CHECK: #APP
; CHECK-NEXT: mflo ${{[0-9]+}}
%bosco16 = alloca i16, align 4
call i16 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
- store volatile i16 %5, i16* %bosco16, align 4
+ store volatile i16 %5, ptr %bosco16, align 4
ret i32 0
}
; LE32: or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
; BE32: or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
; ALL: #NO_APP
- %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, ptr @uval, align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0, ${1:D}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
; LE32: or ${{[0-9]+}}, $[[FIRST]], ${{[0-9]+}}
; BE32: or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
; ALL: #NO_APP
- %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, ptr @uval, align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0, ${1:L}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
; LE32: or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
; BE32: or ${{[0-9]+}}, $[[FIRST]], ${{[0-9]+}}
; ALL: #NO_APP
- %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+ %bosco = load i64, ptr @uval, align 8
%trunc1 = trunc i64 %bosco to i32
tail call i32 asm sideeffect "or $0, ${1:M}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
ret i32 0
; CHECK: #TEST baz
@baz = internal global i32 0, align 4
; ":c" operand modifier on an "i" (immediate/symbol) constraint: emits the
; bare symbol @baz into the asm template; the call argument migrates to `ptr`.
define dso_local i32 @test_inlineasm_c_output_template1() {
- tail call void asm sideeffect "#TEST ${0:c}", "i"(i32* nonnull @baz)
+ tail call void asm sideeffect "#TEST ${0:c}", "i"(ptr nonnull @baz)
ret i32 42
}
declare i32 @foo(...)
declare void @bar()
-define void @main() personality i8* bitcast (i32 (...)* @foo to i8*) {
+define void @main() personality ptr @foo {
entry:
invoke void @bar() #0
to label %unreachable unwind label %return
unreachable
return:
- %0 = landingpad { i8*, i32 }
- catch i8* null
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
ret void
}
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
-@i3 = common global i32* null, align 4
+@i3 = common global ptr null, align 4
; 32-LABEL: test_float_int_:
; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
; RUN: llc < %s -march=mipsel -relocation-model=pic | FileCheck %s
-@caller.sf1 = internal unnamed_addr global void (...)* null, align 4
-@gf1 = external global void (...)*
+@caller.sf1 = internal unnamed_addr global ptr null, align 4
+@gf1 = external global ptr
@.str = private unnamed_addr constant [3 x i8] c"f2\00"
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, ptr nocapture %argv) nounwind {
entry:
; CHECK: lw $[[R0:[0-9]+]], %got(f2)
; CHECK: addiu $25, $[[R0]], %lo(f2)
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %tmp1 = load void (...)*, void (...)** @caller.sf1, align 4
+ %tmp1 = load ptr, ptr @caller.sf1, align 4
tail call void (...) %tmp1() nounwind
br label %if.end
; CHECK: lw $[[R3:[0-9]+]], %got(caller.sf1)
; CHECK: sw ${{[0-9]+}}, %lo(caller.sf1)($[[R3]])
%tobool3 = icmp ne i32 %a0, 0
- %tmp4 = load void (...)*, void (...)** @gf1, align 4
- %cond = select i1 %tobool3, void (...)* %tmp4, void (...)* bitcast (void ()* @sf2 to void (...)*)
- store void (...)* %cond, void (...)** @caller.sf1, align 4
+ %tmp4 = load ptr, ptr @gf1, align 4
+ %cond = select i1 %tobool3, ptr %tmp4, ptr @sf2
+ store ptr %cond, ptr @caller.sf1, align 4
ret void
}
; Prints the @.str literal; the explicit gep-to-first-element is dropped in
; the opaque-pointer form (a [3 x i8]* and its element-0 gep are both `ptr`).
define internal void @sf2() nounwind {
entry:
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str) nounwind
ret void
}
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
; fastcc sibling of @sf2 above: same printf(@.str) body, same gep folding
; under the opaque-pointer migration.
define internal fastcc void @f2() nounwind noinline {
entry:
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str) nounwind
ret void
}
; CHECK: sw $26, [[R3:[0-9]+]]($sp)
; CHECK: mfhi $26
; CHECK: sw $26, [[R4:[0-9]+]]($sp)
- call void bitcast (void (...)* @write to void ()*)()
+ call void @write()
; CHECK: lw $26, [[R4:[0-9]+]]($sp)
; CHECK: mthi $26
; CHECK: lw $26, [[R3:[0-9]+]]($sp)
define void @test(i32 %i) nounwind {
entry:
%i.addr = alloca i32, align 4
- store i32 %i, i32* %i.addr, align 4
- %0 = load i32, i32* %i.addr, align 4
+ store i32 %i, ptr %i.addr, align 4
+ %0 = load i32, ptr %i.addr, align 4
switch i32 %0, label %sw.epilog [
i32 115, label %sw.bb
i32 105, label %sw.bb1
]
sw.bb: ; preds = %entry
- store i8 115, i8* @c, align 1
+ store i8 115, ptr @c, align 1
br label %sw.epilog
sw.bb1: ; preds = %entry
- store i8 105, i8* @c, align 1
+ store i8 105, ptr @c, align 1
br label %sw.epilog
sw.bb2: ; preds = %entry
- store i8 100, i8* @c, align 1
+ store i8 100, ptr @c, align 1
br label %sw.epilog
sw.bb3: ; preds = %entry
- store i8 108, i8* @c, align 1
+ store i8 108, ptr @c, align 1
br label %sw.epilog
sw.bb4: ; preds = %entry
- store i8 99, i8* @c, align 1
+ store i8 99, ptr @c, align 1
br label %sw.epilog
sw.bb5: ; preds = %entry
- store i8 68, i8* @c, align 1
+ store i8 68, ptr @c, align 1
br label %sw.epilog
sw.bb6: ; preds = %entry
- store i8 81, i8* @c, align 1
+ store i8 81, ptr @c, align 1
br label %sw.epilog
sw.bb7: ; preds = %entry
- store i8 76, i8* @c, align 1
+ store i8 76, ptr @c, align 1
br label %sw.epilog
sw.epilog: ; preds = %entry, %sw.bb7, %sw.bb6, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
@.str.6 = private unnamed_addr constant [2 x i8] c"G\00", align 1
@.str.7 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
; Selects one of the @.str* string constants by %Letter through a stack
; %retval slot and returns it. NOTE(review): the switch here lists only
; cases 0 and 1 although blocks sw.bb2..sw.bb6 exist below — the remaining
; case lines appear to have been trimmed by extraction; confirm against the
; original test file.
-define i8* @_Z3fooi(i32 signext %Letter) {
+define ptr @_Z3fooi(i32 signext %Letter) {
entry:
- %retval = alloca i8*, align 8
+ %retval = alloca ptr, align 8
%Letter.addr = alloca i32, align 4
- store i32 %Letter, i32* %Letter.addr, align 4
- %0 = load i32, i32* %Letter.addr, align 4
+ store i32 %Letter, ptr %Letter.addr, align 4
+ %0 = load i32, ptr %Letter.addr, align 4
switch i32 %0, label %sw.epilog [
i32 0, label %sw.bb
i32 1, label %sw.bb1
]
sw.bb:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str, ptr %retval, align 8
br label %return
sw.bb1:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.1, ptr %retval, align 8
br label %return
sw.bb2:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.2, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.2, ptr %retval, align 8
br label %return
sw.bb3:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.3, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.3, ptr %retval, align 8
br label %return
sw.bb4:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.4, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.4, ptr %retval, align 8
br label %return
sw.bb5:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.5, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.5, ptr %retval, align 8
br label %return
sw.bb6:
- store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.6, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.6, ptr %retval, align 8
br label %return
sw.epilog:
- store i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str.7, i32 0, i32 0), i8** %retval, align 8
+ store ptr @.str.7, ptr %retval, align 8
br label %return
return:
- %1 = load i8*, i8** %retval, align 8
- ret i8* %1
+ %1 = load ptr, ptr %retval, align 8
+ ret ptr %1
}
; Function Attrs: nounwind
; FP-to-integer conversion coverage: fptosi/fptoui from double and float to
; i64 (ll*/ull*) and to i32 (l*/ul*), each result stored to a global. Only
; the load/store pointer operands change in this diff.
define void @_Z3foov() #0 {
entry:
- %0 = load double, double* @d1, align 8
+ %0 = load double, ptr @d1, align 8
%conv = fptosi double %0 to i64
- store i64 %conv, i64* @ll1, align 8
- %1 = load double, double* @d2, align 8
+ store i64 %conv, ptr @ll1, align 8
+ %1 = load double, ptr @d2, align 8
%conv1 = fptoui double %1 to i64
- store i64 %conv1, i64* @ull1, align 8
- %2 = load float, float* @f1, align 4
+ store i64 %conv1, ptr @ull1, align 8
+ %2 = load float, ptr @f1, align 4
%conv2 = fptosi float %2 to i64
- store i64 %conv2, i64* @ll2, align 8
- %3 = load float, float* @f2, align 4
+ store i64 %conv2, ptr @ll2, align 8
+ %3 = load float, ptr @f2, align 4
%conv3 = fptoui float %3 to i64
- store i64 %conv3, i64* @ull2, align 8
- %4 = load double, double* @d3, align 8
+ store i64 %conv3, ptr @ull2, align 8
+ %4 = load double, ptr @d3, align 8
%conv4 = fptosi double %4 to i32
- store i32 %conv4, i32* @l1, align 4
- %5 = load double, double* @d4, align 8
+ store i32 %conv4, ptr @l1, align 4
+ %5 = load double, ptr @d4, align 8
%conv5 = fptoui double %5 to i32
- store i32 %conv5, i32* @ul1, align 4
- %6 = load float, float* @f3, align 4
+ store i32 %conv5, ptr @ul1, align 4
+ %6 = load float, ptr @f3, align 4
%conv6 = fptosi float %6 to i32
- store i32 %conv6, i32* @l2, align 4
- %7 = load float, float* @f4, align 4
+ store i32 %conv6, ptr @l2, align 4
+ %7 = load float, ptr @f4, align 4
%conv7 = fptoui float %7 to i32
- store i32 %conv7, i32* @ul2, align 4
+ store i32 %conv7, ptr @ul2, align 4
ret void
}
; Function Attrs: nounwind
; Inverse of @_Z3foov above in the same test: integer-to-FP coverage via
; sitofp/uitofp from i64 and i32 back into the double/float globals.
define void @_Z3goov() #0 {
entry:
- %0 = load i64, i64* @ll1, align 8
+ %0 = load i64, ptr @ll1, align 8
%conv = sitofp i64 %0 to double
- store double %conv, double* @d1, align 8
- %1 = load i64, i64* @ull1, align 8
+ store double %conv, ptr @d1, align 8
+ %1 = load i64, ptr @ull1, align 8
%conv1 = uitofp i64 %1 to double
- store double %conv1, double* @d2, align 8
- %2 = load i64, i64* @ll2, align 8
+ store double %conv1, ptr @d2, align 8
+ %2 = load i64, ptr @ll2, align 8
%conv2 = sitofp i64 %2 to float
- store float %conv2, float* @f1, align 4
- %3 = load i64, i64* @ull2, align 8
+ store float %conv2, ptr @f1, align 4
+ %3 = load i64, ptr @ull2, align 8
%conv3 = uitofp i64 %3 to float
- store float %conv3, float* @f2, align 4
- %4 = load i32, i32* @l1, align 4
+ store float %conv3, ptr @f2, align 4
+ %4 = load i32, ptr @l1, align 4
%conv4 = sitofp i32 %4 to double
- store double %conv4, double* @d3, align 8
- %5 = load i32, i32* @ul1, align 4
+ store double %conv4, ptr @d3, align 8
+ %5 = load i32, ptr @ul1, align 4
%conv5 = uitofp i32 %5 to double
- store double %conv5, double* @d4, align 8
- %6 = load i32, i32* @l2, align 4
+ store double %conv5, ptr @d4, align 8
+ %6 = load i32, ptr @l2, align 4
%conv6 = sitofp i32 %6 to float
- store float %conv6, float* @f3, align 4
- %7 = load i32, i32* @ul2, align 4
+ store float %conv6, ptr @f3, align 4
+ %7 = load i32, ptr @ul2, align 4
%conv7 = uitofp i32 %7 to float
- store float %conv7, float* @f4, align 4
+ store float %conv7, ptr @f4, align 4
ret void
}
; Large-frame test: a 1 GiB alloca with a gep at byte offset 1048676, so the
; address computation cannot use a 16-bit immediate (see the addu-from-$sp
; pattern the checks expect).
define void @f() nounwind {
entry:
%a1 = alloca [1073741824 x i8], align 1
- %arrayidx = getelementptr inbounds [1073741824 x i8], [1073741824 x i8]* %a1, i32 0, i32 1048676
- call void @f2(i8* %arrayidx) nounwind
+ %arrayidx = getelementptr inbounds [1073741824 x i8], ptr %a1, i32 0, i32 1048676
+ call void @f2(ptr %arrayidx) nounwind
ret void
; CHECK-LABEL: f:
; CHECK: addu ${{[0-9]+}}, $sp, $[[R2]]
}
-declare void @f2(i8*)
+declare void @f2(ptr)
; 64: sd $ra, 24($[[R1]])
%agg.tmp = alloca %struct.S1, align 1
- %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false)
- call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %agg.tmp, ptr align 1 @s1, i32 65536, i1 false)
+ call void @f2(ptr byval(%struct.S1) %agg.tmp) nounwind
ret void
}
-declare void @f2(%struct.S1* byval(%struct.S1))
+declare void @f2(ptr byval(%struct.S1))
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
; Signed i8 load test (expects `lb` per the "16:" check line): sext the
; loaded byte to i32, round-trip it through a stack slot, print it.
define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i8, i8* @c, align 1
+ %0 = load i8, ptr @c, align 1
; 16: lb ${{[0-9]+}}, 0(${{[0-9]+}})
%conv = sext i8 %0 to i32
- store i32 %conv, i32* %i, align 4
- %1 = load i32, i32* %i, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ store i32 %conv, ptr %i, align 4
+ %1 = load i32, ptr %i, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Unsigned i8 load test (expects `lbu`): zext the loaded byte; note the
; second, independent load of @c feeding the printf argument.
define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i8, i8* @c, align 1
+ %0 = load i8, ptr @c, align 1
%conv = zext i8 %0 to i32
; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
- store i32 %conv, i32* %i, align 4
- %1 = load i8, i8* @c, align 1
+ store i32 %conv, ptr %i, align 4
+ %1 = load i8, ptr @c, align 1
%conv1 = zext i8 %1 to i32
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %conv1)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %conv1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Function Attrs: nounwind optsize
define i32 @bnez() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !5
- store i32 0, i32* @i, align 4, !tbaa !1
+ store i32 0, ptr @i, align 4, !tbaa !1
br label %if.end
if.end: ; preds = %if.then, %entry
; Function Attrs: nounwind optsize
define i32 @beqz() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 10, i32* @j, align 4, !tbaa !1
+ store i32 10, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !6
br label %if.end
if.else: ; preds = %entry
- store i32 55, i32* @j, align 4, !tbaa !1
+ store i32 55, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !7
br label %if.end
; Function Attrs: nounwind optsize
define void @bteqz() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 1, i32* @k, align 4, !tbaa !1
+ store i32 1, ptr @k, align 4, !tbaa !1
tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !8
br label %if.end
if.else: ; preds = %entry
tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
- store i32 2, i32* @k, align 4, !tbaa !1
+ store i32 2, ptr @k, align 4, !tbaa !1
br label %if.end
if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @btz() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !10
- %2 = load i32, i32* @i, align 4, !tbaa !1
- %3 = load i32, i32* @j, align 4, !tbaa !1
+ %2 = load i32, ptr @i, align 4, !tbaa !1
+ %3 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
; Function Attrs: nounwind
define i32 @s() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 0, i32* @i, align 4
+ store i32 0, ptr @i, align 4
call void asm sideeffect ".space 1000", ""() #1, !srcloc !1
br label %if.end
if.else: ; preds = %entry
- store i32 1, i32* @i, align 4
+ store i32 1, ptr @i, align 4
br label %if.end
if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind
define i32 @b() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 0, i32* @i, align 4
+ store i32 0, ptr @i, align 4
call void asm sideeffect ".space 1000000", ""() #1, !srcloc !2
br label %if.end
if.else: ; preds = %entry
- store i32 1, i32* @i, align 4
+ store i32 1, ptr @i, align 4
br label %if.end
if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define i32 @foo() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.end: ; preds = %if.else, %if.then
%storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
- store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ store i32 %storemerge, ptr @i, align 4, !tbaa !1
ret i32 0
}
; Function Attrs: nounwind optsize
define i32 @goo() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.end: ; preds = %if.else, %if.then
%storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
- store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ store i32 %storemerge, ptr @i, align 4, !tbaa !1
ret i32 0
}
; Function Attrs: nounwind optsize
define i32 @x0() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.end: ; preds = %if.else, %if.then
%storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
- store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ store i32 %storemerge, ptr @i, align 4, !tbaa !1
ret i32 0
}
; Function Attrs: nounwind optsize
define i32 @x1() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.end: ; preds = %if.else, %if.then
%storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
- store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ store i32 %storemerge, ptr @i, align 4, !tbaa !1
ret i32 0
}
; Function Attrs: nounwind optsize
define i32 @y0() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 10, i32* @j, align 4, !tbaa !1
+ store i32 10, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
br label %if.end
if.else: ; preds = %entry
- store i32 55, i32* @j, align 4, !tbaa !1
+ store i32 55, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 1004", ""() #1, !srcloc !10
br label %if.end
; Function Attrs: nounwind optsize
define i32 @y1() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 10, i32* @j, align 4, !tbaa !1
+ store i32 10, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 1000000", ""() #1, !srcloc !11
br label %if.end
if.else: ; preds = %entry
- store i32 55, i32* @j, align 4, !tbaa !1
+ store i32 55, ptr @j, align 4, !tbaa !1
tail call void asm sideeffect ".space 1000004", ""() #1, !srcloc !12
br label %if.end
; Function Attrs: nounwind optsize
define void @z0() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 1, i32* @k, align 4, !tbaa !1
+ store i32 1, ptr @k, align 4, !tbaa !1
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !13
br label %if.end
if.else: ; preds = %entry
tail call void asm sideeffect ".space 10004", ""() #1, !srcloc !14
- store i32 2, i32* @k, align 4, !tbaa !1
+ store i32 2, ptr @k, align 4, !tbaa !1
br label %if.end
if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @z1() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- store i32 1, i32* @k, align 4, !tbaa !1
+ store i32 1, ptr @k, align 4, !tbaa !1
tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !15
br label %if.end
if.else: ; preds = %entry
tail call void asm sideeffect ".space 10000004", ""() #1, !srcloc !16
- store i32 2, i32* @k, align 4, !tbaa !1
+ store i32 2, ptr @k, align 4, !tbaa !1
br label %if.end
if.end: ; preds = %if.else, %if.then
; Function Attrs: nounwind optsize
define void @z3() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !17
- %2 = load i32, i32* @i, align 4, !tbaa !1
- %3 = load i32, i32* @j, align 4, !tbaa !1
+ %2 = load i32, ptr @i, align 4, !tbaa !1
+ %3 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
; Function Attrs: nounwind optsize
define void @z4() #0 {
entry:
- %0 = load i32, i32* @i, align 4, !tbaa !1
- %1 = load i32, i32* @j, align 4, !tbaa !1
+ %0 = load i32, ptr @i, align 4, !tbaa !1
+ %1 = load i32, ptr @j, align 4, !tbaa !1
%cmp1 = icmp sgt i32 %0, %1
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %entry, %if.then
tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !18
- %2 = load i32, i32* @i, align 4, !tbaa !1
- %3 = load i32, i32* @j, align 4, !tbaa !1
+ %2 = load i32, ptr @i, align 4, !tbaa !1
+ %3 = load i32, ptr @j, align 4, !tbaa !1
%cmp = icmp sgt i32 %2, %3
br i1 %cmp, label %if.then, label %if.end
; Signed i16 load test (expects `lh`): sext the loaded halfword to i32 and
; print it.
define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i16, i16* @s, align 2
+ %0 = load i16, ptr @s, align 2
%conv = sext i16 %0 to i32
; 16: lh ${{[0-9]+}}, 0(${{[0-9]+}})
- store i32 %conv, i32* %i, align 4
- %1 = load i32, i32* %i, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ store i32 %conv, ptr %i, align 4
+ %1 = load i32, ptr %i, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Unsigned i16 load test (expects `lhu`): zext the loaded halfword to i32
; and print it.
define i32 @main() nounwind {
entry:
%i = alloca i32, align 4
- %0 = load i16, i16* @s, align 2
+ %0 = load i16, ptr @s, align 2
%conv = zext i16 %0 to i32
; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}})
- store i32 %conv, i32* %i, align 4
- %1 = load i32, i32* %i, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ store i32 %conv, ptr %i, align 4
+ %1 = load i32, ptr %i, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define void @test1() nounwind {
entry:
- %0 = load i64, i64* @i, align 8
- %1 = load i64, i64* @j, align 8
+ %0 = load i64, ptr @i, align 8
+ %1 = load i64, ptr @j, align 8
%add = add nsw i64 %1, %0
- store i64 %add, i64* @k, align 8
+ store i64 %add, ptr @k, align 8
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 64-bit subtraction lowered on a 32-bit target: the checks expect the
; subu/sltu borrow-propagation sequence on the register halves.
define void @test2() nounwind {
entry:
- %0 = load i64, i64* @i, align 8
- %1 = load i64, i64* @j, align 8
+ %0 = load i64, ptr @i, align 8
+ %1 = load i64, ptr @j, align 8
%sub = sub nsw i64 %0, %1
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
- store i64 %sub, i64* @l, align 8
+ store i64 %sub, ptr @l, align 8
ret void
}
; 64-bit add-with-immediate (+15) on a 32-bit target: addiu on the low half,
; sltu to compute the carry, addu to fold it into the high half.
define void @test3() nounwind {
entry:
- %0 = load i64, i64* @ii, align 8
+ %0 = load i64, ptr @ii, align 8
%add = add nsw i64 %0, 15
; 16: addiu ${{[0-9]+}}, 15
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
- store i64 %add, i64* @m, align 8
+ store i64 %add, ptr @m, align 8
ret void
}
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL
; Address spaces 1-255 are software defined.
; Chains addrspacecasts 0 -> 1 -> 2 -> 0 and returns the result; address
; spaces 1-255 are software-defined per the comment above, so the round trip
; should be a no-op at the machine level.
-define i32* @cast(i32 *%arg) {
- %1 = addrspacecast i32* %arg to i32 addrspace(1)*
- %2 = addrspacecast i32 addrspace(1)* %1 to i32 addrspace(2)*
- %3 = addrspacecast i32 addrspace(2)* %2 to i32 addrspace(0)*
- ret i32* %3
+define ptr @cast(ptr %arg) {
+ %1 = addrspacecast ptr %arg to ptr addrspace(1)
+ %2 = addrspacecast ptr addrspace(1) %1 to ptr addrspace(2)
+ %3 = addrspacecast ptr addrspace(2) %2 to ptr addrspace(0)
+ ret ptr %3
}
; ALL-LABEL: cast:
;CHK32: LL_R6
;CHK32: SC_R6
- %1 = atomicrmw add i32* @a, i32 %x monotonic
+ %1 = atomicrmw add ptr @a, i32 %x monotonic
ret i32 %1
}
;CHK64: LLD_R6
;CHK64: SCD_R6
- %1 = atomicrmw add i64* @b, i64 %x monotonic
+ %1 = atomicrmw add ptr @b, i64 %x monotonic
ret i64 %1
}
ret float %2
}
-define i32 @indirect_call_void_void(void ()* %addr) {
+define i32 @indirect_call_void_void(ptr %addr) {
; ALL-LABEL: indirect_call_void_void:
; ALL: move $25, $4
ret i32 0
}
-define i32 @indirect_call_i32_void(i32 ()* %addr) {
+define i32 @indirect_call_i32_void(ptr %addr) {
; ALL-LABEL: indirect_call_i32_void:
; ALL: move $25, $4
ret i32 %2
}
-define float @indirect_call_float_void(float ()* %addr) {
+define float @indirect_call_float_void(ptr %addr) {
; ALL-LABEL: indirect_call_float_void:
; ALL: move $25, $4
; We can't use 'musttail' here because the verifier is too conservative and
; prohibits any prototype difference.
-define void @tail_indirect_call_void_void(void ()* %addr) {
+define void @tail_indirect_call_void_void(ptr %addr) {
; ALL-LABEL: tail_indirect_call_void_void:
; ALL: move $25, $4
ret void
}
-define i32 @tail_indirect_call_i32_void(i32 ()* %addr) {
+define i32 @tail_indirect_call_i32_void(ptr %addr) {
; ALL-LABEL: tail_indirect_call_i32_void:
; ALL: move $25, $4
ret i32 %1
}
-define float @tail_indirect_call_float_void(float ()* %addr) {
+define float @tail_indirect_call_float_void(ptr %addr) {
; ALL-LABEL: tail_indirect_call_float_void:
; ALL: move $25, $4
; R6C: jalrc $[[TGT]]
; ALL-NOT: {{jal }}
- call void () inttoptr (i32 1234 to void ()*)()
+ call void () inttoptr (i32 1234 to ptr)()
; R6C: jrc $ra
ret i32 0
}
; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | FileCheck %s -check-prefixes=ALL,NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefixes=ALL,R6
-define i32 @br(i8 *%addr) {
+define i32 @br(ptr %addr) {
; ALL-LABEL: br:
; NOT-R6: jr $4 # <MCInst #{{[0-9]+}} JR
; R6C: jrc $4 # <MCInst #{{[0-9]+}} JIC
; ALL: addiu $2, $zero, 1
entry:
- indirectbr i8* %addr, [label %L1, label %L2]
+ indirectbr ptr %addr, [label %L1, label %L2]
L1:
ret i32 0
entry:
; CHECK-LABEL: lhfunc
; CHECK: lh $[[REG1:[0-9]+]], 0(${{[0-9]+}})
- %0 = load i16, i16* @us, align 2
+ %0 = load i16, ptr @us, align 2
%conv = sext i16 %0 to i32
ret i32 %conv
}
entry:
; CHECK-LABEL: lhfunc_atomic
; CHECK: lh $[[REG1:[0-9]+]], 0(${{[0-9]+}})
- %0 = load atomic i16, i16* @us acquire, align 2
+ %0 = load atomic i16, ptr @us acquire, align 2
ret i16 %0
}
entry:
; CHECK-LABEL: lhufunc
; CHECK: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
- %0 = load i16, i16* @us, align 2
+ %0 = load i16, ptr @us, align 2
%conv = zext i16 %0 to i32
ret i32 %conv
}
; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
; RUN: FileCheck %s -check-prefixes=ALL,M64
-define i8 @load_i8(i8* %ptr) {
+define i8 @load_i8(ptr %ptr) {
; ALL-LABEL: load_i8
; ALL: lb $2, 0($4)
; ALL: sync
- %val = load atomic i8, i8* %ptr acquire, align 1
+ %val = load atomic i8, ptr %ptr acquire, align 1
ret i8 %val
}
-define i16 @load_i16(i16* %ptr) {
+define i16 @load_i16(ptr %ptr) {
; ALL-LABEL: load_i16
; ALL: lh $2, 0($4)
; ALL: sync
- %val = load atomic i16, i16* %ptr acquire, align 2
+ %val = load atomic i16, ptr %ptr acquire, align 2
ret i16 %val
}
-define i32 @load_i32(i32* %ptr) {
+define i32 @load_i32(ptr %ptr) {
; ALL-LABEL: load_i32
; ALL: lw $2, 0($4)
; ALL: sync
- %val = load atomic i32, i32* %ptr acquire, align 4
+ %val = load atomic i32, ptr %ptr acquire, align 4
ret i32 %val
}
-define i64 @load_i64(i64* %ptr) {
+define i64 @load_i64(ptr %ptr) {
; M64-LABEL: load_i64
; M64: ld $2, 0($4)
; M64: sync
- %val = load atomic i64, i64* %ptr acquire, align 8
+ %val = load atomic i64, ptr %ptr acquire, align 8
ret i64 %val
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(a))>>
entry:
- %0 = load i8, i8 * @a
+ %0 = load i8, ptr @a
ret i8 %0
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(a))>>
entry:
- %0 = load i8, i8 * @a
+ %0 = load i8, ptr @a
%1 = sext i8 %0 to i32
ret i32 %1
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(b))>>
entry:
- %0 = load i16, i16 * @b
+ %0 = load i16, ptr @b
ret i16 %0
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(b))>>
entry:
- %0 = load i16, i16 * @b
+ %0 = load i16, ptr @b
%1 = sext i16 %0 to i32
ret i32 %1
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(c))>>
entry:
- %0 = load i32, i32 * @c
+ %0 = load i32, ptr @c
ret i32 %0
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Imm:0>>
entry:
- %0 = load i32, i32 * @c
+ %0 = load i32, ptr @c
%1 = zext i32 %0 to i64
ret i64 %1
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Imm:31>>
entry:
- %0 = load i32, i32 * @c
+ %0 = load i32, ptr @c
%1 = sext i32 %0 to i64
ret i64 %1
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(e))>>
entry:
- %0 = load float, float * @e
+ %0 = load float, ptr @e
ret float %0
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(f))>>
entry:
- %0 = load double, double * @f
+ %0 = load double, ptr @f
ret double %0
}
ret i64 %r
}
-define i8* @tst_select_word_cst(i8* %a, i8* %b) {
+define ptr @tst_select_word_cst(ptr %a, ptr %b) {
; ALL-LABEL: tst_select_word_cst:
; M2: addiu $[[T0:[0-9]+]], $zero, -1
; MM32R6: sltu $[[T2:[0-9]+]], $zero, $[[T1]]
; MM32R6: seleqz $2, $4, $[[T2]]
- %cmp = icmp eq i8* %b, inttoptr (i64 -1 to i8*)
- %r = select i1 %cmp, i8* %a, i8* null
- ret i8* %r
+ %cmp = icmp eq ptr %b, inttoptr (i64 -1 to ptr)
+ %r = select i1 %cmp, ptr %a, ptr null
+ ret ptr %r
}
; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
; RUN: FileCheck %s -check-prefixes=ALL,M64
-define void @store_i8(i8* %ptr, i8 signext %v) {
+define void @store_i8(ptr %ptr, i8 signext %v) {
; ALL-LABEL: store_i8
; ALL: sync
; ALL: sb $5, 0($4)
- store atomic i8 %v, i8* %ptr release, align 1
+ store atomic i8 %v, ptr %ptr release, align 1
ret void
}
-define void @store_i16(i16* %ptr, i16 signext %v) {
+define void @store_i16(ptr %ptr, i16 signext %v) {
; ALL-LABEL: store_i16
; ALL: sync
; ALL: sh $5, 0($4)
- store atomic i16 %v, i16* %ptr release, align 2
+ store atomic i16 %v, ptr %ptr release, align 2
ret void
}
-define void @store_i32(i32* %ptr, i32 signext %v) {
+define void @store_i32(ptr %ptr, i32 signext %v) {
; ALL-LABEL: store_i32
; ALL: sync
; ALL: sw $5, 0($4)
- store atomic i32 %v, i32* %ptr release, align 4
+ store atomic i32 %v, ptr %ptr release, align 4
ret void
}
-define void @store_i64(i64* %ptr, i64 %v) {
+define void @store_i64(ptr %ptr, i64 %v) {
; M64-LABEL: store_i64
; M64: sync
; M64: sd $5, 0($4)
- store atomic i64 %v, i64* %ptr release, align 8
+ store atomic i64 %v, ptr %ptr release, align 8
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(a))>>
- store i8 %a, i8 * @a
+ store i8 %a, ptr @a
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(b))>>
- store i16 %a, i16 * @b
+ store i16 %a, ptr @b
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(c))>>
- store i32 %a, i32 * @c
+ store i32 %a, ptr @c
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Imm:4>>
- store i64 %a, i64 * @d
+ store i64 %a, ptr @d
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(e))>>
- store float %e, float * @e
+ store float %e, ptr @e
ret void
}
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Reg:{{[0-9]+}}>
; MIPS32R5FP643-NEXT: # <MCOperand Expr:(%lo(f))>>
- store double %f, double * @f
+ store double %f, ptr @f
ret void
}
; RUN: | FileCheck -check-prefix=ON64 %s
declare void @callee()
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
@val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
; ON64: jalr $25
call void @callee()
- call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i1 false)
+ call void @llvm.memset.p0.i32(ptr align 4 @val, i8 0, i32 80, i1 false)
ret void
}
br i1 %cmp, label %end, label %then
then:
- store i32 1, i32* @x, align 4
+ store i32 1, ptr @x, align 4
br label %end
end:
entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- store i32 %argc, i32* %argc.addr, align 4
+ store i32 0, ptr %retval, align 4
+ store i32 %argc, ptr %argc.addr, align 4
call void asm sideeffect "test_label_1:", "~{$1}"()
- %0 = load i32, i32* %argc.addr, align 4
+ %0 = load i32, ptr %argc.addr, align 4
%cmp = icmp sgt i32 %0, 1
br i1 %cmp, label %if.then, label %if.end
if.then:
call void asm sideeffect ".space 68435052", "~{$1}"()
- %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
- store i32 %call, i32* %retval, align 4
+ %call = call i32 @foo()
+ store i32 %call, ptr %retval, align 4
br label %return
if.end:
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
br label %return
return:
- %1 = load i32, i32* %retval, align 4
+ %1 = load i32, ptr %retval, align 4
ret i32 %1
}
entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- store i32 %argc, i32* %argc.addr, align 4
+ store i32 0, ptr %retval, align 4
+ store i32 %argc, ptr %argc.addr, align 4
call void asm sideeffect "test_label_2:", "~{$1}"()
- %0 = load i32, i32* %argc.addr, align 4
+ %0 = load i32, ptr %argc.addr, align 4
%cmp = icmp sgt i32 %0, 1
br i1 %cmp, label %if.then, label %if.end
if.then:
call void asm sideeffect ".space 268435052", "~{$1}"()
- %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
- store i32 %call, i32* %retval, align 4
+ %call = call i32 @foo()
+ store i32 %call, ptr %retval, align 4
br label %return
if.end:
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
br label %return
return:
- %1 = load i32, i32* %retval, align 4
+ %1 = load i32, ptr %retval, align 4
ret i32 %1
}
entry:
%retval = alloca i32, align 4
%argc.addr = alloca i32, align 4
- store i32 0, i32* %retval, align 4
- store i32 %argc, i32* %argc.addr, align 4
+ store i32 0, ptr %retval, align 4
+ store i32 %argc, ptr %argc.addr, align 4
call void asm sideeffect "test_label_3:", "~{$1}"()
- %0 = load i32, i32* %argc.addr, align 4
+ %0 = load i32, ptr %argc.addr, align 4
%cmp = icmp sgt i32 %0, 1
br i1 %cmp, label %if.then, label %if.end
if.then:
call void asm sideeffect ".space 268435452", "~{$1}"()
- %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
- store i32 %call, i32* %retval, align 4
+ %call = call i32 @foo()
+ store i32 %call, ptr %retval, align 4
br label %return
if.end:
- store i32 0, i32* %retval, align 4
+ store i32 0, ptr %retval, align 4
br label %return
return:
- %1 = load i32, i32* %retval, align 4
+ %1 = load i32, ptr %retval, align 4
ret i32 %1
}
$_ZN1TaSERKS_ = comdat any
-define linkonce_odr void @_ZN1TaSERKS_(%struct.T* %this, %struct.T* dereferenceable(4) %t) #0 comdat align 2 {
+define linkonce_odr void @_ZN1TaSERKS_(ptr %this, ptr dereferenceable(4) %t) #0 comdat align 2 {
entry:
- %this.addr = alloca %struct.T*, align 4
- %t.addr = alloca %struct.T*, align 4
- %this1 = load %struct.T*, %struct.T** %this.addr, align 4
- %0 = load %struct.T*, %struct.T** %t.addr, align 4
- %V3 = getelementptr inbounds %struct.T, %struct.T* %0, i32 0, i32 0
- %1 = load i32, i32* %V3, align 4
- %V4 = getelementptr inbounds %struct.T, %struct.T* %this1, i32 0, i32 0
- store i32 %1, i32* %V4, align 4
+ %this.addr = alloca ptr, align 4
+ %t.addr = alloca ptr, align 4
+ %this1 = load ptr, ptr %this.addr, align 4
+ %0 = load ptr, ptr %t.addr, align 4
+ %1 = load i32, ptr %0, align 4
+ store i32 %1, ptr %this1, align 4
ret void
}
define void @foo() nounwind {
entry:
- %0 = load i32, i32* @g, align 4
+ %0 = load i32, ptr @g, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
%add = add nsw i32 %0, 10
- store i32 %add, i32* @g, align 4
+ store i32 %add, ptr @g, align 4
br label %if.end
if.end: ; preds = %entry, %if.then
define i32 @main() #0 {
entry:
%retval = alloca i32, align 4
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
br label %z
z: ; preds = %y, %entry
- %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
+ %call = call i32 @foo()
call void asm sideeffect ".space 10000000", ""() #2, !srcloc !1
br label %y
y: ; preds = %z
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0))
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str)
br label %z
return: ; No predecessors!
- %0 = load i32, i32* %retval
+ %0 = load i32, ptr %retval
ret i32 %0
; jal16: jal $BB{{[0-9]+}}_{{[0-9]+}}
}
declare i32 @foo(...) #1
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
@.str = private unnamed_addr constant [31 x i8] c"abcdefghijklmnopqrstuvwxyzABCD\00", align 1
-define void @foo1(%struct.S1* %s1, i8 signext %n) nounwind {
+define void @foo1(ptr %s1, i8 signext %n) nounwind {
entry:
; CHECK-NOT: call16(memcpy
- %arraydecay = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 0
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %arraydecay, i8* align 1 getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i1 false)
- %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 40
- store i8 %n, i8* %arrayidx, align 1
+ %arraydecay = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1, i32 0
+ tail call void @llvm.memcpy.p0.p0.i32(ptr align 1 %arraydecay, ptr align 1 @.str, i32 31, i1 false)
+ %arrayidx = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1, i32 40
+ store i8 %n, ptr %arrayidx, align 1
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
%addiu1 = add i32 %0, -7
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu1)
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu1)
- %1 = load i32, i32* @y, align 4
+ %1 = load i32, ptr @y, align 4
%addiu2 = add i32 %1, 55
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu2)
+ %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu2)
- %2 = load i32, i32* @z, align 4
+ %2 = load i32, ptr @z, align 4
%addiu3 = add i32 %2, 24
- %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu3)
+ %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu3)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; CHECK: addius5 ${{[0-9]+}}, -7
; CHECK: addiu ${{[0-9]+}}, ${{[0-9]+}}, 55
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* %b, align 4
- %1 = load i32, i32* %c, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr %b, align 4
+ %1 = load i32, ptr %c, align 4
%add = add nsw i32 %0, %1
- store i32 %add, i32* %a, align 4
+ store i32 %add, ptr %a, align 4
ret i32 0
}
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* %b, align 4
- %1 = load i32, i32* %c, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr %b, align 4
+ %1 = load i32, ptr %c, align 4
%and = and i32 %0, %1
- store i32 %and, i32* %a, align 4
+ store i32 %and, ptr %a, align 4
ret i32 0
}
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
%and1 = and i32 %0, 4
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and1)
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %and1)
- %1 = load i32, i32* @y, align 4
+ %1 = load i32, ptr @y, align 4
%and2 = and i32 %1, 5
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
- ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and2)
+ %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %and2)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; CHECK: andi16 ${{[2-7]|16|17}}, ${{[2-7]|16|17}}
; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: jrc $ra
entry:
- %0 = atomicrmw add i32* @x, i32 %incr monotonic
+ %0 = atomicrmw add ptr @x, i32 %incr monotonic
ret i32 %0
}
define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
entry:
- %0 = atomicrmw add i8* @y, i8 %incr monotonic
+ %0 = atomicrmw add ptr @y, i8 %incr monotonic
ret i8 %0
; MICROMIPS: ll ${{[0-9]+}}, 0(${{[0-9]+}})
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
- %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %pair0 = cmpxchg ptr @y, i8 %oldval, i8 %newval monotonic monotonic
%0 = extractvalue { i8, i1 } %pair0, 0
ret i8 %0
@x = external global i32, align 4
define void @foo() {
- %1 = load i32, i32* @x, align 4
+ %1 = load i32, ptr @x, align 4
%2 = icmp sgt i32 %1, 0
br i1 %2, label %la, label %lf
define void @main() nounwind uwtable {
entry:
%x = alloca i32, align 4
- %0 = load i32, i32* %x, align 4
+ %0 = load i32, ptr %x, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end, !prof !1
if.then:
- store i32 10, i32* %x, align 4
+ store i32 10, ptr %x, align 4
br label %if.end
if.end:
; RUN: llc -mtriple=mipsel -mcpu=mips32r2 -mattr=+micromips \
; RUN: -relocation-model=static -O2 < %s | FileCheck %s
-@main.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* null], align 4
+@main.L = internal unnamed_addr constant [3 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr null], align 4
@str = private unnamed_addr constant [2 x i8] c"A\00"
@str2 = private unnamed_addr constant [2 x i8] c"B\00"
L1: ; preds = %entry, %L1
%i.0 = phi i32 [ 0, %entry ], [ %inc, %L1 ]
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+ %puts = tail call i32 @puts(ptr @str)
%inc = add i32 %i.0, 1
- %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @main.L, i32 0, i32 %i.0
- %0 = load i8*, i8** %arrayidx, align 4, !tbaa !1
- indirectbr i8* %0, [label %L1, label %L2]
+ %arrayidx = getelementptr inbounds [3 x ptr], ptr @main.L, i32 0, i32 %i.0
+ %0 = load ptr, ptr %arrayidx, align 4, !tbaa !1
+ indirectbr ptr %0, [label %L1, label %L2]
L2: ; preds = %L1
- %puts2 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str2, i32 0, i32 0))
+ %puts2 = tail call i32 @puts(ptr @str2)
ret i32 0
}
-declare i32 @puts(i8* nocapture readonly) #1
+declare i32 @puts(ptr nocapture readonly) #1
!1 = !{!2, !2, i64 0}
!2 = !{!"any pointer", !3, i64 0}
%struct.foostruct = type { [3 x float] }
%struct.barstruct = type { %struct.foostruct, float }
@bar_ary = common global [4 x %struct.barstruct] zeroinitializer, align 4
-define float* @spooky(i32 signext %i) #0 {
+define ptr @spooky(i32 signext %i) #0 {
- %safe = getelementptr inbounds [4 x %struct.barstruct], [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1
- store float 1.420000e+02, float* %safe, align 4, !tbaa !1
- ret float* %safe
+ %safe = getelementptr inbounds [4 x %struct.barstruct], ptr @bar_ary, i32 0, i32 %i, i32 1
+ store float 1.420000e+02, ptr %safe, align 4, !tbaa !1
+ ret ptr %safe
}
; CHECK: spooky:
define i32 @foo(i32 signext %a) #0 {
entry:
%a.addr = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- %0 = load i32, i32* %a.addr, align 4
+ store i32 %a, ptr %a.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
%shl = shl i32 %0, 2
%call = call i32 @bar(i32 signext %shl)
ret i32 %call
; CHECK-NEXT: 0000 ff9b1501 0c011100 00110e1f 011f1800
; CHECK-NEXT: 0010 00010000 00000000
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
-define dso_local i32 @main() local_unnamed_addr norecurse personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local i32 @main() local_unnamed_addr norecurse personality ptr @__gxx_personality_v0 {
entry:
- %exception.i = tail call i8* @__cxa_allocate_exception(i32 4) nounwind
- %0 = bitcast i8* %exception.i to i32*
- store i32 5, i32* %0, align 16
- invoke void @__cxa_throw(i8* %exception.i, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+ %exception.i = tail call ptr @__cxa_allocate_exception(i32 4) nounwind
+ store i32 5, ptr %exception.i, align 16
+ invoke void @__cxa_throw(ptr %exception.i, ptr @_ZTIi, ptr null) noreturn
to label %.noexc unwind label %return
.noexc:
unreachable
return:
- %1 = landingpad { i8*, i32 }
- catch i8* null
- %2 = extractvalue { i8*, i32 } %1, 0
- %3 = tail call i8* @__cxa_begin_catch(i8* %2) nounwind
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
+ %1 = extractvalue { ptr, i32 } %0, 0
+ %2 = tail call ptr @__cxa_begin_catch(ptr %1) nounwind
tail call void @__cxa_end_catch()
ret i32 0
}
declare i32 @__gxx_personality_v0(...)
-declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
declare void @__cxa_end_catch() local_unnamed_addr
-declare i8* @__cxa_allocate_exception(i32) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i32) local_unnamed_addr
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
; Function Attrs: noreturn nounwind
define void @foo() #0 {
entry:
- %0 = load i32, i32* @g, align 4
+ %0 = load i32, ptr @g, align 4
tail call void @exit(i32 signext %0)
unreachable
}
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store i32 %b, i32* %b.addr, align 4
- %0 = load i32, i32* %a.addr, align 4
- %1 = load i32, i32* %b.addr, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
%add = add nsw i32 %0, %1
ret i32 %add
}
%x = alloca i32, align 4
%y = alloca i32, align 4
%z = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* %y, align 4
- %1 = load i32, i32* %z, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr %y, align 4
+ %1 = load i32, ptr %z, align 4
%call = call i32 @sum(i32 %0, i32 %1)
- store i32 %call, i32* %x, align 4
- %2 = load i32, i32* %x, align 4
+ store i32 %call, ptr %x, align 4
+ %2 = load i32, ptr %x, align 4
ret i32 %2
}
define i32 @main() nounwind {
entry:
- store i32 1, i32* @x, align 4
- store i32 2148, i32* @y, align 4
- store i32 33332, i32* @z, align 4
+ store i32 1, ptr @x, align 4
+ store i32 2148, ptr @y, align 4
+ store i32 33332, ptr @z, align 4
ret i32 0
}
; RUN: llc %s -march=mipsel -mattr=micromips -filetype=asm \
; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
-define i32 @sum(i32* %x, i32* %y) nounwind uwtable {
+define i32 @sum(ptr %x, ptr %y) nounwind uwtable {
entry:
- %x.addr = alloca i32*, align 8
- %y.addr = alloca i32*, align 8
- store i32* %x, i32** %x.addr, align 8
- store i32* %y, i32** %y.addr, align 8
- %0 = load i32*, i32** %x.addr, align 8
- %1 = load i32, i32* %0, align 4
- %2 = load i32*, i32** %y.addr, align 8
- %3 = load i32, i32* %2, align 4
+ %x.addr = alloca ptr, align 8
+ %y.addr = alloca ptr, align 8
+ store ptr %x, ptr %x.addr, align 8
+ store ptr %y, ptr %y.addr, align 8
+ %0 = load ptr, ptr %x.addr, align 8
+ %1 = load i32, ptr %0, align 4
+ %2 = load ptr, ptr %y.addr, align 8
+ %3 = load i32, ptr %2, align 4
%add = add nsw i32 %1, %3
ret i32 %add
}
%retval = alloca i32, align 4
%x = alloca i32, align 4
%y = alloca i32, align 4
- store i32 0, i32* %retval
- %call = call i32 @sum(i32* %x, i32* %y)
+ store i32 0, ptr %retval
+ %call = call i32 @sum(ptr %x, ptr %y)
ret i32 %call
}
; MM32: lw $[[R3:[0-9]+]], %got(gf0)($[[R2]])
; MM32: lwc1 $f0, 0($[[R3]])
- %0 = load float, float* @gf0, align 4
+ %0 = load float, ptr @gf0, align 4
ret float %0
}
; MM32: lw $[[R3:[0-9]+]], %got(gf0)($[[R2]])
; MM32: swc1 $f12, 0($[[R3]])
- store float %a, float* @gf0, align 4
+ store float %a, ptr @gf0, align 4
ret void
}
entry:
%retval = alloca i32, align 4
%x = alloca i64, align 8
- store i32 0, i32* %retval
- %0 = load i64, i64* %x, align 8
+ store i32 0, ptr %retval
+ %0 = load i64, ptr %x, align 8
%cmp = icmp ne i64 %0, 9223372036854775807
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 1, i32* %retval
+ store i32 1, ptr %retval
br label %return
if.end:
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
br label %return
return:
- %1 = load i32, i32* %retval
+ %1 = load i32, ptr %retval
ret i32 %1
}
; CHECK: rdhwr
; CHECK: .set pop
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
define i32 @shift_left() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%shl = shl i32 %0, 4
- store i32 %shl, i32* @b, align 4
+ store i32 %shl, ptr @b, align 4
- %1 = load i32, i32* @c, align 4
+ %1 = load i32, ptr @c, align 4
%shl1 = shl i32 %1, 10
- store i32 %shl1, i32* @d, align 4
+ store i32 %shl1, ptr @d, align 4
ret i32 0
}
define i32 @shift_right() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%shr = lshr i32 %0, 4
- store i32 %shr, i32* @j, align 4
+ store i32 %shr, ptr @j, align 4
- %1 = load i32, i32* @m, align 4
+ %1 = load i32, ptr @m, align 4
%shr1 = lshr i32 %1, 10
- store i32 %shr1, i32* @n, align 4
+ store i32 %shr1, ptr @n, align 4
ret i32 0
}
; CHECK: addiur1sp
; CHECK: addiusp
%a = alloca [10 x i32], align 4
- %index = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i32 0
- call void @init(i32* %index)
- %0 = load i32, i32* %index, align 4
+ call void @init(ptr %a)
+ %0 = load i32, ptr %a, align 4
ret i32 %0
}
-declare void @init(i32*)
+declare void @init(ptr)
; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips -verify-machineinstrs < %s | FileCheck %s
-define void @f1(i8* %p) {
+define void @f1(ptr %p) {
entry:
; CHECK-LABEL: f1:
; CHECK: lbu16
; CHECK: sb16
- %0 = load i8, i8* %p, align 4
+ %0 = load i8, ptr %p, align 4
%a = zext i8 %0 to i32
%and = and i32 %a, 1
%cmp = icmp eq i32 %and, 0
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i8 0, i8* %p, align 1
+ store i8 0, ptr %p, align 1
br label %if.end
if.end:
ret void
}
-define void @f2(i16* %p) {
+define void @f2(ptr %p) {
entry:
; CHECK-LABEL: f2:
; CHECK: lhu16
; CHECK: sh16
- %0 = load i16, i16* %p, align 2
+ %0 = load i16, ptr %p, align 2
%a = zext i16 %0 to i32
%and = and i32 %a, 2
%cmp = icmp eq i32 %and, 0
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i16 0, i16* %p, align 2
+ store i16 0, ptr %p, align 2
br label %if.end
if.end:
; RUN: -verify-machineinstrs < %s | FileCheck %s
; Function Attrs: nounwind
-define i32 @fun(i32* %adr, i32 %val) {
+define i32 @fun(ptr %adr, i32 %val) {
; CHECK-LABEL: fun:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addiusp -32
; CHECK-NEXT: addiusp 32
; CHECK-NEXT: jrc $ra
entry:
- %call1 = call i32* @fun1()
- store i32 %val, i32* %adr, align 4
+ %call1 = call ptr @fun1()
+ store i32 %val, ptr %adr, align 4
ret i32 0
}
-declare i32* @fun1()
+declare ptr @fun1()
; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips -asm-show-inst -verify-machineinstrs < %s | FileCheck %s
; Function Attrs: nounwind
-define i32 @function1(i32 (i32)* %f) {
+define i32 @function1(ptr %f) {
entry:
; CHECK-LABEL: function1:
; CHECK: SWSP_MM
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* %b, align 4
- %1 = load i32, i32* %c, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr %b, align 4
+ %1 = load i32, ptr %c, align 4
%sub = sub nsw i32 %0, %1
- store i32 %sub, i32* %a, align 4
+ store i32 %sub, ptr %a, align 4
ret i32 0
}
; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
; Function Attrs: noinline nounwind
-define void @bar(i32* %p) #0 {
+define void @bar(ptr %p) #0 {
entry:
- %p.addr = alloca i32*, align 4
- store i32* %p, i32** %p.addr, align 4
- %0 = load i32*, i32** %p.addr, align 4
- %1 = load i32, i32* %0, align 4
+ %p.addr = alloca ptr, align 4
+ store ptr %p, ptr %p.addr, align 4
+ %0 = load ptr, ptr %p.addr, align 4
+ %1 = load i32, ptr %0, align 4
%add = add nsw i32 7, %1
- %2 = load i32*, i32** %p.addr, align 4
- store i32 %add, i32* %2, align 4
- %3 = load i32*, i32** %p.addr, align 4
- %add.ptr = getelementptr inbounds i32, i32* %3, i32 1
- %4 = load i32, i32* %add.ptr, align 4
+ %2 = load ptr, ptr %p.addr, align 4
+ store i32 %add, ptr %2, align 4
+ %3 = load ptr, ptr %p.addr, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %3, i32 1
+ %4 = load i32, ptr %add.ptr, align 4
%add1 = add nsw i32 7, %4
- %5 = load i32*, i32** %p.addr, align 4
- %add.ptr2 = getelementptr inbounds i32, i32* %5, i32 1
- store i32 %add1, i32* %add.ptr2, align 4
+ %5 = load ptr, ptr %p.addr, align 4
+ %add.ptr2 = getelementptr inbounds i32, ptr %5, i32 1
+ store i32 %add1, ptr %add.ptr2, align 4
ret void
}
; MM6-LABEL: <fun>:
; MM6: fb fd 00 14 sw $ra, 20($sp)
entry:
- call i32* @fun1()
+ call ptr @fun1()
ret void
}
-declare i32* @fun1()
+declare ptr @fun1()
; MM6: JAL_MMR6 &memset
; MM6-NOT: JALRC16_MMR6
-define dso_local void @foo(i32* nocapture %ar) local_unnamed_addr {
+define dso_local void @foo(ptr nocapture %ar) local_unnamed_addr {
entry:
call void @bar()
- %0 = bitcast i32* %ar to i8*
- tail call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 100, i1 false)
+ tail call void @llvm.memset.p0.i32(ptr align 4 %ar, i8 0, i32 100, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
declare void @bar()
%a = alloca i32, align 4
%b = alloca i32, align 4
%c = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32, i32* %b, align 4
- %1 = load i32, i32* %c, align 4
+ store i32 0, ptr %retval
+ %0 = load i32, ptr %b, align 4
+ %1 = load i32, ptr %c, align 4
%xor = xor i32 %0, %1
- store i32 %xor, i32* %a, align 4
+ store i32 %xor, ptr %a, align 4
ret i32 0
}
target triple = "mipsel-unknown-unknown-elf"
; Function Attrs: noinline nounwind optnone
-define dso_local i32 @add_two_pointers(i32* %a, i32* %b) #0 {
+define dso_local i32 @add_two_pointers(ptr %a, ptr %b) #0 {
entry:
; ALL-LABEL: add_two_pointers:
- %a.addr = alloca i32*, align 4
- %b.addr = alloca i32*, align 4
- store i32* %a, i32** %a.addr, align 4
- store i32* %b, i32** %b.addr, align 4
- %0 = load i32*, i32** %a.addr, align 4
- %1 = load i32, i32* %0, align 4
+ %a.addr = alloca ptr, align 4
+ %b.addr = alloca ptr, align 4
+ store ptr %a, ptr %a.addr, align 4
+ store ptr %b, ptr %b.addr, align 4
+ %0 = load ptr, ptr %a.addr, align 4
+ %1 = load i32, ptr %0, align 4
; ALL: lw $1, 4($fp)
; MIPS1: nop
; MIPS2-NOT: nop
; MIPS32-NOT: nop
; ALL: lw $1, 0($1)
- %2 = load i32*, i32** %b.addr, align 4
- %3 = load i32, i32* %2, align 4
+ %2 = load ptr, ptr %b.addr, align 4
+ %3 = load i32, ptr %2, align 4
; ALL: lw $2, 0($fp)
; MIPS1: nop
; MIPS2-NOT: nop
; Stores the constant 10 to the global @i (attribute set #0).
define void @foo() #0 {
entry:
- store i32 10, i32* @i, align 4
+ store i32 10, ptr @i, align 4
ret void
}
; Stores 20 to @i, computes @x + @y into @f, then prints @f widened to
; double through the varargs @printf with format string @.str.
define void @nofoo() #1 {
entry:
- store i32 20, i32* @i, align 4
- %0 = load float, float* @x, align 4
- %1 = load float, float* @y, align 4
+ store i32 20, ptr @i, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @y, align 4
%add = fadd float %0, %1
- store float %add, float* @f, align 4
- %2 = load float, float* @f, align 4
+ store float %add, ptr @f, align 4
+ %2 = load float, ptr @f, align 4
%conv = fpext float %2 to double
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), double %conv)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, double %conv)
ret void
}
; 32: .set macro
; 32: .set reorder
; 32: .end nofoo
-declare i32 @printf(i8*, ...) #2
+declare i32 @printf(ptr, ...) #2
; Driver that runs @foo and @nofoo, printing the value of @i after each
; call (format strings @.str1 and @.str2).
define i32 @main() #3 {
entry:
call void @foo()
- %0 = load i32, i32* @i, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str1, i32 0, i32 0), i32 %0)
+ %0 = load i32, ptr @i, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str1, i32 %0)
call void @nofoo()
- %1 = load i32, i32* @i, align 4
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str2, i32 0, i32 0), i32 %1)
+ %1 = load i32, ptr @i, align 4
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str2, i32 %1)
ret i32 0
}
; Returns the float global @x.  The function's closing brace was missing
; in this file (the next define followed ret directly), which makes the
; module unparsable -- restored below after the embedded check comments.
define float @foox() {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
ret float %0
; 1: .ent foox
; 1: lw $2, %lo(x)(${{[0-9]+}})
}
; Returns the double global @dx.  Closing brace was missing in this file
; (next define followed ret directly) -- restored so the module parses.
define double @foodx() {
entry:
- %0 = load double, double* @dx, align 8
+ %0 = load double, ptr @dx, align 8
ret double %0
; 1: .ent foodx
; 1: lw $2, %lo(dx)(${{[0-9]+}})
}
; Builds and returns the complex-float value stored in global @cx by
; copying its real and imaginary parts through a local { float, float }.
; Closing brace was missing in this file -- restored so the module parses.
define { float, float } @foocx() {
entry:
%retval = alloca { float, float }, align 4
- %cx.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 0)
- %cx.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 1)
- %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
- store float %cx.real, float* %real
- store float %cx.imag, float* %imag
- %0 = load { float, float }, { float, float }* %retval
+ %cx.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 0)
+ %cx.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 1)
+ %real = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+ store float %cx.real, ptr %real
+ store float %cx.imag, ptr %imag
+ %0 = load { float, float }, ptr %retval
ret { float, float } %0
; 1: .ent foocx
; 1: lw $2, %lo(cx)(${{[0-9]+}})
}
; Same shape as @foocx but for complex double, reading global @dcx.
; Closing brace was missing in this file -- restored so the module parses.
define { double, double } @foodcx() {
entry:
%retval = alloca { double, double }, align 8
- %dcx.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 0)
- %dcx.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 1)
- %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
- store double %dcx.real, double* %real
- store double %dcx.imag, double* %imag
- %0 = load { double, double }, { double, double }* %retval
+ %dcx.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 0)
+ %dcx.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 1)
+ %real = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1
+ store double %dcx.real, ptr %real
+ store double %dcx.imag, ptr %imag
+ %0 = load { double, double }, ptr %retval
ret { double, double } %0
; 1: .ent foodcx
; 1: lw ${{[0-9]}}, %lo(dcx)(${{[0-9]+}})
}
;16-NEXT: .cfi_startproc
;16-NEXT: .cfi_personality
@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
@.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1
; C++-style exception round trip.  Prints @.str, allocates a 4-byte
; exception object, stores 20 into it, throws it with typeinfo @_ZTIi,
; catches it in the landing pad, and prints the caught integer via @.str1.
-define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @main() personality ptr @__gxx_personality_v0 {
entry:
%retval = alloca i32, align 4
- %exn.slot = alloca i8*
+ %exn.slot = alloca ptr
%ehselector.slot = alloca i32
%e = alloca i32, align 4
- store i32 0, i32* %retval
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0))
- %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
- %0 = bitcast i8* %exception to i32*
- store i32 20, i32* %0
- invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+ store i32 0, ptr %retval
+ %call = call i32 (ptr, ...) @printf(ptr @.str)
+ %exception = call ptr @__cxa_allocate_exception(i32 4) nounwind
+ store i32 20, ptr %exception
+ invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) noreturn
to label %unreachable unwind label %lpad
; Landing pad stashes the exception pointer and selector into the slots.
lpad: ; preds = %entry
- %1 = landingpad { i8*, i32 }
- catch i8* bitcast (i8** @_ZTIi to i8*)
- %2 = extractvalue { i8*, i32 } %1, 0
- store i8* %2, i8** %exn.slot
- %3 = extractvalue { i8*, i32 } %1, 1
- store i32 %3, i32* %ehselector.slot
+ %0 = landingpad { ptr, i32 }
+ catch ptr @_ZTIi
+ %1 = extractvalue { ptr, i32 } %0, 0
+ store ptr %1, ptr %exn.slot
+ %2 = extractvalue { ptr, i32 } %0, 1
+ store i32 %2, ptr %ehselector.slot
br label %catch.dispatch
; Dispatch compares the selector against the typeid for int (@_ZTIi).
catch.dispatch: ; preds = %lpad
- %sel = load i32, i32* %ehselector.slot
- %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
- %matches = icmp eq i32 %sel, %4
+ %sel = load i32, ptr %ehselector.slot
+ %3 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
+ %matches = icmp eq i32 %sel, %3
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn = load i8*, i8** %exn.slot
- %5 = call i8* @__cxa_begin_catch(i8* %exn) nounwind
- %6 = bitcast i8* %5 to i32*
- %exn.scalar = load i32, i32* %6
- store i32 %exn.scalar, i32* %e, align 4
- %7 = load i32, i32* %e, align 4
- %call2 = invoke i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str1, i32 0, i32 0), i32 %7)
+ %exn = load ptr, ptr %exn.slot
+ %4 = call ptr @__cxa_begin_catch(ptr %exn) nounwind
+ %exn.scalar = load i32, ptr %4
+ store i32 %exn.scalar, ptr %e, align 4
+ %5 = load i32, ptr %e, align 4
+ %call2 = invoke i32 (ptr, ...) @printf(ptr @.str1, i32 %5)
to label %invoke.cont unwind label %lpad1
invoke.cont: ; preds = %catch
ret i32 0
; Cleanup pad for an exception escaping the printf inside the catch block.
lpad1: ; preds = %catch
- %8 = landingpad { i8*, i32 }
+ %6 = landingpad { ptr, i32 }
cleanup
- %9 = extractvalue { i8*, i32 } %8, 0
- store i8* %9, i8** %exn.slot
- %10 = extractvalue { i8*, i32 } %8, 1
- store i32 %10, i32* %ehselector.slot
+ %7 = extractvalue { ptr, i32 } %6, 0
+ store ptr %7, ptr %exn.slot
+ %8 = extractvalue { ptr, i32 } %6, 1
+ store i32 %8, ptr %ehselector.slot
call void @__cxa_end_catch() nounwind
br label %eh.resume
; Rebuild the landing-pad aggregate and continue unwinding.
eh.resume: ; preds = %lpad1, %catch.dispatch
- %exn3 = load i8*, i8** %exn.slot
- %sel4 = load i32, i32* %ehselector.slot
- %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
- %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
- resume { i8*, i32 } %lpad.val5
+ %exn3 = load ptr, ptr %exn.slot
+ %sel4 = load i32, ptr %ehselector.slot
+ %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn3, 0
+ %lpad.val5 = insertvalue { ptr, i32 } %lpad.val, i32 %sel4, 1
+ resume { ptr, i32 } %lpad.val5
unreachable: ; preds = %entry
unreachable
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
-declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+declare i32 @llvm.eh.typeid.for(ptr) nounwind readnone
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
; fadd of float globals @x and @y, stored to @addsf3_result; the 16hf
; check expects the add to become a call through the __mips16_addsf3 stub.
define void @test_addsf3() nounwind {
entry:
;16hf-LABEL: test_addsf3:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @y, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @y, align 4
%add = fadd float %0, %1
- store float %add, float* @addsf3_result, align 4
+ store float %add, ptr @addsf3_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}})
ret void
}
; fadd of double globals @xd and @yd, stored to @adddf3_result; expects a
; call through the __mips16_adddf3 stub.
define void @test_adddf3() nounwind {
entry:
;16hf-LABEL: test_adddf3:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @yd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @yd, align 8
%add = fadd double %0, %1
- store double %add, double* @adddf3_result, align 8
+ store double %add, ptr @adddf3_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}})
ret void
}
; fsub of float globals @x and @y, stored to @subsf3_result; expects a
; call through the __mips16_subsf3 stub.
define void @test_subsf3() nounwind {
entry:
;16hf-LABEL: test_subsf3:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @y, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @y, align 4
%sub = fsub float %0, %1
- store float %sub, float* @subsf3_result, align 4
+ store float %sub, ptr @subsf3_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}})
ret void
}
; fsub of double globals @xd and @yd, stored to @subdf3_result; expects a
; call through the __mips16_subdf3 stub.
define void @test_subdf3() nounwind {
entry:
;16hf-LABEL: test_subdf3:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @yd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @yd, align 8
%sub = fsub double %0, %1
- store double %sub, double* @subdf3_result, align 8
+ store double %sub, ptr @subdf3_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}})
ret void
}
; fmul of float globals @x and @y, stored to @mulsf3_result; expects a
; call through the __mips16_mulsf3 stub.
define void @test_mulsf3() nounwind {
entry:
;16hf-LABEL: test_mulsf3:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @y, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @y, align 4
%mul = fmul float %0, %1
- store float %mul, float* @mulsf3_result, align 4
+ store float %mul, ptr @mulsf3_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}})
ret void
}
; fmul of double globals @xd and @yd, stored to @muldf3_result; expects a
; call through the __mips16_muldf3 stub.
define void @test_muldf3() nounwind {
entry:
;16hf-LABEL: test_muldf3:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @yd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @yd, align 8
%mul = fmul double %0, %1
- store double %mul, double* @muldf3_result, align 8
+ store double %mul, ptr @muldf3_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}})
ret void
}
; fdiv of float globals (note operand order, @y divided by @x), stored to
; @divsf3_result; expects a call through the __mips16_divsf3 stub.
define void @test_divsf3() nounwind {
entry:
;16hf-LABEL: test_divsf3:
- %0 = load float, float* @y, align 4
- %1 = load float, float* @x, align 4
+ %0 = load float, ptr @y, align 4
+ %1 = load float, ptr @x, align 4
%div = fdiv float %0, %1
- store float %div, float* @divsf3_result, align 4
+ store float %div, ptr @divsf3_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}})
ret void
}
; Divides 2*@yd by @xd and stores to @divdf3_result; expects a call
; through the __mips16_divdf3 stub.
define void @test_divdf3() nounwind {
entry:
;16hf-LABEL: test_divdf3:
- %0 = load double, double* @yd, align 8
+ %0 = load double, ptr @yd, align 8
%mul = fmul double %0, 2.000000e+00
- %1 = load double, double* @xd, align 8
+ %1 = load double, ptr @xd, align 8
%div = fdiv double %mul, %1
- store double %div, double* @divdf3_result, align 8
+ store double %div, ptr @divdf3_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}})
ret void
}
; fpext of @x to double, stored to @extendsfdf2_result; expects a call
; through the __mips16_extendsfdf2 stub.
define void @test_extendsfdf2() nounwind {
entry:
;16hf-LABEL: test_extendsfdf2:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%conv = fpext float %0 to double
- store double %conv, double* @extendsfdf2_result, align 8
+ store double %conv, ptr @extendsfdf2_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}})
ret void
}
; fptrunc of @xd2 to float, stored to @truncdfsf2_result; expects a call
; through the __mips16_truncdfsf2 stub.
define void @test_truncdfsf2() nounwind {
entry:
;16hf-LABEL: test_truncdfsf2:
- %0 = load double, double* @xd2, align 8
+ %0 = load double, ptr @xd2, align 8
%conv = fptrunc double %0 to float
- store float %conv, float* @truncdfsf2_result, align 4
+ store float %conv, ptr @truncdfsf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}})
ret void
}
; fptosi of @x to i32, stored to @fix_truncsfsi_result; expects a call
; through the __mips16_fix_truncsfsi stub.
define void @test_fix_truncsfsi() nounwind {
entry:
;16hf-LABEL: test_fix_truncsfsi:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%conv = fptosi float %0 to i32
- store i32 %conv, i32* @fix_truncsfsi_result, align 4
+ store i32 %conv, ptr @fix_truncsfsi_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}})
ret void
}
; fptosi of @xd to i32, stored to @fix_truncdfsi_result; expects a call
; through the __mips16_fix_truncdfsi stub.
define void @test_fix_truncdfsi() nounwind {
entry:
;16hf-LABEL: test_fix_truncdfsi:
- %0 = load double, double* @xd, align 8
+ %0 = load double, ptr @xd, align 8
%conv = fptosi double %0 to i32
- store i32 %conv, i32* @fix_truncdfsi_result, align 4
+ store i32 %conv, ptr @fix_truncdfsi_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}})
ret void
}
; sitofp of @si to float, stored to @floatsisf_result; expects a call
; through the __mips16_floatsisf stub.
define void @test_floatsisf() nounwind {
entry:
;16hf-LABEL: test_floatsisf:
- %0 = load i32, i32* @si, align 4
+ %0 = load i32, ptr @si, align 4
%conv = sitofp i32 %0 to float
- store float %conv, float* @floatsisf_result, align 4
+ store float %conv, ptr @floatsisf_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}})
ret void
}
; sitofp of @si to double, stored to @floatsidf_result; expects a call
; through the __mips16_floatsidf stub.
define void @test_floatsidf() nounwind {
entry:
;16hf-LABEL: test_floatsidf:
- %0 = load i32, i32* @si, align 4
+ %0 = load i32, ptr @si, align 4
%conv = sitofp i32 %0 to double
- store double %conv, double* @floatsidf_result, align 8
+ store double %conv, ptr @floatsidf_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}})
ret void
}
; uitofp of @ui to float, stored to @floatunsisf_result; expects a call
; through the __mips16_floatunsisf stub.
define void @test_floatunsisf() nounwind {
entry:
;16hf-LABEL: test_floatunsisf:
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = uitofp i32 %0 to float
- store float %conv, float* @floatunsisf_result, align 4
+ store float %conv, ptr @floatunsisf_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}})
ret void
}
; uitofp of @ui to double, stored to @floatunsidf_result; expects a call
; through the __mips16_floatunsidf stub.
define void @test_floatunsidf() nounwind {
entry:
;16hf-LABEL: test_floatunsidf:
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = uitofp i32 %0 to double
- store double %conv, double* @floatunsidf_result, align 8
+ store double %conv, ptr @floatunsidf_result, align 8
;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}})
ret void
}
; fcmp oeq on float globals @x/@xx, zero-extended result stored to
; @eqsf2_result; expects a call through the __mips16_eqsf2 stub.
define void @test_eqsf2() nounwind {
entry:
;16hf-LABEL: test_eqsf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @xx, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @xx, align 4
%cmp = fcmp oeq float %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @eqsf2_result, align 4
+ store i32 %conv, ptr @eqsf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqsf2)(${{[0-9]+}})
ret void
}
; fcmp oeq on double globals @xd/@xxd, result stored to @eqdf2_result;
; expects a call through the __mips16_eqdf2 stub.
define void @test_eqdf2() nounwind {
entry:
;16hf-LABEL: test_eqdf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @xxd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @xxd, align 8
%cmp = fcmp oeq double %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @eqdf2_result, align 4
+ store i32 %conv, ptr @eqdf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqdf2)(${{[0-9]+}})
ret void
}
; fcmp une on float globals @x/@y, result stored to @nesf2_result;
; expects a call through the __mips16_nesf2 stub.
define void @test_nesf2() nounwind {
entry:
;16hf-LABEL: test_nesf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @y, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @y, align 4
%cmp = fcmp une float %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @nesf2_result, align 4
+ store i32 %conv, ptr @nesf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_nesf2)(${{[0-9]+}})
ret void
}
; fcmp une on double globals @xd/@yd, result stored to @nedf2_result;
; expects a call through the __mips16_nedf2 stub.
define void @test_nedf2() nounwind {
entry:
;16hf-LABEL: test_nedf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @yd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @yd, align 8
%cmp = fcmp une double %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @nedf2_result, align 4
+ store i32 %conv, ptr @nedf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_nedf2)(${{[0-9]+}})
ret void
}
; Two fcmp oge float comparisons ANDed together and stored to
; @gesf2_result; expects a call through the __mips16_gesf2 stub.
define void @test_gesf2() nounwind {
entry:
;16hf-LABEL: test_gesf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @xx, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @xx, align 4
%cmp = fcmp oge float %0, %1
- %2 = load float, float* @y, align 4
+ %2 = load float, ptr @y, align 4
%cmp1 = fcmp oge float %2, %0
%and3 = and i1 %cmp, %cmp1
%and = zext i1 %and3 to i32
- store i32 %and, i32* @gesf2_result, align 4
+ store i32 %and, ptr @gesf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_gesf2)(${{[0-9]+}})
ret void
}
; Two fcmp oge double comparisons ANDed together and stored to
; @gedf2_result; expects a call through the __mips16_gedf2 stub.
define void @test_gedf2() nounwind {
entry:
;16hf-LABEL: test_gedf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @xxd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @xxd, align 8
%cmp = fcmp oge double %0, %1
- %2 = load double, double* @yd, align 8
+ %2 = load double, ptr @yd, align 8
%cmp1 = fcmp oge double %2, %0
%and3 = and i1 %cmp, %cmp1
%and = zext i1 %and3 to i32
- store i32 %and, i32* @gedf2_result, align 4
+ store i32 %and, ptr @gedf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_gedf2)(${{[0-9]+}})
ret void
}
; Combines an fcmp uge and an fcmp olt on float globals and stores the
; ANDed result to @ltsf2_result; both comparisons lower to calls through
; the __mips16_ltsf2 stub (hence the two 16hf check lines).  The closing
; brace was missing in this file (the next define followed ret void
; directly) and is restored so the module parses.
define void @test_ltsf2() nounwind {
entry:
;16hf-LABEL: test_ltsf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @xx, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @xx, align 4
%lnot = fcmp uge float %0, %1
- %2 = load float, float* @y, align 4
+ %2 = load float, ptr @y, align 4
%cmp1 = fcmp olt float %0, %2
%and2 = and i1 %lnot, %cmp1
%and = zext i1 %and2 to i32
- store i32 %and, i32* @ltsf2_result, align 4
+ store i32 %and, ptr @ltsf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
ret void
}
; Double-precision variant of test_ltsf2, storing to @ltdf2_result via
; two calls through the __mips16_ltdf2 stub.  The closing brace was
; missing in this file and is restored so the module parses.
define void @test_ltdf2() nounwind {
entry:
;16hf-LABEL: test_ltdf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @xxd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @xxd, align 8
%lnot = fcmp uge double %0, %1
- %2 = load double, double* @yd, align 8
+ %2 = load double, ptr @yd, align 8
%cmp1 = fcmp olt double %0, %2
%and2 = and i1 %lnot, %cmp1
%and = zext i1 %and2 to i32
- store i32 %and, i32* @ltdf2_result, align 4
+ store i32 %and, ptr @ltdf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
ret void
}
; Two fcmp ole float comparisons ANDed together and stored to
; @lesf2_result; expects a call through the __mips16_lesf2 stub.
define void @test_lesf2() nounwind {
entry:
;16hf-LABEL: test_lesf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @xx, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @xx, align 4
%cmp = fcmp ole float %0, %1
- %2 = load float, float* @y, align 4
+ %2 = load float, ptr @y, align 4
%cmp1 = fcmp ole float %0, %2
%and3 = and i1 %cmp, %cmp1
%and = zext i1 %and3 to i32
- store i32 %and, i32* @lesf2_result, align 4
+ store i32 %and, ptr @lesf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_lesf2)(${{[0-9]+}})
ret void
}
; Two fcmp ole double comparisons ANDed together and stored to
; @ledf2_result; expects a call through the __mips16_ledf2 stub.
define void @test_ledf2() nounwind {
entry:
;16hf-LABEL: test_ledf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @xxd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @xxd, align 8
%cmp = fcmp ole double %0, %1
- %2 = load double, double* @yd, align 8
+ %2 = load double, ptr @yd, align 8
%cmp1 = fcmp ole double %0, %2
%and3 = and i1 %cmp, %cmp1
%and = zext i1 %and3 to i32
- store i32 %and, i32* @ledf2_result, align 4
+ store i32 %and, ptr @ledf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_ledf2)(${{[0-9]+}})
ret void
}
; fcmp ule and fcmp ogt on float globals, ANDed and stored to
; @gtsf2_result; expects a call through the __mips16_gtsf2 stub.
define void @test_gtsf2() nounwind {
entry:
;16hf-LABEL: test_gtsf2:
- %0 = load float, float* @x, align 4
- %1 = load float, float* @xx, align 4
+ %0 = load float, ptr @x, align 4
+ %1 = load float, ptr @xx, align 4
%lnot = fcmp ule float %0, %1
- %2 = load float, float* @y, align 4
+ %2 = load float, ptr @y, align 4
%cmp1 = fcmp ogt float %2, %0
%and2 = and i1 %lnot, %cmp1
%and = zext i1 %and2 to i32
- store i32 %and, i32* @gtsf2_result, align 4
+ store i32 %and, ptr @gtsf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtsf2)(${{[0-9]+}})
ret void
}
; fcmp ule and fcmp ogt on double globals, ANDed and stored to
; @gtdf2_result; expects a call through the __mips16_gtdf2 stub.
define void @test_gtdf2() nounwind {
entry:
;16hf-LABEL: test_gtdf2:
- %0 = load double, double* @xd, align 8
- %1 = load double, double* @xxd, align 8
+ %0 = load double, ptr @xd, align 8
+ %1 = load double, ptr @xxd, align 8
%lnot = fcmp ule double %0, %1
- %2 = load double, double* @yd, align 8
+ %2 = load double, ptr @yd, align 8
%cmp1 = fcmp ogt double %2, %0
%and2 = and i1 %lnot, %cmp1
%and = zext i1 %and2 to i32
- store i32 %and, i32* @gtdf2_result, align 4
+ store i32 %and, ptr @gtdf2_result, align 4
;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtdf2)(${{[0-9]+}})
ret void
}
; CHECK-NOT: Cannot scavenge register without an emergency spill slot!
-@n = external local_unnamed_addr global i32*, align 8
+@n = external local_unnamed_addr global ptr, align 8
; Register-scavenging stress test built from chained 64-bit
; multiply/accumulate operations over values loaded through undef/null
; and real pointers, finally storing an inttoptr result back to @n.
; NOTE(review) several SSA inputs used below (%add83, %tobool208,
; %spec.select400, %add157) have no visible definitions in this excerpt;
; their defining lines appear to have been dropped when this chunk was
; produced -- confirm against the full original test file.
-define void @o(i32* nocapture readonly %a, i64* %b) local_unnamed_addr {
+define void @o(ptr nocapture readonly %a, ptr %b) local_unnamed_addr {
entry:
- %0 = load i32, i32* undef, align 4
+ %0 = load i32, ptr undef, align 4
%and12 = and i32 %0, 67295
%1 = zext i32 %and12 to i64
%conv16 = sext i32 %0 to i64
- %2 = ptrtoint i64* %b to i64
+ %2 = ptrtoint ptr %b to i64
%mul22 = mul nsw i64 %1, %2
%mul23 = mul nsw i64 %conv16, %2
%tobool25 = icmp ne i64 %mul22, 0
%inc27 = zext i1 %tobool25 to i64
- %3 = load i32*, i32** @n, align 8
- %arrayidx36 = getelementptr inbounds i32, i32* %3, i64 4
- store i32 0, i32* %arrayidx36, align 4
+ %3 = load ptr, ptr @n, align 8
+ %arrayidx36 = getelementptr inbounds i32, ptr %3, i64 4
+ store i32 0, ptr %arrayidx36, align 4
%spec.select = add i64 0, %mul23
%hi14.0 = add i64 %spec.select, %inc27
%add51 = add i64 %hi14.0, 0
- %4 = load i32, i32* null, align 4
+ %4 = load i32, ptr null, align 4
%and59 = and i32 %4, 67295
%5 = zext i32 %and59 to i64
%conv63 = sext i32 %4 to i64
- %6 = load i64, i64* %b, align 8
+ %6 = load i64, ptr %b, align 8
%mul71 = mul nsw i64 %6, %5
%mul72 = mul nsw i64 %6, %conv63
%tobool74 = icmp ne i64 %mul71, 0
%inc76 = zext i1 %tobool74 to i64
- %arrayidx85 = getelementptr inbounds i32, i32* %a, i64 5
- %7 = load i32, i32* %arrayidx85, align 4
+ %arrayidx85 = getelementptr inbounds i32, ptr %a, i64 5
+ %7 = load i32, ptr %arrayidx85, align 4
%and86 = and i32 %7, 67295
%conv90 = sext i32 %7 to i64
- %8 = load i64, i64* undef, align 8
+ %8 = load i64, ptr undef, align 8
%mul99 = mul nsw i64 %8, %conv90
- %9 = load i32, i32* undef, align 4
+ %9 = load i32, ptr undef, align 4
%and113 = and i32 %9, 67295
%tobool126 = icmp eq i32 %and113, 0
%spec.select397.v = select i1 %tobool126, i64 2, i64 3
- %10 = load i32, i32* undef, align 4
+ %10 = load i32, ptr undef, align 4
%and138 = and i32 %10, 67295
%11 = zext i32 %and138 to i64
%conv142 = sext i32 %10 to i64
- %12 = load i64, i64* null, align 8
+ %12 = load i64, ptr null, align 8
%mul150 = mul nsw i64 %12, %11
%mul151 = mul nsw i64 %12, %conv142
%tobool153 = icmp ne i64 %mul150, 0
%add110 = add i64 %add83, 0
%add135 = add i64 %add110, 0
%add162 = add i64 %add135, 0
- %13 = load i32, i32* null, align 4
+ %13 = load i32, ptr null, align 4
%and165 = and i32 %13, 67295
%14 = zext i32 %and165 to i64
%conv169 = sext i32 %13 to i64
%inc210 = zext i1 %tobool208 to i64
%hi192.0 = add i64 %spec.select400, %add157
%add212 = add i64 %hi192.0, %inc210
- %15 = inttoptr i64 %add212 to i32*
- store i32* %15, i32** @n, align 8
+ %15 = inttoptr i64 %add212 to ptr
+ store ptr %15, ptr @n, align 8
ret void
}
; Stores the incoming fp128 argument to global @gld0.
define void @foo0(fp128 %a0) {
entry:
- store fp128 %a0, fp128* @gld0, align 16
+ store fp128 %a0, ptr @gld0, align 16
ret void
}
; Loads @gld0 and passes it to @foo2 as an fp128 argument.
define void @foo1() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
tail call void @foo2(fp128 %0)
ret void
}
; Stores @foo4's fp128 return value to @gld0, then returns @gld1.
define fp128 @foo3() {
entry:
%call = tail call fp128 @foo4()
- store fp128 %call, fp128* @gld0, align 16
- %0 = load fp128, fp128* @gld1, align 16
+ store fp128 %call, ptr @gld0, align 16
+ %0 = load fp128, ptr @gld1, align 16
ret fp128 %0
}
; Returns the fp128 sum of globals @gld0 and @gld1.
define fp128 @addLD() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%add = fadd fp128 %0, %1
ret fp128 %add
}
; Returns the fp128 difference @gld0 - @gld1.
define fp128 @subLD() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%sub = fsub fp128 %0, %1
ret fp128 %sub
}
; Returns the fp128 product of @gld0 and @gld1.
define fp128 @mulLD() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%mul = fmul fp128 %0, %1
ret fp128 %mul
}
; Returns the fp128 quotient @gld0 / @gld1.
define fp128 @divLD() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%div = fdiv fp128 %0, %1
ret fp128 %div
}
; Unary fp128 libcall test; applies fabsl to @gld0 and returns the result.
define fp128 @libcall1_fabsl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @fabsl(fp128 %0) nounwind readnone
ret fp128 %call
}
; Unary fp128 libcall test; applies ceill to @gld0 and returns the result.
define fp128 @libcall1_ceill() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @ceill(fp128 %0) nounwind readnone
ret fp128 %call
}
; Unary fp128 libcall test; applies sinl to @gld0 and returns the result.
define fp128 @libcall1_sinl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @sinl(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies cosl to @gld0 and returns the result.
define fp128 @libcall1_cosl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @cosl(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies expl to @gld0 and returns the result.
define fp128 @libcall1_expl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @expl(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies exp2l to @gld0 and returns the result.
define fp128 @libcall1_exp2l() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @exp2l(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies logl to @gld0 and returns the result.
define fp128 @libcall1_logl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @logl(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies log2l to @gld0 and returns the result.
define fp128 @libcall1_log2l() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @log2l(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies log10l to @gld0 and returns the result.
define fp128 @libcall1_log10l() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @log10l(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies nearbyintl to @gld0 and returns it.
define fp128 @libcall1_nearbyintl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone
ret fp128 %call
}
; Unary fp128 libcall test; applies floorl to @gld0 and returns the result.
define fp128 @libcall1_floorl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @floorl(fp128 %0) nounwind readnone
ret fp128 %call
}
; Unary fp128 libcall test; applies sqrtl to @gld0 and returns the result.
define fp128 @libcall1_sqrtl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @sqrtl(fp128 %0) nounwind
ret fp128 %call
}
; Unary fp128 libcall test; applies rintl to @gld0 and returns the result.
define fp128 @libcall1_rintl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld0, align 16
%call = tail call fp128 @rintl(fp128 %0) nounwind readnone
ret fp128 %call
}
; Binary fp128 libcall test; copysignl(@gld0, @gld1) returned directly.
define fp128 @libcall2_copysignl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone
ret fp128 %call
}
; Binary fp128 libcall test; powl(@gld0, @gld1) returned directly.
define fp128 @libcall2_powl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind
ret fp128 %call
}
; Binary fp128 libcall test; fmodl(@gld0, @gld1) returned directly.
define fp128 @libcall2_fmodl() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld1, align 16
%call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind
ret fp128 %call
}
; Ternary fp128 test using the llvm.fma.f128 intrinsic; computes
; fma(@gld0, @gld1, @gld2) and returns the result.
define fp128 @libcall3_fmal() {
entry:
- %0 = load fp128, fp128* @gld0, align 16
- %1 = load fp128, fp128* @gld2, align 16
- %2 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld0, align 16
+ %1 = load fp128, ptr @gld2, align 16
+ %2 = load fp128, ptr @gld1, align 16
%3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1)
ret fp128 %3
}
; Returns the fp128 global @gld1 unchanged.
define fp128 @load_LD_LD() {
entry:
- %0 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld1, align 16
ret fp128 %0
}
; Loads float global @gf1 and returns it widened to fp128.
define fp128 @load_LD_float() {
entry:
- %0 = load float, float* @gf1, align 4
+ %0 = load float, ptr @gf1, align 4
%conv = fpext float %0 to fp128
ret fp128 %conv
}
; Loads double global @gd1 and returns it widened to fp128.
define fp128 @load_LD_double() {
entry:
- %0 = load double, double* @gd1, align 8
+ %0 = load double, ptr @gd1, align 8
%conv = fpext double %0 to fp128
ret fp128 %conv
}
; Copies fp128 global @gld1 into @gld0.
define void @store_LD_LD() {
entry:
- %0 = load fp128, fp128* @gld1, align 16
- store fp128 %0, fp128* @gld0, align 16
+ %0 = load fp128, ptr @gld1, align 16
+ store fp128 %0, ptr @gld0, align 16
ret void
}
; Truncates fp128 global @gld1 to float and stores it into @gf1.
define void @store_LD_float() {
entry:
- %0 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld1, align 16
%conv = fptrunc fp128 %0 to float
- store float %conv, float* @gf1, align 4
+ store float %conv, ptr @gf1, align 4
ret void
}
; Truncates fp128 global @gld1 to double and stores it into @gd1.
define void @store_LD_double() {
entry:
- %0 = load fp128, fp128* @gld1, align 16
+ %0 = load fp128, ptr @gld1, align 16
%conv = fptrunc fp128 %0 to double
- store double %conv, double* @gd1, align 8
+ store double %conv, ptr @gd1, align 8
ret void
}
; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
; sret return test; stores 42 through the hidden struct-return pointer.
; The checks expect the sret pointer ($4) to be copied back into $2.
-define void @foo(i32* noalias sret(i32) %agg.result) nounwind {
+define void @foo(ptr noalias sret(i32) %agg.result) nounwind {
entry:
; CHECK-LABEL: foo:
; CHECK: sw {{.*}}, 0($4)
; CHECK: jr $ra
; CHECK-NEXT: move $2, $4
- store i32 42, i32* %agg.result
+ store i32 42, ptr %agg.result
ret void
}
; sret return test with a preceding scalar argument; stores %v through the
; sret pointer ($5) and expects it to be copied back into $2.
-define void @bar(i32 signext %v, i32* noalias sret(i32) %agg.result) nounwind {
+define void @bar(i32 signext %v, ptr noalias sret(i32) %agg.result) nounwind {
entry:
; CHECK-LABEL: bar:
; CHECK: sw $4, 0($5)
; CHECK: jr $ra
; CHECK-NEXT: move $2, $5
- store i32 %v, i32* %agg.result
+ store i32 %v, ptr %agg.result
ret void
}
; CHECK: 8byte
; Returns the i64 global @gl unchanged.
define i64 @foo1() nounwind readonly {
entry:
- %0 = load i64, i64* @gl, align 8
+ %0 = load i64, ptr @gl, align 8
ret i64 %0
}
; CHECK-N32: funcfl1
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
; CHECK-N32: lwc1 $f{{[0-9]+}}, 0($[[R0]])
- %0 = load float, float* @f0, align 4
+ %0 = load float, ptr @f0, align 4
ret float %0
}
; CHECK-N32: funcfl2
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
; CHECK-N32: ldc1 $f{{[0-9]+}}, 0($[[R0]])
- %0 = load double, double* @d0, align 8
+ %0 = load double, ptr @d0, align 8
ret double %0
}
; CHECK-N32: funcfs1
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
; CHECK-N32: swc1 $f{{[0-9]+}}, 0($[[R0]])
- %0 = load float, float* @f1, align 4
- store float %0, float* @f0, align 4
+ %0 = load float, ptr @f1, align 4
+ store float %0, ptr @f0, align 4
ret void
}
; CHECK-N32: funcfs2
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
; CHECK-N32: sdc1 $f{{[0-9]+}}, 0($[[R0]])
- %0 = load double, double* @d1, align 8
- store double %0, double* @d0, align 8
+ %0 = load double, ptr @d1, align 8
+ store double %0, ptr @d0, align 8
ret void
}
; GPRMULDIV: ddiv $2, $[[T0]], $[[T1]]
; GPRMULDIV: teq $[[T1]], $zero, 7
- %0 = load i64, i64* @gll0, align 8
- %1 = load i64, i64* @gll1, align 8
+ %0 = load i64, ptr @gll0, align 8
+ %1 = load i64, ptr @gll1, align 8
%div = sdiv i64 %0, %1
ret i64 %div
}
; GPRMULDIV: ddivu $2, $[[T0]], $[[T1]]
; GPRMULDIV: teq $[[T1]], $zero, 7
- %0 = load i64, i64* @gll0, align 8
- %1 = load i64, i64* @gll1, align 8
+ %0 = load i64, ptr @gll0, align 8
+ %1 = load i64, ptr @gll1, align 8
%div = udiv i64 %0, %1
ret i64 %div
}
; CHECK-N32: func1
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
; CHECK-N32: lb ${{[0-9]+}}, 0($[[R0]])
- %0 = load i8, i8* @c, align 4
+ %0 = load i8, ptr @c, align 4
%conv = sext i8 %0 to i64
ret i64 %conv
}
; CHECK-N32: func2
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
; CHECK-N32: lh ${{[0-9]+}}, 0($[[R0]])
- %0 = load i16, i16* @s, align 4
+ %0 = load i16, ptr @s, align 4
%conv = sext i16 %0 to i64
ret i64 %conv
}
; CHECK-N32: func3
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
; CHECK-N32: lw ${{[0-9]+}}, 0($[[R0]])
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = sext i32 %0 to i64
ret i64 %conv
}
; CHECK-N32: func4
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
; CHECK-N32: ld ${{[0-9]+}}, 0($[[R0]])
- %0 = load i64, i64* @l, align 8
+ %0 = load i64, ptr @l, align 8
ret i64 %0
}
; CHECK-N32: ufunc1
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(uc)
; CHECK-N32: lbu ${{[0-9]+}}, 0($[[R0]])
- %0 = load i8, i8* @uc, align 4
+ %0 = load i8, ptr @uc, align 4
%conv = zext i8 %0 to i64
ret i64 %conv
}
; CHECK-N32: ufunc2
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(us)
; CHECK-N32: lhu ${{[0-9]+}}, 0($[[R0]])
- %0 = load i16, i16* @us, align 4
+ %0 = load i16, ptr @us, align 4
%conv = zext i16 %0 to i64
ret i64 %conv
}
; CHECK-N32: ufunc3
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(ui)
; CHECK-N32: lwu ${{[0-9]+}}, 0($[[R0]])
- %0 = load i32, i32* @ui, align 4
+ %0 = load i32, ptr @ui, align 4
%conv = zext i32 %0 to i64
ret i64 %conv
}
; CHECK-N32: sfunc1
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
; CHECK-N32: sb ${{[0-9]+}}, 0($[[R0]])
- %0 = load i64, i64* @l1, align 8
+ %0 = load i64, ptr @l1, align 8
%conv = trunc i64 %0 to i8
- store i8 %conv, i8* @c, align 4
+ store i8 %conv, ptr @c, align 4
ret void
}
; CHECK-N32: sfunc2
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
; CHECK-N32: sh ${{[0-9]+}}, 0($[[R0]])
- %0 = load i64, i64* @l1, align 8
+ %0 = load i64, ptr @l1, align 8
%conv = trunc i64 %0 to i16
- store i16 %conv, i16* @s, align 4
+ store i16 %conv, ptr @s, align 4
ret void
}
; CHECK-N32: sfunc3
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
; CHECK-N32: sw ${{[0-9]+}}, 0($[[R0]])
- %0 = load i64, i64* @l1, align 8
+ %0 = load i64, ptr @l1, align 8
%conv = trunc i64 %0 to i32
- store i32 %conv, i32* @i, align 4
+ store i32 %conv, ptr @i, align 4
ret void
}
; CHECK-N32: sfunc4
; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
; CHECK-N32: sd ${{[0-9]+}}, 0($[[R0]])
- %0 = load i64, i64* @l1, align 8
- store i64 %0, i64* @l, align 8
+ %0 = load i64, ptr @l1, align 8
+ store i64 %0, ptr @l, align 8
ret void
}
entry:
; CHECK: daddiu ${{[0-9]+}}, $sp
%a = alloca i32, align 4
- call void @foo1(i32* %a) nounwind
+ call void @foo1(ptr %a) nounwind
ret void
}
-declare void @foo1(i32*)
+declare void @foo1(ptr)
entry:
%in = alloca float, align 4
%out = alloca float, align 4
- store volatile float 0xBFD59E1380000000, float* %in, align 4
- %in.0.in.0. = load volatile float, float* %in, align 4
+ store volatile float 0xBFD59E1380000000, ptr %in, align 4
+ %in.0.in.0. = load volatile float, ptr %in, align 4
%rintf = tail call float @rintf(float %in.0.in.0.) #1
- store volatile float %rintf, float* %out, align 4
+ store volatile float %rintf, ptr %out, align 4
ret void
; CHECK-LABEL: foosf
declare float @rintf(float)
-define float @foosf1(float* nocapture readonly %a) #0 {
+define float @foosf1(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%call = tail call float @roundf(float %0) #2
ret float %call
declare float @roundf(float) #1
-define float @foosf2(float* nocapture readonly %a) #0 {
+define float @foosf2(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%call = tail call float @truncf(float %0) #2
ret float %call
declare float @truncf(float) #1
-define float @foosf3(float* nocapture readonly %a) #0 {
+define float @foosf3(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%call = tail call float @floorf(float %0) #2
ret float %call
declare float @floorf(float) #1
-define float @foosf4(float* nocapture readonly %a) #0 {
+define float @foosf4(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%call = tail call float @nearbyintf(float %0) #2
ret float %call
declare float @nearbyintf(float) #1
; foosf5/6/7: float load feeding fmul/fsub/fadd with undef; CHECK-NOT: lwu
; asserts the float load is not emitted as a 64-bit zero-extending word load.
; Diff hunks are the opaque-pointer migration only.
-define float @foosf5(float* nocapture readonly %a) #0 {
+define float @foosf5(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%mul = fmul float %0, undef
ret float %mul
; CHECK-NOT: lwu
}
-define float @foosf6(float* nocapture readonly %a) #0 {
+define float @foosf6(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%sub = fsub float %0, undef
ret float %sub
; CHECK-NOT: lwu
}
-define float @foosf7(float* nocapture readonly %a) #0 {
+define float @foosf7(ptr nocapture readonly %a) #0 {
entry:
- %0 = load float, float* %a, align 4
+ %0 = load float, ptr %a, align 4
%add = fadd float %0, undef
ret float %add
; CHECK-NOT: lwu
}
-define float @foosf8(float* nocapture readonly %a) #0 {
+define float @foosf8(ptr nocapture readonly %a) #0 {
entry:
%b = alloca float, align 4
- %b.0.b.0. = load volatile float, float* %b, align 4
- %0 = load float, float* %a, align 4
+ %b.0.b.0. = load volatile float, ptr %b, align 4
+ %0 = load float, ptr %a, align 4
%div = fdiv float %b.0.b.0., %0
ret float %div
define float @foosf9() #0 {
entry:
%b = alloca float, align 4
- %b.0.b.0. = load volatile float, float* %b, align 4
+ %b.0.b.0. = load volatile float, ptr %b, align 4
%conv = fpext float %b.0.b.0. to double
- %b.0.b.0.3 = load volatile float, float* %b, align 4
+ %b.0.b.0.3 = load volatile float, ptr %b, align 4
%conv1 = fpext float %b.0.b.0.3 to double
%call = tail call double @pow(double %conv, double %conv1) #1
%conv2 = fptrunc double %call to float
define float @foosf10() #0 {
entry:
%a = alloca float, align 4
- %a.0.a.0. = load volatile float, float* %a, align 4
+ %a.0.a.0. = load volatile float, ptr %a, align 4
%conv = fpext float %a.0.a.0. to double
%call = tail call double @sin(double %conv) #1
%conv1 = fptrunc double %call to float
define float @foosf11() #0 {
entry:
%b = alloca float, align 4
- %b.0.b.0. = load volatile float, float* %b, align 4
+ %b.0.b.0. = load volatile float, ptr %b, align 4
%call = tail call float @ceilf(float %b.0.b.0.) #2
ret float %call
entry:
%b = alloca float, align 4
%a = alloca float, align 4
- %b.0.b.0. = load volatile float, float* %b, align 4
- %a.0.a.0. = load volatile float, float* %a, align 4
+ %b.0.b.0. = load volatile float, ptr %b, align 4
+ %a.0.a.0. = load volatile float, ptr %a, align 4
%call = tail call float @fmaxf(float %b.0.b.0., float %a.0.a.0.) #2
ret float %call
entry:
%b = alloca float, align 4
%a = alloca float, align 4
- %b.0.b.0. = load volatile float, float* %b, align 4
- %a.0.a.0. = load volatile float, float* %a, align 4
+ %b.0.b.0. = load volatile float, ptr %b, align 4
+ %a.0.a.0. = load volatile float, ptr %a, align 4
%call = tail call float @fminf(float %b.0.b.0., float %a.0.a.0.) #2
ret float %call
define double @foo() #0 {
entry:
%x = alloca i32, align 4
- store volatile i32 -32, i32* %x, align 4
- %0 = load volatile i32, i32* %x, align 4
+ store volatile i32 -32, ptr %x, align 4
+ %0 = load volatile i32, ptr %x, align 4
%conv = sitofp i32 %0 to double
ret double %conv
; This test does not check the machine code output.
; RUN: llc -march=mips < %s
-@stat_vol_ptr_int = internal global i32* null, align 4
-@stat_ptr_vol_int = internal global i32* null, align 4
+@stat_vol_ptr_int = internal global ptr null, align 4
+@stat_ptr_vol_int = internal global ptr null, align 4
; Prefetch through a volatile-loaded pointer and a plainly-loaded pointer.
; Under opaque pointers the i32* -> i8* bitcasts become no-ops, so the
; migration correctly deletes them and passes the loaded ptr directly.
define void @simple_vol_file() nounwind {
entry:
- %tmp = load volatile i32*, i32** @stat_vol_ptr_int, align 4
- %0 = bitcast i32* %tmp to i8*
- call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
- %tmp1 = load i32*, i32** @stat_ptr_vol_int, align 4
- %1 = bitcast i32* %tmp1 to i8*
- call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+ %tmp = load volatile ptr, ptr @stat_vol_ptr_int, align 4
+ call void @llvm.prefetch(ptr %tmp, i32 0, i32 0, i32 1)
+ %tmp1 = load ptr, ptr @stat_ptr_vol_int, align 4
+ call void @llvm.prefetch(ptr %tmp1, i32 0, i32 0, i32 1)
ret void
}
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
; Byte-accumulation loop: repeatedly adds *from++ into *to, %conv iterations
; (arg 2 sign-extended from i16 via shl/ashr). Diff is the opaque-pointer
; migration only.
; NOTE(review): the diff context elides the definition of %cmp8 and the
; `for.end:` label before the final ret — incomplete as shown; confirm
; against the full test file.
-define i32 @sumc(i8* nocapture %to, i8* nocapture %from, i32) nounwind {
+define i32 @sumc(ptr nocapture %to, ptr nocapture %from, i32) nounwind {
entry:
%sext = shl i32 %0, 16
%conv = ashr exact i32 %sext, 16
br i1 %cmp8, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %entry
- %.pre = load i8, i8* %to, align 1
+ %.pre = load i8, ptr %to, align 1
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i8, i8* %from.addr.09, i32 1
- %2 = load i8, i8* %from.addr.09, align 1
+ %from.addr.09 = phi ptr [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %from.addr.09, i32 1
+ %2 = load i8, ptr %from.addr.09, align 1
%conv27 = zext i8 %2 to i32
%conv36 = zext i8 %1 to i32
%add = add nsw i32 %conv36, %conv27
%conv4 = trunc i32 %add to i8
- store i8 %conv4, i8* %to, align 1
+ store i8 %conv4, ptr %to, align 1
%inc = add nsw i32 %i.010, 1
%cmp = icmp eq i32 %inc, %conv
br i1 %cmp, label %for.end, label %for.body
ret i32 undef
}
-define i32 @sum(i16* nocapture %to, i16* nocapture %from, i32) nounwind {
+define i32 @sum(ptr nocapture %to, ptr nocapture %from, i32) nounwind {
entry:
%sext = shl i32 %0, 16
%conv = ashr exact i32 %sext, 16
br i1 %cmp8, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %entry
- %.pre = load i16, i16* %to, align 2
+ %.pre = load i16, ptr %to, align 2
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
- %from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i16, i16* %from.addr.09, i32 1
- %2 = load i16, i16* %from.addr.09, align 2
+ %from.addr.09 = phi ptr [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i16, ptr %from.addr.09, i32 1
+ %2 = load i16, ptr %from.addr.09, align 2
%conv27 = zext i16 %2 to i32
%conv36 = zext i16 %1 to i32
%add = add nsw i32 %conv36, %conv27
%conv4 = trunc i32 %add to i16
- store i16 %conv4, i16* %to, align 2
+ store i16 %conv4, ptr %to, align 2
%inc = add nsw i32 %i.010, 1
%cmp = icmp eq i32 %inc, %conv
br i1 %cmp, label %for.end, label %for.body
; ldc1/sdc1 selection: plain double load/store of a global.
; Diff hunks are the opaque-pointer migration only.
define double @test_ldc1() {
entry:
- %0 = load double, double* @g0, align 8
+ %0 = load double, ptr @g0, align 8
ret double %0
}
define void @test_sdc1(double %a) {
entry:
- store double %a, double* @g0, align 8
+ store double %a, ptr @g0, align 8
ret void
}
; MM-STATIC-PIC: addu16 $[[R1:[0-9]+]], $4, $[[R0]]
; MM-STATIC-PIC: ldc1 $f0, 0($[[R1]])
; Indexed double load (base + i*8): exercises ldxc1 / addu+ldc1 selection
; (see preceding MM-STATIC-PIC CHECKs). Diff is the opaque-pointer migration.
-define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
+define double @test_ldxc1(ptr nocapture readonly %a, i32 %i) {
entry:
- %arrayidx = getelementptr inbounds double, double* %a, i32 %i
- %0 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %a, i32 %i
+ %0 = load double, ptr %arrayidx, align 8
ret double %0
}
; MM-STATIC-PIC: addu16 $[[R1:[0-9]+]], $6, $[[R0]]
; MM-STATIC-PIC: sdc1 $f12, 0($[[R1]])
; Indexed double store (base + i*8): exercises sdxc1 / addu+sdc1 selection
; (see preceding MM-STATIC-PIC CHECKs). Diff is the opaque-pointer migration.
-define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
+define void @test_sdxc1(double %b, ptr nocapture %a, i32 %i) {
entry:
- %arrayidx = getelementptr inbounds double, double* %a, i32 %i
- store double %b, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %a, i32 %i
+ store double %b, ptr %arrayidx, align 8
ret void
}
; MSA nloc.{b,h,w,d} intrinsic tests: load a vector global, apply the
; intrinsic, store to the _RES global. Diff hunks are the typed-pointer ->
; opaque `ptr` migration only; instruction selection is unchanged.
define void @llvm_mips_nloc_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nloc_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_nloc_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_nloc_b_RES
ret void
}
define void @llvm_mips_nloc_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nloc_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_nloc_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_nloc_h_RES
ret void
}
define void @llvm_mips_nloc_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nloc_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_nloc_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_nloc_w_RES
ret void
}
define void @llvm_mips_nloc_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nloc_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_nloc_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_nloc_d_RES
ret void
}
; MSA nlzc.{b,h,w,d} (count leading zeros) intrinsic tests: load ARG1, apply,
; store to RES. Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_nlzc_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nlzc_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_nlzc_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_nlzc_b_RES
ret void
}
define void @llvm_mips_nlzc_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nlzc_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_nlzc_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_nlzc_h_RES
ret void
}
define void @llvm_mips_nlzc_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nlzc_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_nlzc_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_nlzc_w_RES
ret void
}
define void @llvm_mips_nlzc_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nlzc_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_nlzc_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_nlzc_d_RES
ret void
}
; MSA pcnt.{b,h,w,d} (population count) intrinsic tests: load ARG1, apply,
; store to RES. Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_pcnt_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pcnt_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_pcnt_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_pcnt_b_RES
ret void
}
define void @llvm_mips_pcnt_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pcnt_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_pcnt_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_pcnt_h_RES
ret void
}
define void @llvm_mips_pcnt_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pcnt_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_pcnt_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_pcnt_w_RES
ret void
}
define void @llvm_mips_pcnt_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pcnt_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_pcnt_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_pcnt_d_RES
ret void
}
; MSA fill.{b,h,w,d} intrinsic tests: load a scalar (i32/i64) global,
; broadcast it across a vector, store to RES. Diff hunks are the
; opaque-pointer migration only.
define void @llvm_mips_fill_b_test() nounwind {
entry:
- %0 = load i32, i32* @llvm_mips_fill_b_ARG1
+ %0 = load i32, ptr @llvm_mips_fill_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_fill_b_RES
ret void
}
define void @llvm_mips_fill_h_test() nounwind {
entry:
- %0 = load i32, i32* @llvm_mips_fill_h_ARG1
+ %0 = load i32, ptr @llvm_mips_fill_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_fill_h_RES
ret void
}
define void @llvm_mips_fill_w_test() nounwind {
entry:
- %0 = load i32, i32* @llvm_mips_fill_w_ARG1
+ %0 = load i32, ptr @llvm_mips_fill_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_fill_w_RES
ret void
}
define void @llvm_mips_fill_d_test() nounwind {
entry:
- %0 = load i64, i64* @llvm_mips_fill_d_ARG1
+ %0 = load i64, ptr @llvm_mips_fill_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_fill_d_RES
ret void
}
; flog2 tests: both the MSA-specific llvm.mips.flog2.{w,d} intrinsics and the
; generic llvm.log2 vector intrinsics (which should also select flog2).
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_flog2_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_flog2_w_ARG1
%1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ store <4 x float> %1, ptr @llvm_mips_flog2_w_RES
ret void
}
define void @llvm_mips_flog2_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_flog2_d_ARG1
%1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ store <2 x double> %1, ptr @llvm_mips_flog2_d_RES
ret void
}
define void @flog2_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_flog2_w_ARG1
%1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ store <4 x float> %1, ptr @llvm_mips_flog2_w_RES
ret void
}
define void @flog2_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_flog2_d_ARG1
%1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ store <2 x double> %1, ptr @llvm_mips_flog2_d_RES
ret void
}
; frint tests: MSA llvm.mips.frint.{w,d} plus the generic llvm.rint vector
; intrinsics (expected to select the same frint instructions).
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_frint_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_frint_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ store <4 x float> %1, ptr @llvm_mips_frint_w_RES
ret void
}
define void @llvm_mips_frint_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_frint_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ store <2 x double> %1, ptr @llvm_mips_frint_d_RES
ret void
}
define void @frint_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_frint_w_ARG1
%1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ store <4 x float> %1, ptr @llvm_mips_frint_w_RES
ret void
}
define void @frint_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_frint_d_ARG1
%1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ store <2 x double> %1, ptr @llvm_mips_frint_d_RES
ret void
}
; MSA frcp.{w,d} (approximate reciprocal) intrinsic tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_frcp_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_frcp_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_frcp_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
+ store <4 x float> %1, ptr @llvm_mips_frcp_w_RES
ret void
}
define void @llvm_mips_frcp_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_frcp_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_frcp_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
+ store <2 x double> %1, ptr @llvm_mips_frcp_d_RES
ret void
}
; MSA frsqrt.{w,d} (approximate reciprocal square root) intrinsic tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_frsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_frsqrt_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_frsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
+ store <4 x float> %1, ptr @llvm_mips_frsqrt_w_RES
ret void
}
define void @llvm_mips_frsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_frsqrt_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_frsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
+ store <2 x double> %1, ptr @llvm_mips_frsqrt_d_RES
ret void
}
; fsqrt tests: MSA llvm.mips.fsqrt.{w,d} plus the generic llvm.sqrt vector
; intrinsics (expected to select the same fsqrt instructions).
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_fsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_fsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ store <4 x float> %1, ptr @llvm_mips_fsqrt_w_RES
ret void
}
define void @llvm_mips_fsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_fsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ store <2 x double> %1, ptr @llvm_mips_fsqrt_d_RES
ret void
}
define void @fsqrt_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_fsqrt_w_ARG1
%1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ store <4 x float> %1, ptr @llvm_mips_fsqrt_w_RES
ret void
}
define void @fsqrt_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_fsqrt_d_ARG1
%1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ store <2 x double> %1, ptr @llvm_mips_fsqrt_d_RES
ret void
}
; MSA fexupl/fexupr (expand left/right half of a vector to wider FP) tests.
; Note the <8 x half> operands for the .w variants.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_fexupl_w_test() nounwind {
entry:
- %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupl_w_ARG1
+ %0 = load <8 x half>, ptr @llvm_mips_fexupl_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
+ store <4 x float> %1, ptr @llvm_mips_fexupl_w_RES
ret void
}
define void @llvm_mips_fexupl_d_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupl_d_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_fexupl_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
+ store <2 x double> %1, ptr @llvm_mips_fexupl_d_RES
ret void
}
define void @llvm_mips_fexupr_w_test() nounwind {
entry:
- %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupr_w_ARG1
+ %0 = load <8 x half>, ptr @llvm_mips_fexupr_w_ARG1
%1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
+ store <4 x float> %1, ptr @llvm_mips_fexupr_w_RES
ret void
}
define void @llvm_mips_fexupr_d_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupr_d_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_fexupr_d_ARG1
%1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
+ store <2 x double> %1, ptr @llvm_mips_fexupr_d_RES
ret void
}
; MSA ffint_{s,u}.{w,d} (signed/unsigned int -> float conversion) tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_ffint_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_ffint_s_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
+ store <4 x float> %1, ptr @llvm_mips_ffint_s_w_RES
ret void
}
define void @llvm_mips_ffint_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_ffint_s_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
+ store <2 x double> %1, ptr @llvm_mips_ffint_s_d_RES
ret void
}
define void @llvm_mips_ffint_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_ffint_u_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
+ store <4 x float> %1, ptr @llvm_mips_ffint_u_w_RES
ret void
}
define void @llvm_mips_ffint_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_ffint_u_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES
+ store <2 x double> %1, ptr @llvm_mips_ffint_u_d_RES
ret void
}
; MSA ffql/ffqr (fixed-point to float, left/right half) tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_ffql_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffql_w_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_ffql_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES
+ store <4 x float> %1, ptr @llvm_mips_ffql_w_RES
ret void
}
define void @llvm_mips_ffql_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffql_d_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_ffql_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES
+ store <2 x double> %1, ptr @llvm_mips_ffql_d_RES
ret void
}
define void @llvm_mips_ffqr_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffqr_w_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_ffqr_w_ARG1
%1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0)
- store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES
+ store <4 x float> %1, ptr @llvm_mips_ffqr_w_RES
ret void
}
define void @llvm_mips_ffqr_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffqr_d_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_ffqr_d_ARG1
%1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0)
- store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES
+ store <2 x double> %1, ptr @llvm_mips_ffqr_d_RES
ret void
}
; MSA fclass.{w,d} (FP classify -> integer mask) intrinsic tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_fclass_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fclass_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_fclass_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_fclass_w_RES
ret void
}
define void @llvm_mips_fclass_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fclass_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_fclass_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_fclass_d_RES
ret void
}
; MSA ftrunc_{s,u}.{w,d} (float -> signed/unsigned int, truncating) tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_ftrunc_s_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_ftrunc_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_ftrunc_s_w_RES
ret void
}
define void @llvm_mips_ftrunc_s_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_ftrunc_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_ftrunc_s_d_RES
ret void
}
define void @llvm_mips_ftrunc_u_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_ftrunc_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_ftrunc_u_w_RES
ret void
}
define void @llvm_mips_ftrunc_u_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_ftrunc_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_ftrunc_u_d_RES
ret void
}
; MSA ftint_{s,u}.{w,d} (float -> signed/unsigned int, round-to-nearest) tests.
; Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_ftint_s_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_s_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_ftint_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_ftint_s_w_RES
ret void
}
define void @llvm_mips_ftint_s_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_s_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_ftint_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_ftint_s_d_RES
ret void
}
define void @llvm_mips_ftint_u_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_u_w_ARG1
+ %0 = load <4 x float>, ptr @llvm_mips_ftint_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_ftint_u_w_RES
ret void
}
define void @llvm_mips_ftint_u_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_u_d_ARG1
+ %0 = load <2 x double>, ptr @llvm_mips_ftint_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_ftint_u_d_RES
ret void
}
; MSA ftq.{h,w} (two float vectors -> one fixed-point vector) tests; these
; take two ARG globals. Diff hunks are the opaque-pointer migration only.
define void @llvm_mips_ftq_h_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_ftq_h_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_ftq_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ftq_h_RES
ret void
}
define void @llvm_mips_ftq_w_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_ftq_w_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_ftq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ftq_w_RES
ret void
}
define void @llvm_mips_add_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_add_a_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_add_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_add_a_b_RES
ret void
}
define void @llvm_mips_add_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_add_a_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_add_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_add_a_h_RES
ret void
}
define void @llvm_mips_add_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_add_a_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_add_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_add_a_w_RES
ret void
}
define void @llvm_mips_add_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_add_a_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_add_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_add_a_d_RES
ret void
}
define void @llvm_mips_adds_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_adds_a_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_adds_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_adds_a_b_RES
ret void
}
define void @llvm_mips_adds_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_adds_a_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_adds_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_adds_a_h_RES
ret void
}
define void @llvm_mips_adds_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_adds_a_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_adds_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_adds_a_w_RES
ret void
}
define void @llvm_mips_adds_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_adds_a_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_adds_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_adds_a_d_RES
ret void
}
define void @llvm_mips_adds_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_adds_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_adds_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_adds_s_b_RES
ret void
}
define void @llvm_mips_adds_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_adds_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_adds_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_adds_s_h_RES
ret void
}
define void @llvm_mips_adds_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_adds_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_adds_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_adds_s_w_RES
ret void
}
define void @llvm_mips_adds_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_adds_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_adds_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_adds_s_d_RES
ret void
}
define void @llvm_mips_adds_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_adds_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_adds_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_adds_u_b_RES
ret void
}
define void @llvm_mips_adds_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_adds_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_adds_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_adds_u_h_RES
ret void
}
define void @llvm_mips_adds_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_adds_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_adds_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_adds_u_w_RES
ret void
}
define void @llvm_mips_adds_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_adds_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_adds_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_adds_u_d_RES
ret void
}
define void @llvm_mips_addv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_addv_b_RES
ret void
}
define void @llvm_mips_addv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_addv_h_RES
ret void
}
define void @llvm_mips_addv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_addv_w_RES
ret void
}
define void @llvm_mips_addv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_addv_d_RES
ret void
}
define void @addv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG2
%2 = add <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_addv_b_RES
ret void
}
define void @addv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG2
%2 = add <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_addv_h_RES
ret void
}
define void @addv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG2
%2 = add <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_addv_w_RES
ret void
}
define void @addv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG2
%2 = add <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_addv_d_RES
ret void
}
define void @llvm_mips_asub_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_asub_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_asub_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_asub_s_b_RES
ret void
}
define void @llvm_mips_asub_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_asub_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_asub_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_asub_s_h_RES
ret void
}
define void @llvm_mips_asub_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_asub_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_asub_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_asub_s_w_RES
ret void
}
define void @llvm_mips_asub_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_asub_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_asub_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_asub_s_d_RES
ret void
}
define void @llvm_mips_asub_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_asub_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_asub_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_asub_u_b_RES
ret void
}
define void @llvm_mips_asub_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_asub_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_asub_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_asub_u_h_RES
ret void
}
define void @llvm_mips_asub_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_asub_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_asub_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_asub_u_w_RES
ret void
}
define void @llvm_mips_asub_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_asub_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_asub_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_asub_u_d_RES
ret void
}
define void @llvm_mips_ave_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ave_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ave_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ave_s_b_RES
ret void
}
define void @llvm_mips_ave_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ave_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ave_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ave_s_h_RES
ret void
}
define void @llvm_mips_ave_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ave_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ave_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ave_s_w_RES
ret void
}
define void @llvm_mips_ave_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ave_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ave_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ave_s_d_RES
ret void
}
define void @llvm_mips_ave_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ave_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ave_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ave_u_b_RES
ret void
}
define void @llvm_mips_ave_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ave_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ave_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ave_u_h_RES
ret void
}
define void @llvm_mips_ave_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ave_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ave_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ave_u_w_RES
ret void
}
define void @llvm_mips_ave_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ave_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ave_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ave_u_d_RES
ret void
}
define void @llvm_mips_aver_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_aver_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_aver_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_aver_s_b_RES
ret void
}
define void @llvm_mips_aver_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_aver_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_aver_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_aver_s_h_RES
ret void
}
define void @llvm_mips_aver_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_aver_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_aver_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_aver_s_w_RES
ret void
}
define void @llvm_mips_aver_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_aver_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_aver_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_aver_s_d_RES
ret void
}
define void @llvm_mips_aver_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_aver_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_aver_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_aver_u_b_RES
ret void
}
define void @llvm_mips_aver_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_aver_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_aver_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_aver_u_h_RES
ret void
}
define void @llvm_mips_aver_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_aver_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_aver_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_aver_u_w_RES
ret void
}
define void @llvm_mips_aver_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_aver_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_aver_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_aver_u_d_RES
ret void
}
define void @llvm_mips_bclr_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bclr_b_RES
ret void
}
define void @llvm_mips_bclr_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bclr_h_RES
ret void
}
define void @llvm_mips_bclr_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bclr_w_RES
ret void
}
define void @llvm_mips_bclr_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bclr_d_RES
ret void
}
define void @llvm_mips_binsl_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_binsl_b_RES
ret void
}
define void @llvm_mips_binsl_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_binsl_h_RES
ret void
}
define void @llvm_mips_binsl_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_binsl_w_RES
ret void
}
define void @llvm_mips_binsl_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_binsl_d_RES
ret void
}
define void @llvm_mips_binsr_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_binsr_b_RES
ret void
}
define void @llvm_mips_binsr_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_binsr_h_RES
ret void
}
define void @llvm_mips_binsr_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_binsr_w_RES
ret void
}
define void @llvm_mips_binsr_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_binsr_d_RES
ret void
}
define void @llvm_mips_bneg_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bneg_b_RES
ret void
}
define void @llvm_mips_bneg_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bneg_h_RES
ret void
}
define void @llvm_mips_bneg_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bneg_w_RES
ret void
}
define void @llvm_mips_bneg_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bneg_d_RES
ret void
}
define void @llvm_mips_bset_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bset_b_RES
ret void
}
define void @llvm_mips_bset_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bset_h_RES
ret void
}
define void @llvm_mips_bset_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bset_w_RES
ret void
}
define void @llvm_mips_bset_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bset_d_RES
ret void
}
define void @llvm_mips_ceq_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ceq_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ceq_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ceq_b_RES
ret void
}
define void @llvm_mips_ceq_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ceq_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ceq_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ceq_h_RES
ret void
}
define void @llvm_mips_ceq_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ceq_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ceq_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ceq_w_RES
ret void
}
define void @llvm_mips_ceq_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ceq_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ceq_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ceq_d_RES
ret void
}
define void @llvm_mips_cle_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_cle_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_cle_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_cle_s_b_RES
ret void
}
define void @llvm_mips_cle_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_cle_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_cle_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_cle_s_h_RES
ret void
}
define void @llvm_mips_cle_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_cle_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_cle_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_cle_s_w_RES
ret void
}
define void @llvm_mips_cle_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_cle_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_cle_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_cle_s_d_RES
ret void
}
define void @llvm_mips_cle_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_cle_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_cle_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_cle_u_b_RES
ret void
}
define void @llvm_mips_cle_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_cle_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_cle_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_cle_u_h_RES
ret void
}
define void @llvm_mips_cle_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_cle_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_cle_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_cle_u_w_RES
ret void
}
define void @llvm_mips_cle_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_cle_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_cle_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_cle_u_d_RES
ret void
}
define void @llvm_mips_clt_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_clt_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_clt_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_clt_s_b_RES
ret void
}
define void @llvm_mips_clt_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_clt_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_clt_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_clt_s_h_RES
ret void
}
define void @llvm_mips_clt_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_clt_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_clt_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_clt_s_w_RES
ret void
}
define void @llvm_mips_clt_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_clt_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_clt_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_clt_s_d_RES
ret void
}
define void @llvm_mips_clt_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_clt_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_clt_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_clt_u_b_RES
ret void
}
define void @llvm_mips_clt_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_clt_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_clt_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_clt_u_h_RES
ret void
}
define void @llvm_mips_clt_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_clt_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_clt_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_clt_u_w_RES
ret void
}
define void @llvm_mips_clt_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_clt_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_clt_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_clt_u_d_RES
ret void
}
define void @llvm_mips_div_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_div_s_b_RES
ret void
}
define void @llvm_mips_div_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_div_s_h_RES
ret void
}
define void @llvm_mips_div_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_div_s_w_RES
ret void
}
define void @llvm_mips_div_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_div_s_d_RES
ret void
}
define void @div_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG2
%2 = sdiv <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_div_s_b_RES
ret void
}
define void @div_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG2
%2 = sdiv <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_div_s_h_RES
ret void
}
define void @div_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG2
%2 = sdiv <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_div_s_w_RES
ret void
}
define void @div_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG2
%2 = sdiv <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_div_s_d_RES
ret void
}
define void @llvm_mips_div_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_div_u_b_RES
ret void
}
define void @llvm_mips_div_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_div_u_h_RES
ret void
}
define void @llvm_mips_div_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_div_u_w_RES
ret void
}
define void @llvm_mips_div_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_div_u_d_RES
ret void
}
define void @div_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG2
%2 = udiv <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_div_u_b_RES
ret void
}
define void @div_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG2
%2 = udiv <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_div_u_h_RES
ret void
}
define void @div_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG2
%2 = udiv <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_div_u_w_RES
ret void
}
define void @div_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG2
%2 = udiv <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_div_u_d_RES
ret void
}
define void @llvm_mips_dotp_s_h_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_dotp_s_h_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_dotp_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_dotp_s_h_RES
ret void
}
define void @llvm_mips_dotp_s_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_dotp_s_w_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_dotp_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_dotp_s_w_RES
ret void
}
define void @llvm_mips_dotp_s_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_dotp_s_d_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_dotp_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_dotp_s_d_RES
ret void
}
define void @llvm_mips_dotp_u_h_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_dotp_u_h_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_dotp_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_dotp_u_h_RES
ret void
}
define void @llvm_mips_dotp_u_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_dotp_u_w_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_dotp_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_dotp_u_w_RES
ret void
}
define void @llvm_mips_dotp_u_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_dotp_u_d_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_dotp_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_dotp_u_d_RES
ret void
}
define void @llvm_mips_ilvev_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ilvev_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ilvev_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ilvev_b_RES
ret void
}
define void @llvm_mips_ilvev_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ilvev_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ilvev_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ilvev_h_RES
ret void
}
define void @llvm_mips_ilvev_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ilvev_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ilvev_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ilvev_w_RES
ret void
}
define void @llvm_mips_ilvev_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ilvev_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ilvev_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ilvev_d_RES
ret void
}
define void @llvm_mips_ilvl_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ilvl_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ilvl_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ilvl_b_RES
ret void
}
define void @llvm_mips_ilvl_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ilvl_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ilvl_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ilvl_h_RES
ret void
}
define void @llvm_mips_ilvl_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ilvl_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ilvl_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ilvl_w_RES
ret void
}
define void @llvm_mips_ilvl_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ilvl_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ilvl_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ilvl_d_RES
ret void
}
define void @llvm_mips_ilvod_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ilvod_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ilvod_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ilvod_b_RES
ret void
}
define void @llvm_mips_ilvod_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ilvod_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ilvod_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ilvod_h_RES
ret void
}
define void @llvm_mips_ilvod_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ilvod_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ilvod_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ilvod_w_RES
ret void
}
define void @llvm_mips_ilvod_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ilvod_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ilvod_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ilvod_d_RES
ret void
}
define void @llvm_mips_ilvr_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_ilvr_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_ilvr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_ilvr_b_RES
ret void
}
define void @llvm_mips_ilvr_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_ilvr_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_ilvr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_ilvr_h_RES
ret void
}
define void @llvm_mips_ilvr_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_ilvr_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_ilvr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_ilvr_w_RES
ret void
}
define void @llvm_mips_ilvr_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_ilvr_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_ilvr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_ilvr_d_RES
ret void
}
define void @llvm_mips_max_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_max_a_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_max_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_max_a_b_RES
ret void
}
define void @llvm_mips_max_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_max_a_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_max_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_max_a_h_RES
ret void
}
define void @llvm_mips_max_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_max_a_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_max_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_max_a_w_RES
ret void
}
define void @llvm_mips_max_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_max_a_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_max_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_max_a_d_RES
ret void
}
define void @llvm_mips_max_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_max_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_max_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_max_s_b_RES
ret void
}
define void @llvm_mips_max_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_max_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_max_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_max_s_h_RES
ret void
}
; NOTE(review): mechanically generated MSA max.s / max.u intrinsic tests,
; shown as a typed-pointer -> opaque-pointer ("ptr") migration diff.
; Pattern per test: load two vector globals, apply one llvm.mips.max.*
; intrinsic, store the result. Presumably paired with CHECK lines elsewhere
; in the file -- do not reflow the `-`/`+` marker lines.
define void @llvm_mips_max_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_max_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_max_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_max_s_w_RES
ret void
}
define void @llvm_mips_max_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_max_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_max_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_max_s_d_RES
ret void
}
; Unsigned max: same load/intrinsic/store shape across the four MSA element
; widths b (<16 x i8>), h (<8 x i16>), w (<4 x i32>), d (<2 x i64>).
define void @llvm_mips_max_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_max_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_max_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_max_u_b_RES
ret void
}
define void @llvm_mips_max_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_max_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_max_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_max_u_h_RES
ret void
}
define void @llvm_mips_max_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_max_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_max_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_max_u_w_RES
ret void
}
define void @llvm_mips_max_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_max_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_max_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_max_u_d_RES
ret void
}
; Generated tests for the MSA min.a (minimum of absolute values) intrinsics,
; one per element width b/h/w/d. Same load-two-globals / call / store
; pattern; `-`/`+` lines record the typed-pointer -> `ptr` migration.
define void @llvm_mips_min_a_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_min_a_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_min_a_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_min_a_b_RES
ret void
}
define void @llvm_mips_min_a_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_min_a_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_min_a_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_min_a_h_RES
ret void
}
define void @llvm_mips_min_a_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_min_a_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_min_a_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_min_a_w_RES
ret void
}
define void @llvm_mips_min_a_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_min_a_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_min_a_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_min_a_d_RES
ret void
}
; Generated tests for the MSA signed-minimum (min.s) intrinsics, b/h/w/d.
define void @llvm_mips_min_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_min_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_min_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_min_s_b_RES
ret void
}
define void @llvm_mips_min_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_min_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_min_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_min_s_h_RES
ret void
}
define void @llvm_mips_min_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_min_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_min_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_min_s_w_RES
ret void
}
define void @llvm_mips_min_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_min_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_min_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_min_s_d_RES
ret void
}
; Generated tests for the MSA unsigned-minimum (min.u) intrinsics, b/h/w/d.
define void @llvm_mips_min_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_min_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_min_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_min_u_b_RES
ret void
}
define void @llvm_mips_min_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_min_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_min_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_min_u_h_RES
ret void
}
define void @llvm_mips_min_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_min_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_min_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_min_u_w_RES
ret void
}
define void @llvm_mips_min_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_min_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_min_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_min_u_d_RES
ret void
}
; Generated tests for the MSA modulo intrinsics: signed (mod.s) then
; unsigned (mod.u), each across the four element widths b/h/w/d.
define void @llvm_mips_mod_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_mod_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_mod_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_mod_s_b_RES
ret void
}
define void @llvm_mips_mod_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mod_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mod_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mod_s_h_RES
ret void
}
define void @llvm_mips_mod_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mod_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mod_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mod_s_w_RES
ret void
}
define void @llvm_mips_mod_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_mod_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_mod_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_mod_s_d_RES
ret void
}
define void @llvm_mips_mod_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_mod_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_mod_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_mod_u_b_RES
ret void
}
define void @llvm_mips_mod_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mod_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mod_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mod_u_h_RES
ret void
}
define void @llvm_mips_mod_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mod_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mod_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mod_u_w_RES
ret void
}
define void @llvm_mips_mod_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_mod_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_mod_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_mod_u_d_RES
ret void
}
; Generated mulv tests, two flavors per width: llvm_mips_mulv_* goes
; through the @llvm.mips.mulv.* intrinsic, while the companion mulv_*_test
; uses the generic IR `mul` on the same globals (presumably both should
; select the same MSA instruction -- confirm against the CHECK lines).
define void @llvm_mips_mulv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_mulv_b_RES
ret void
}
define void @llvm_mips_mulv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mulv_h_RES
ret void
}
define void @llvm_mips_mulv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mulv_w_RES
ret void
}
define void @llvm_mips_mulv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_mulv_d_RES
ret void
}
; Generic-IR variants: same globals, plain `mul` instead of the intrinsic.
define void @mulv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG2
%2 = mul <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_mulv_b_RES
ret void
}
define void @mulv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG2
%2 = mul <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mulv_h_RES
ret void
}
define void @mulv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG2
%2 = mul <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mulv_w_RES
ret void
}
define void @mulv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG2
%2 = mul <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_mulv_d_RES
ret void
}
; Generated tests for the MSA pack-even (pckev) and pack-odd (pckod)
; element intrinsics, each across widths b/h/w/d.
define void @llvm_mips_pckev_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_pckev_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_pckev_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_pckev_b_RES
ret void
}
define void @llvm_mips_pckev_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_pckev_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_pckev_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_pckev_h_RES
ret void
}
define void @llvm_mips_pckev_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_pckev_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_pckev_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_pckev_w_RES
ret void
}
define void @llvm_mips_pckev_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_pckev_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_pckev_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_pckev_d_RES
ret void
}
define void @llvm_mips_pckod_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_pckod_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_pckod_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_pckod_b_RES
ret void
}
define void @llvm_mips_pckod_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_pckod_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_pckod_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_pckod_h_RES
ret void
}
define void @llvm_mips_pckod_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_pckod_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_pckod_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_pckod_w_RES
ret void
}
define void @llvm_mips_pckod_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_pckod_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_pckod_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_pckod_d_RES
ret void
}
; Generated tests for the MSA columns-slide (sld) intrinsics. Unlike the
; two-operand tests above, these take a third scalar i32 operand (ARG3),
; loaded from its own global, as the slide amount.
define void @llvm_mips_sld_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG2
- %2 = load i32, i32* @llvm_mips_sld_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_sld_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sld_b_ARG2
+ %2 = load i32, ptr @llvm_mips_sld_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_sld_b_RES
ret void
}
define void @llvm_mips_sld_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG2
- %2 = load i32, i32* @llvm_mips_sld_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_sld_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sld_h_ARG2
+ %2 = load i32, ptr @llvm_mips_sld_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_sld_h_RES
ret void
}
define void @llvm_mips_sld_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG2
- %2 = load i32, i32* @llvm_mips_sld_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_sld_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sld_w_ARG2
+ %2 = load i32, ptr @llvm_mips_sld_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_sld_w_RES
ret void
}
define void @llvm_mips_sld_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG2
- %2 = load i32, i32* @llvm_mips_sld_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_sld_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sld_d_ARG2
+ %2 = load i32, ptr @llvm_mips_sld_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_sld_d_RES
ret void
}
; Generated shift-left tests: llvm_mips_sll_* via the @llvm.mips.sll.*
; intrinsic, plus sll_*_test companions that use generic IR `shl` on the
; same globals.
define void @llvm_mips_sll_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
ret void
}
define void @llvm_mips_sll_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
ret void
}
define void @llvm_mips_sll_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
ret void
}
define void @llvm_mips_sll_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
ret void
}
; Generic-IR variants: same globals, plain `shl` instead of the intrinsic.
define void @sll_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
%2 = shl <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
ret void
}
define void @sll_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
%2 = shl <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
ret void
}
define void @sll_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
%2 = shl <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
ret void
}
define void @sll_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
%2 = shl <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
ret void
}
; Generated arithmetic shift-right tests: llvm_mips_sra_* via the
; @llvm.mips.sra.* intrinsic, plus sra_*_test companions using generic
; IR `ashr` on the same globals.
define void @llvm_mips_sra_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
ret void
}
define void @llvm_mips_sra_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
ret void
}
define void @llvm_mips_sra_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
ret void
}
define void @llvm_mips_sra_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
ret void
}
; Generic-IR variants: same globals, plain `ashr` instead of the intrinsic.
define void @sra_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
%2 = ashr <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
ret void
}
define void @sra_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
%2 = ashr <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
ret void
}
define void @sra_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
%2 = ashr <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
ret void
}
define void @sra_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
%2 = ashr <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
ret void
}
; Generated tests for the MSA rounding arithmetic shift-right (srar)
; intrinsics, b/h/w/d. Intrinsic-only -- no generic-IR equivalent exists
; for the rounding behavior.
define void @llvm_mips_srar_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_srar_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_srar_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_srar_b_RES
ret void
}
define void @llvm_mips_srar_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_srar_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_srar_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_srar_h_RES
ret void
}
define void @llvm_mips_srar_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_srar_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_srar_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_srar_w_RES
ret void
}
define void @llvm_mips_srar_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_srar_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_srar_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_srar_d_RES
ret void
}
; Generated logical shift-right tests: plain srl intrinsics, then the
; rounding variant srlr, each across widths b/h/w/d.
define void @llvm_mips_srl_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
ret void
}
define void @llvm_mips_srl_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
ret void
}
define void @llvm_mips_srl_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
ret void
}
define void @llvm_mips_srl_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
ret void
}
; Rounding logical shift-right (srlr) intrinsics.
define void @llvm_mips_srlr_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_srlr_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_srlr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_srlr_b_RES
ret void
}
define void @llvm_mips_srlr_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_srlr_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_srlr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_srlr_h_RES
ret void
}
define void @llvm_mips_srlr_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_srlr_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_srlr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_srlr_w_RES
ret void
}
define void @llvm_mips_srlr_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_srlr_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_srlr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_srlr_d_RES
ret void
}
; Generic-IR logical shift-right companions to the srl intrinsic tests
; above: same globals, plain `lshr` instead of @llvm.mips.srl.*.
define void @srl_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
%2 = lshr <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
ret void
}
define void @srl_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
%2 = lshr <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
ret void
}
define void @srl_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
%2 = lshr <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
ret void
}
define void @srl_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
%2 = lshr <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
ret void
}
define void @llvm_mips_subs_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subs_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subs_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subs_s_b_RES
ret void
}
define void @llvm_mips_subs_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subs_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subs_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subs_s_h_RES
ret void
}
define void @llvm_mips_subs_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subs_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subs_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subs_s_w_RES
ret void
}
define void @llvm_mips_subs_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subs_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subs_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subs_s_d_RES
ret void
}
define void @llvm_mips_subs_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subs_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subs_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subs_u_b_RES
ret void
}
define void @llvm_mips_subs_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subs_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subs_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subs_u_h_RES
ret void
}
define void @llvm_mips_subs_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subs_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subs_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subs_u_w_RES
ret void
}
define void @llvm_mips_subs_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subs_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subs_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subs_u_d_RES
ret void
}
define void @llvm_mips_subsus_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subsus_u_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subsus_u_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subsus_u_b_RES
ret void
}
define void @llvm_mips_subsus_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subsus_u_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subsus_u_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subsus_u_h_RES
ret void
}
define void @llvm_mips_subsus_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subsus_u_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subsus_u_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subsus_u_w_RES
ret void
}
define void @llvm_mips_subsus_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subsus_u_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subsus_u_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subsus_u_d_RES
ret void
}
define void @llvm_mips_subsuu_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subsuu_s_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subsuu_s_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subsuu_s_b_RES
ret void
}
define void @llvm_mips_subsuu_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subsuu_s_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subsuu_s_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subsuu_s_h_RES
ret void
}
define void @llvm_mips_subsuu_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subsuu_s_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subsuu_s_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subsuu_s_w_RES
ret void
}
define void @llvm_mips_subsuu_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subsuu_s_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subsuu_s_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subsuu_s_d_RES
ret void
}
define void @llvm_mips_subv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subv_b_RES
ret void
}
define void @llvm_mips_subv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subv_h_RES
ret void
}
define void @llvm_mips_subv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subv_w_RES
ret void
}
define void @llvm_mips_subv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subv_d_RES
ret void
}
define void @subv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG2
%2 = sub <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_subv_b_RES
ret void
}
define void @subv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG2
%2 = sub <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_subv_h_RES
ret void
}
define void @subv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG2
%2 = sub <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_subv_w_RES
ret void
}
define void @subv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG2
%2 = sub <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_subv_d_RES
ret void
}
define void @llvm_mips_vshf_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_vshf_b_RES
ret void
}
define void @llvm_mips_vshf_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_vshf_h_RES
ret void
}
define void @llvm_mips_vshf_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_vshf_w_RES
ret void
}
define void @llvm_mips_vshf_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_vshf_d_RES
ret void
}
define void @llvm_mips_maddv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_maddv_b_RES
ret void
}
define void @llvm_mips_maddv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_maddv_h_RES
ret void
}
define void @llvm_mips_maddv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_maddv_w_RES
ret void
}
define void @llvm_mips_maddv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_maddv_d_RES
ret void
}
define void @llvm_mips_msubv_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
+ store <16 x i8> %3, ptr @llvm_mips_msubv_b_RES
ret void
}
define void @llvm_mips_msubv_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_msubv_h_RES
ret void
}
define void @llvm_mips_msubv_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_msubv_w_RES
ret void
}
define void @llvm_mips_msubv_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_msubv_d_RES
ret void
}
define void @llvm_mips_dpadd_s_h_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_dpadd_s_h_ARG2
+ %1 = load <16 x i8>, ptr @llvm_mips_dpadd_s_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_s_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_dpadd_s_h_RES
ret void
}
define void @llvm_mips_dpadd_s_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_dpadd_s_w_ARG2
+ %1 = load <8 x i16>, ptr @llvm_mips_dpadd_s_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_s_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_dpadd_s_w_RES
ret void
}
define void @llvm_mips_dpadd_s_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_dpadd_s_d_ARG2
+ %1 = load <4 x i32>, ptr @llvm_mips_dpadd_s_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_s_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_dpadd_s_d_RES
ret void
}
define void @llvm_mips_dpadd_u_h_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_dpadd_u_h_ARG2
+ %1 = load <16 x i8>, ptr @llvm_mips_dpadd_u_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_u_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_dpadd_u_h_RES
ret void
}
define void @llvm_mips_dpadd_u_w_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_dpadd_u_w_ARG2
+ %1 = load <8 x i16>, ptr @llvm_mips_dpadd_u_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_u_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_dpadd_u_w_RES
ret void
}
define void @llvm_mips_dpadd_u_d_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_dpadd_u_d_ARG2
+ %1 = load <4 x i32>, ptr @llvm_mips_dpadd_u_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_u_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_dpadd_u_d_RES
ret void
}
define void @llvm_mips_dpsub_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_dpsub_s_h_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_dpsub_s_h_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_dpsub_s_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_dpsub_s_h_RES
ret void
}
define void @llvm_mips_dpsub_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_dpsub_s_w_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_dpsub_s_w_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_dpsub_s_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_dpsub_s_w_RES
ret void
}
define void @llvm_mips_dpsub_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_dpsub_s_d_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_dpsub_s_d_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_dpsub_s_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_dpsub_s_d_RES
ret void
}
define void @llvm_mips_dpsub_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_dpsub_u_h_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_dpsub_u_h_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_dpsub_u_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
- store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
+ store <8 x i16> %3, ptr @llvm_mips_dpsub_u_h_RES
ret void
}
define void @llvm_mips_dpsub_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_dpsub_u_w_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_dpsub_u_w_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_dpsub_u_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
- store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
+ store <4 x i32> %3, ptr @llvm_mips_dpsub_u_w_RES
ret void
}
define void @llvm_mips_dpsub_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_dpsub_u_d_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_dpsub_u_d_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_dpsub_u_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
- store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
+ store <2 x i64> %3, ptr @llvm_mips_dpsub_u_d_RES
ret void
}
define void @llvm_mips_splat_b_test(i32 %a) nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splat_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_splat_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_splat_b_RES
ret void
}
define void @llvm_mips_splat_h_test(i32 %a) nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splat_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_splat_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_splat_h_RES
ret void
}
define void @llvm_mips_splat_w_test(i32 %a) nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splat_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_splat_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_splat_w_RES
ret void
}
define void @llvm_mips_splat_d_test(i32 %a) nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splat_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_splat_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_splat_d_RES
ret void
}
define void @llvm_mips_splat_d_arg_test(i32 %arg) {
entry:
%0 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> <i64 12720328, i64 10580959>, i32 %arg)
- store volatile <2 x i64> %0, <2 x i64>* @llvm_mips_splat_d_RES
+ store volatile <2 x i64> %0, ptr @llvm_mips_splat_d_RES
ret void
}
; MIPS32-LABEL: llvm_mips_splat_d_arg_test
define void @llvm_mips_splat_d_imm_test() {
entry:
%0 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> <i64 12720328, i64 10580959>, i32 76)
- store volatile<2 x i64> %0, <2 x i64>* @llvm_mips_splat_d_RES
+ store volatile <2 x i64> %0, ptr @llvm_mips_splat_d_RES
ret void
}
; MIPS32-LABEL: llvm_mips_splat_d_imm_test
define void @llvm_mips_fadd_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ store <4 x float> %2, ptr @llvm_mips_fadd_w_RES
ret void
}
define void @llvm_mips_fadd_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ store <2 x double> %2, ptr @llvm_mips_fadd_d_RES
ret void
}
define void @fadd_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG2
%2 = fadd <4 x float> %0, %1
- store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ store <4 x float> %2, ptr @llvm_mips_fadd_w_RES
ret void
}
define void @fadd_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG2
%2 = fadd <2 x double> %0, %1
- store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ store <2 x double> %2, ptr @llvm_mips_fadd_d_RES
ret void
}
define void @llvm_mips_fdiv_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG2
%2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
- store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ store <4 x float> %2, ptr @llvm_mips_fdiv_w_RES
ret void
}
define void @llvm_mips_fdiv_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG2
%2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
- store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ store <2 x double> %2, ptr @llvm_mips_fdiv_d_RES
ret void
}
define void @fdiv_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG2
%2 = fdiv <4 x float> %0, %1
- store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ store <4 x float> %2, ptr @llvm_mips_fdiv_w_RES
ret void
}
define void @fdiv_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG2
%2 = fdiv <2 x double> %0, %1
- store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ store <2 x double> %2, ptr @llvm_mips_fdiv_d_RES
ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Each test loads two vector globals, applies an MSA fmin/fmin_a intrinsic,
; and stores the result to the corresponding *_RES global.

define void @llvm_mips_fmin_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmin_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmin_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fmin_w_RES
  ret void
}

define void @llvm_mips_fmin_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmin_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmin_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fmin_d_RES
  ret void
}

define void @llvm_mips_fmin_a_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmin_a_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmin_a_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fmin_a_w_RES
  ret void
}

define void @llvm_mips_fmin_a_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmin_a_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmin_a_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fmin_a_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Each test loads two vector globals, applies an MSA fmax/fmax_a intrinsic,
; and stores the result to the corresponding *_RES global.

define void @llvm_mips_fmax_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmax_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmax_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fmax_w_RES
  ret void
}

define void @llvm_mips_fmax_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmax_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmax_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fmax_d_RES
  ret void
}

define void @llvm_mips_fmax_a_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmax_a_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmax_a_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fmax_a_w_RES
  ret void
}

define void @llvm_mips_fmax_a_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmax_a_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmax_a_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fmax_a_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Intrinsic tests exercise @llvm.mips.fmul.*; the *_test variants exercise
; generic IR fmul on the same globals (expected to select the same machine op).

define void @llvm_mips_fmul_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fmul_w_RES
  ret void
}

define void @llvm_mips_fmul_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fmul_d_RES
  ret void
}

define void @fmul_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG2
  %2 = fmul <4 x float> %0, %1
  store <4 x float> %2, ptr @llvm_mips_fmul_w_RES
  ret void
}

define void @fmul_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG2
  %2 = fmul <2 x double> %0, %1
  store <2 x double> %2, ptr @llvm_mips_fmul_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Intrinsic tests exercise @llvm.mips.fsub.*; the *_test variants exercise
; generic IR fsub on the same globals.

define void @llvm_mips_fsub_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, ptr @llvm_mips_fsub_w_RES
  ret void
}

define void @llvm_mips_fsub_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, ptr @llvm_mips_fsub_d_RES
  ret void
}

define void @fsub_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG2
  %2 = fsub <4 x float> %0, %1
  store <4 x float> %2, ptr @llvm_mips_fsub_w_RES
  ret void
}

define void @fsub_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG2
  %2 = fsub <2 x double> %0, %1
  store <2 x double> %2, ptr @llvm_mips_fsub_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Three-operand fused multiply-add/subtract intrinsic tests: load three
; vector globals, call the intrinsic, store to *_RES.

define void @llvm_mips_fmadd_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG2
  %2 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG3
  %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
  store <4 x float> %3, ptr @llvm_mips_fmadd_w_RES
  ret void
}

define void @llvm_mips_fmadd_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG2
  %2 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG3
  %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
  store <2 x double> %3, ptr @llvm_mips_fmadd_d_RES
  ret void
}

define void @llvm_mips_fmsub_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG2
  %2 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG3
  %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
  store <4 x float> %3, ptr @llvm_mips_fmsub_w_RES
  ret void
}

define void @llvm_mips_fmsub_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG2
  %2 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG3
  %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
  store <2 x double> %3, ptr @llvm_mips_fmsub_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Fixed-point multiply-add (madd_q) and rounding variant (maddr_q) tests on
; <8 x i16> and <4 x i32> operands.

define void @llvm_mips_madd_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG2
  %2 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, ptr @llvm_mips_madd_q_h_RES
  ret void
}

define void @llvm_mips_madd_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG2
  %2 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, ptr @llvm_mips_madd_q_w_RES
  ret void
}

define void @llvm_mips_maddr_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG2
  %2 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, ptr @llvm_mips_maddr_q_h_RES
  ret void
}

define void @llvm_mips_maddr_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG2
  %2 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, ptr @llvm_mips_maddr_q_w_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Fixed-point multiply-subtract (msub_q) and rounding variant (msubr_q) tests.

define void @llvm_mips_msub_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG2
  %2 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, ptr @llvm_mips_msub_q_h_RES
  ret void
}

define void @llvm_mips_msub_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG2
  %2 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, ptr @llvm_mips_msub_q_w_RES
  ret void
}

define void @llvm_mips_msubr_q_h_test() nounwind {
entry:
  %0 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG1
  %1 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG2
  %2 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG3
  %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
  store <8 x i16> %3, ptr @llvm_mips_msubr_q_h_RES
  ret void
}

define void @llvm_mips_msubr_q_w_test() nounwind {
entry:
  %0 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG2
  %2 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG3
  %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
  store <4 x i32> %3, ptr @llvm_mips_msubr_q_w_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; fexdo narrows two FP vectors to half-width elements; fexp2 scales an FP
; vector by integer powers of two.

define void @llvm_mips_fexdo_h_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fexdo_h_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fexdo_h_ARG2
  %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1)
  store <8 x half> %2, ptr @llvm_mips_fexdo_h_RES
  ret void
}

define void @llvm_mips_fexdo_w_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fexdo_w_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fexdo_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1)
  store <4 x float> %2, ptr @llvm_mips_fexdo_w_RES
  ret void
}

define void @llvm_mips_fexp2_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fexp2_w_ARG1
  %1 = load <4 x i32>, ptr @llvm_mips_fexp2_w_ARG2
  %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1)
  store <4 x float> %2, ptr @llvm_mips_fexp2_w_RES
  ret void
}

define void @llvm_mips_fexp2_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fexp2_d_ARG1
  %1 = load <2 x i64>, ptr @llvm_mips_fexp2_d_ARG2
  %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1)
  store <2 x double> %2, ptr @llvm_mips_fexp2_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; FP compare intrinsics produce all-ones/all-zeros integer lane masks:
; <4 x float> inputs -> <4 x i32> mask, <2 x double> inputs -> <2 x i64> mask.

define void @llvm_mips_fcaf_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcaf_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcaf_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcaf_w_RES
  ret void
}

define void @llvm_mips_fcaf_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcaf_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcaf_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcaf_d_RES
  ret void
}

define void @llvm_mips_fceq_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fceq_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fceq_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fceq_w_RES
  ret void
}

define void @llvm_mips_fceq_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fceq_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fceq_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fceq_d_RES
  ret void
}

define void @llvm_mips_fcle_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcle_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcle_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcle_w_RES
  ret void
}

define void @llvm_mips_fcle_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcle_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcle_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcle_d_RES
  ret void
}

define void @llvm_mips_fclt_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fclt_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fclt_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fclt_w_RES
  ret void
}

define void @llvm_mips_fclt_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fclt_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fclt_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fclt_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; More FP compare intrinsic tests (ordered, not-equal, unordered-or-equal,
; unordered-or-less-than); results are integer lane masks stored to *_RES.

define void @llvm_mips_fcor_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcor_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcor_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcor_w_RES
  ret void
}

define void @llvm_mips_fcor_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcor_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcor_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcor_d_RES
  ret void
}

define void @llvm_mips_fcne_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcne_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcne_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcne_w_RES
  ret void
}

define void @llvm_mips_fcne_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcne_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcne_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcne_d_RES
  ret void
}

define void @llvm_mips_fcueq_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcueq_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcueq_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcueq_w_RES
  ret void
}

define void @llvm_mips_fcueq_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcueq_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcueq_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcueq_d_RES
  ret void
}

define void @llvm_mips_fcult_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcult_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcult_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcult_w_RES
  ret void
}

define void @llvm_mips_fcult_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcult_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcult_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcult_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Remaining fc* (unordered) compares plus the first fs* (signaling) compare;
; results are integer lane masks stored to *_RES.

define void @llvm_mips_fcule_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcule_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcule_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcule_w_RES
  ret void
}

define void @llvm_mips_fcule_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcule_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcule_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcule_d_RES
  ret void
}

define void @llvm_mips_fcun_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcun_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcun_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcun_w_RES
  ret void
}

define void @llvm_mips_fcun_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcun_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcun_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcun_d_RES
  ret void
}

define void @llvm_mips_fcune_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fcune_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fcune_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fcune_w_RES
  ret void
}

define void @llvm_mips_fcune_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fcune_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fcune_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fcune_d_RES
  ret void
}

define void @llvm_mips_fsaf_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsaf_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsaf_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsaf_w_RES
  ret void
}

define void @llvm_mips_fsaf_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsaf_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsaf_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fsaf_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Signaling FP compare intrinsic tests (fseq/fsle/fslt/fsor); results are
; integer lane masks stored to *_RES.

define void @llvm_mips_fseq_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fseq_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fseq_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fseq_w_RES
  ret void
}

define void @llvm_mips_fseq_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fseq_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fseq_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fseq_d_RES
  ret void
}

define void @llvm_mips_fsle_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsle_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsle_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsle_w_RES
  ret void
}

define void @llvm_mips_fsle_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsle_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsle_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fsle_d_RES
  ret void
}

define void @llvm_mips_fslt_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fslt_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fslt_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fslt_w_RES
  ret void
}

define void @llvm_mips_fslt_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fslt_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fslt_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fslt_d_RES
  ret void
}

define void @llvm_mips_fsor_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsor_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsor_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsor_w_RES
  ret void
}

define void @llvm_mips_fsor_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsor_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsor_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fsor_d_RES
  ret void
}
; Diff hunks resolved to the post-migration opaque-pointer ("ptr") form.
; Signaling FP compare intrinsic tests (fsne/fsueq/fsult); results are
; integer lane masks stored to *_RES.

define void @llvm_mips_fsne_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsne_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsne_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsne_w_RES
  ret void
}

define void @llvm_mips_fsne_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsne_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsne_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fsne_d_RES
  ret void
}

define void @llvm_mips_fsueq_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsueq_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsueq_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsueq_w_RES
  ret void
}

define void @llvm_mips_fsueq_d_test() nounwind {
entry:
  %0 = load <2 x double>, ptr @llvm_mips_fsueq_d_ARG1
  %1 = load <2 x double>, ptr @llvm_mips_fsueq_d_ARG2
  %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, ptr @llvm_mips_fsueq_d_RES
  ret void
}

define void @llvm_mips_fsult_w_test() nounwind {
entry:
  %0 = load <4 x float>, ptr @llvm_mips_fsult_w_ARG1
  %1 = load <4 x float>, ptr @llvm_mips_fsult_w_ARG2
  %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, ptr @llvm_mips_fsult_w_RES
  ret void
}
define void @llvm_mips_fsult_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fsult_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fsult_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_fsult_d_RES
ret void
}
define void @llvm_mips_fsule_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fsule_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fsule_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_fsule_w_RES
ret void
}
define void @llvm_mips_fsule_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fsule_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fsule_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_fsule_d_RES
ret void
}
define void @llvm_mips_fsun_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fsun_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fsun_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_fsun_w_RES
ret void
}
define void @llvm_mips_fsun_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fsun_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fsun_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_fsun_d_RES
ret void
}
define void @llvm_mips_fsune_w_test() nounwind {
entry:
- %0 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG1
- %1 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG2
+ %0 = load <4 x float>, ptr @llvm_mips_fsune_w_ARG1
+ %1 = load <4 x float>, ptr @llvm_mips_fsune_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_fsune_w_RES
ret void
}
define void @llvm_mips_fsune_d_test() nounwind {
entry:
- %0 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG1
- %1 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG2
+ %0 = load <2 x double>, ptr @llvm_mips_fsune_d_ARG1
+ %1 = load <2 x double>, ptr @llvm_mips_fsune_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_fsune_d_RES
ret void
}
define void @llvm_mips_mul_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mul_q_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mul_q_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mul_q_h_RES
ret void
}
define void @llvm_mips_mul_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mul_q_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mul_q_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mul_q_w_RES
ret void
}
define void @llvm_mips_mulr_q_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_mulr_q_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_mulr_q_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_mulr_q_h_RES
ret void
}
define void @llvm_mips_mulr_q_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_mulr_q_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_mulr_q_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_mulr_q_w_RES
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPS
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPSEL
-define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @add_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: addv.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = add <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @add_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: addv.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = add <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @add_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: addv.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = add <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @add_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: add_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: addv.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = add <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @add_v16i8_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v16i8_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($5)
; ALL-NEXT: addvi.b $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @add_v8i16_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v8i16_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($5)
; ALL-NEXT: addvi.h $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @add_v4i32_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v4i32_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($5)
; ALL-NEXT: addvi.w $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @add_v2i64_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: add_v2i64_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($5)
; ALL-NEXT: addvi.d $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = add <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sub_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: subv.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = sub <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sub_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: subv.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = sub <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sub_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: subv.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = sub <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sub_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: sub_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: subv.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = sub <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sub_v16i8_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v16i8_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($5)
; ALL-NEXT: subvi.b $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @sub_v16i8_i_negated(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sub_v16i8_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v16i8_i_negated:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($5)
; ALL-NEXT: subvi.b $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = add <16 x i8> %1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sub_v8i16_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v8i16_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($5)
; ALL-NEXT: subvi.h $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @sub_v8i16_i_negated(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sub_v8i16_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v8i16_i_negated:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($5)
; ALL-NEXT: subvi.h $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = add <8 x i16> %1, <i16 -1, i16 -1, i16 -1, i16 -1,
i16 -1, i16 -1, i16 -1, i16 -1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sub_v4i32_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v4i32_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($5)
; ALL-NEXT: subvi.w $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @sub_v4i32_i_negated(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sub_v4i32_i_negated(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v4i32_i_negated:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($5)
; ALL-NEXT: subvi.w $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = add <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sub_v2i64_i(ptr %c, ptr %a) nounwind {
; ALL-LABEL: sub_v2i64_i:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($5)
; ALL-NEXT: subvi.d $w0, $w0, 1
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = sub <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @sub_v2i64_i_negated(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sub_v2i64_i_negated(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: sub_v2i64_i_negated:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.b $w0, -1
; MIPSEL-NEXT: addv.d $w0, $w1, $w0
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = add <2 x i64> %1, <i64 -1, i64 -1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mul_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: mulv.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = mul <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mul_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: mulv.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = mul <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mul_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: mulv.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = mul <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mul_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mul_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: mulv.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = mul <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+define void @maddv_v16i8(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($7)
; ALL-NEXT: maddv.b $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w2, 0($4)
- <16 x i8>* %c) nounwind {
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
- %3 = load <16 x i8>, <16 x i8>* %c
+ ptr %c) nounwind {
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
+ %3 = load <16 x i8>, ptr %c
%4 = mul <16 x i8> %2, %3
%5 = add <16 x i8> %4, %1
- store <16 x i8> %5, <16 x i8>* %d
+ store <16 x i8> %5, ptr %d
ret void
}
-define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+define void @maddv_v8i16(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($7)
; ALL-NEXT: maddv.h $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w2, 0($4)
- <8 x i16>* %c) nounwind {
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
- %3 = load <8 x i16>, <8 x i16>* %c
+ ptr %c) nounwind {
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
+ %3 = load <8 x i16>, ptr %c
%4 = mul <8 x i16> %2, %3
%5 = add <8 x i16> %4, %1
- store <8 x i16> %5, <8 x i16>* %d
+ store <8 x i16> %5, ptr %d
ret void
}
-define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+define void @maddv_v4i32(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($7)
; ALL-NEXT: maddv.w $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w2, 0($4)
- <4 x i32>* %c) nounwind {
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
- %3 = load <4 x i32>, <4 x i32>* %c
+ ptr %c) nounwind {
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
+ %3 = load <4 x i32>, ptr %c
%4 = mul <4 x i32> %2, %3
%5 = add <4 x i32> %4, %1
- store <4 x i32> %5, <4 x i32>* %d
+ store <4 x i32> %5, ptr %d
ret void
}
-define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+define void @maddv_v2i64(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: maddv_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($7)
; ALL-NEXT: maddv.d $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w2, 0($4)
- <2 x i64>* %c) nounwind {
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
- %3 = load <2 x i64>, <2 x i64>* %c
+ ptr %c) nounwind {
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
+ %3 = load <2 x i64>, ptr %c
%4 = mul <2 x i64> %2, %3
%5 = add <2 x i64> %4, %1
- store <2 x i64> %5, <2 x i64>* %d
+ store <2 x i64> %5, ptr %d
ret void
}
-define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+define void @msubv_v16i8(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($7)
; ALL-NEXT: msubv.b $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w2, 0($4)
- <16 x i8>* %c) nounwind {
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
- %3 = load <16 x i8>, <16 x i8>* %c
+ ptr %c) nounwind {
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
+ %3 = load <16 x i8>, ptr %c
%4 = mul <16 x i8> %2, %3
%5 = sub <16 x i8> %1, %4
- store <16 x i8> %5, <16 x i8>* %d
+ store <16 x i8> %5, ptr %d
ret void
}
-define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+define void @msubv_v8i16(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($7)
; ALL-NEXT: msubv.h $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w2, 0($4)
- <8 x i16>* %c) nounwind {
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
- %3 = load <8 x i16>, <8 x i16>* %c
+ ptr %c) nounwind {
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
+ %3 = load <8 x i16>, ptr %c
%4 = mul <8 x i16> %2, %3
%5 = sub <8 x i16> %1, %4
- store <8 x i16> %5, <8 x i16>* %d
+ store <8 x i16> %5, ptr %d
ret void
}
-define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+define void @msubv_v4i32(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($7)
; ALL-NEXT: msubv.w $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w2, 0($4)
- <4 x i32>* %c) nounwind {
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
- %3 = load <4 x i32>, <4 x i32>* %c
+ ptr %c) nounwind {
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
+ %3 = load <4 x i32>, ptr %c
%4 = mul <4 x i32> %2, %3
%5 = sub <4 x i32> %1, %4
- store <4 x i32> %5, <4 x i32>* %d
+ store <4 x i32> %5, ptr %d
ret void
}
-define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+define void @msubv_v2i64(ptr %d, ptr %a, ptr %b,
; ALL-LABEL: msubv_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($7)
; ALL-NEXT: msubv.d $w2, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w2, 0($4)
- <2 x i64>* %c) nounwind {
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
- %3 = load <2 x i64>, <2 x i64>* %c
+ ptr %c) nounwind {
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
+ %3 = load <2 x i64>, ptr %c
%4 = mul <2 x i64> %2, %3
%5 = sub <2 x i64> %1, %4
- store <2 x i64> %5, <2 x i64>* %d
+ store <2 x i64> %5, ptr %d
ret void
}
-define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @div_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: div_s.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = sdiv <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @div_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: div_s.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = sdiv <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @div_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: div_s.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = sdiv <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @div_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_s_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: div_s.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = sdiv <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @div_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: div_u.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = udiv <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @div_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: div_u.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = udiv <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @div_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: div_u.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = udiv <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @div_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: div_u_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: div_u.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = udiv <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mod_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: mod_s.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = srem <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mod_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: mod_s.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = srem <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mod_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: mod_s.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = srem <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mod_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_s_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: mod_s.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = srem <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mod_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v16i8:
; ALL: # %bb.0:
; ALL-NEXT: ld.b $w0, 0($6)
; ALL-NEXT: mod_u.b $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = urem <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mod_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v8i16:
; ALL: # %bb.0:
; ALL-NEXT: ld.h $w0, 0($6)
; ALL-NEXT: mod_u.h $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = urem <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mod_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v4i32:
; ALL: # %bb.0:
; ALL-NEXT: ld.w $w0, 0($6)
; ALL-NEXT: mod_u.w $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = urem <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mod_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; ALL-LABEL: mod_u_v2i64:
; ALL: # %bb.0:
; ALL-NEXT: ld.d $w0, 0($6)
; ALL-NEXT: mod_u.d $w0, $w1, $w0
; ALL-NEXT: jr $ra
; ALL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = urem <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
-define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @add_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: add_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fadd <4 x float> %1, %2
; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size add_v4f32
}
-define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @add_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: add_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fadd <2 x double> %1, %2
; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size add_v2f64
}
-define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @sub_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: sub_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fsub <4 x float> %1, %2
; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size sub_v4f32
}
-define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @sub_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: sub_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fsub <2 x double> %1, %2
; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size sub_v2f64
}
-define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @mul_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: mul_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fmul <4 x float> %1, %2
; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size mul_v4f32
}
-define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @mul_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: mul_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fmul <2 x double> %1, %2
; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size mul_v2f64
}
-define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
- <4 x float>* %c) nounwind {
+define void @fma_v4f32(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: fma_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>, <4 x float>* %c
+ %3 = load <4 x float>, ptr %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2,
<4 x float> %3)
; CHECK-DAG: fmadd.w [[R1]], [[R2]], [[R3]]
- store <4 x float> %4, <4 x float>* %d
+ store <4 x float> %4, ptr %d
; CHECK-DAG: st.w [[R1]], 0($4)
ret void
; CHECK: .size fma_v4f32
}
-define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
- <2 x double>* %c) nounwind {
+define void @fma_v2f64(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: fma_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>, <2 x double>* %c
+ %3 = load <2 x double>, ptr %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2,
<2 x double> %3)
; CHECK-DAG: fmadd.d [[R1]], [[R2]], [[R3]]
- store <2 x double> %4, <2 x double>* %d
+ store <2 x double> %4, ptr %d
; CHECK-DAG: st.d [[R1]], 0($4)
ret void
; CHECK: .size fma_v2f64
}
-define void @fmlu_fsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
- <4 x float>* %c) nounwind {
+define void @fmlu_fsub_v4f32(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: fmlu_fsub_v4f32:
- %1 = load <4 x float>, <4 x float>* %b
+ %1 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($6)
- %2 = load <4 x float>, <4 x float>* %c
+ %2 = load <4 x float>, ptr %c
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($7)
%3 = fmul <4 x float> %1, %2
; CHECK-DAG: fmul.w [[R2]], [[R1]], [[R2]]
- %4 = load <4 x float>, <4 x float>* %a
+ %4 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($5)
%5 = fsub <4 x float> %4, %3
; CHECK-DAG: fsub.w [[R2]], [[R3]], [[R2]]
- store <4 x float> %5, <4 x float>* %d
+ store <4 x float> %5, ptr %d
; CHECK-DAG: st.w [[R2]], 0($4)
ret void
; CHECK: .size fmlu_fsub_v4f32
}
-define void @fmul_fsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
- <2 x double>* %c) nounwind {
+define void @fmul_fsub_v2f64(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: fmul_fsub_v2f64:
- %1 = load <2 x double>, <2 x double>* %b
+ %1 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($7)
- %2 = load <2 x double>, <2 x double>* %c
+ %2 = load <2 x double>, ptr %c
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fmul <2 x double> %1, %2
; CHECK-DAG: fmul.d [[R1]], [[R2]], [[R1]]
- %4 = load <2 x double>, <2 x double>* %a
+ %4 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($5)
%5 = fsub <2 x double> %4, %3
; CHECK-DAG: fsub.d [[R1]], [[R3]], [[R1]]
- store <2 x double> %5, <2 x double>* %d
+ store <2 x double> %5, ptr %d
; CHECK-DAG: st.d [[R1]], 0($4)
ret void
; CHECK: .size fmul_fsub_v2f64
}
-define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @fdiv_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: fdiv_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fdiv <4 x float> %1, %2
; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size fdiv_v4f32
}
-define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @fdiv_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: fdiv_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fdiv <2 x double> %1, %2
; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size fdiv_v2f64
}
-define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fabs_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: fabs_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x float> %2, <4 x float>* %c
+ store <4 x float> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size fabs_v4f32
}
-define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fabs_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: fabs_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <2 x double> %2, <2 x double>* %c
+ store <2 x double> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size fabs_v2f64
}
-define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fexp2_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: fexp2_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
; CHECK-DAG: fexp2.w [[R4:\$w[0-9]+]], [[R3]], [[R1]]
- store <4 x float> %2, <4 x float>* %c
+ store <4 x float> %2, ptr %c
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size fexp2_v4f32
}
-define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fexp2_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: fexp2_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: ffint_u.d [[R4:\$w[0-9]+]], [[R3]]
; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
- store <2 x double> %2, <2 x double>* %c
+ store <2 x double> %2, ptr %c
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size fexp2_v2f64
}
-define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fexp2_v4f32_2(ptr %c, ptr %a) nounwind {
; CHECK: fexp2_v4f32_2:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
%3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
; CHECK-DAG: fexp2.w [[R5:\$w[0-9]+]], [[R4]], [[R1]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R5]], 0($4)
ret void
; CHECK: .size fexp2_v4f32_2
}
-define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fexp2_v2f64_2(ptr %c, ptr %a) nounwind {
; CHECK: fexp2_v2f64_2:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
%3 = fmul <2 x double> <double 2.0, double 2.0>, %2
; CHECK-DAG: ldi.d [[R2:\$w[0-9]+]], 1
; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R2]]
; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size fexp2_v2f64_2
}
-define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fsqrt_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: fsqrt_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
- store <4 x float> %2, <4 x float>* %c
+ store <4 x float> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size fsqrt_v4f32
}
-define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fsqrt_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: fsqrt_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
- store <2 x double> %2, <2 x double>* %c
+ store <2 x double> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size fsqrt_v2f64
}
-define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+define void @ffint_u_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: ffint_u_v4f32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = uitofp <4 x i32> %1 to <4 x float>
; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
- store <4 x float> %2, <4 x float>* %c
+ store <4 x float> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ffint_u_v4f32
}
-define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+define void @ffint_u_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: ffint_u_v2f64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = uitofp <2 x i64> %1 to <2 x double>
; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
- store <2 x double> %2, <2 x double>* %c
+ store <2 x double> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ffint_u_v2f64
}
-define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+define void @ffint_s_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: ffint_s_v4f32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = sitofp <4 x i32> %1 to <4 x float>
; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
- store <4 x float> %2, <4 x float>* %c
+ store <4 x float> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ffint_s_v4f32
}
-define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+define void @ffint_s_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: ffint_s_v2f64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = sitofp <2 x i64> %1 to <2 x double>
; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
- store <2 x double> %2, <2 x double>* %c
+ store <2 x double> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ffint_s_v2f64
}
-define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+define void @ftrunc_u_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: ftrunc_u_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = fptoui <4 x float> %1 to <4 x i32>
; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ftrunc_u_v4f32
}
-define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+define void @ftrunc_u_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: ftrunc_u_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = fptoui <2 x double> %1 to <2 x i64>
; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ftrunc_u_v2f64
}
-define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+define void @ftrunc_s_v4f32(ptr %c, ptr %a) nounwind {
; CHECK: ftrunc_s_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = fptosi <4 x float> %1 to <4 x i32>
; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ftrunc_s_v4f32
}
-define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+define void @ftrunc_s_v2f64(ptr %c, ptr %a) nounwind {
; CHECK: ftrunc_s_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = fptosi <2 x double> %1 to <2 x i64>
; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
; MASK_TYPE1 = C2-C1 0s | 1s | ends with C1 0s
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.d $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.d $w0, 0($5)
entry:
- %0 = load <2 x i64>, <2 x i64>* %a
+ %0 = load <2 x i64>, ptr %a
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 52)
%2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 51)
- store <2 x i64> %2, <2 x i64>* %b
+ store <2 x i64> %2, ptr %b
ret void
}
; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.d $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.d $w0, 0($5)
entry:
- %0 = load <2 x i64>, <2 x i64>* %a
+ %0 = load <2 x i64>, ptr %a
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 6)
%2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 4)
- store <2 x i64> %2, <2 x i64>* %b
+ store <2 x i64> %2, ptr %b
ret void
}
; do not fold (shl (srl x, c1), c2) -> (and (shl x, (sub c1, c2), MASK) if C1 >= C2
; MASK_TYPE2 = 1s | C1 zeros
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.d $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.d $w0, 0($5)
entry:
- %0 = load <2 x i64>, <2 x i64>* %a
+ %0 = load <2 x i64>, ptr %a
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 4)
%2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 6)
- store <2 x i64> %2, <2 x i64>* %b
+ store <2 x i64> %2, ptr %b
ret void
}
; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.w $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.w $w0, 0($5)
entry:
- %0 = load <4 x i32>, <4 x i32>* %a
+ %0 = load <4 x i32>, ptr %a
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
%2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 3)
- store <4 x i32> %2, <4 x i32>* %b
+ store <4 x i32> %2, ptr %b
ret void
}
; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64_long:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.d $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.d $w0, 0($5)
entry:
- %0 = load <2 x i64>, <2 x i64>* %a
+ %0 = load <2 x i64>, ptr %a
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 38)
%2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 38)
- store <2 x i64> %2, <2 x i64>* %b
+ store <2 x i64> %2, ptr %b
ret void
}
; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type2_i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type2_i64(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.d $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.d $w0, 0($5)
entry:
- %0 = load <2 x i64>, <2 x i64>* %a
+ %0 = load <2 x i64>, ptr %a
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 3)
%2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 3)
- store <2 x i64> %2, <2 x i64>* %b
+ store <2 x i64> %2, ptr %b
ret void
}
; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_a:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.w $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.w $w0, 0($5)
entry:
- %0 = load <4 x i32>, <4 x i32>* %a
+ %0 = load <4 x i32>, ptr %a
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 5)
%2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 5)
- store <4 x i32> %2, <4 x i32>* %b
+ store <4 x i32> %2, ptr %b
ret void
}
; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(ptr %a, ptr %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_b:
; MIPSEL64R6: # %bb.0: # %entry
; MIPSEL64R6-NEXT: ld.w $w0, 0($4)
; MIPSEL32R5-NEXT: jr $ra
; MIPSEL32R5-NEXT: st.w $w0, 0($5)
entry:
- %0 = load <4 x i32>, <4 x i32>* %a
+ %0 = load <4 x i32>, ptr %a
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 30)
%2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 30)
- store <4 x i32> %2, <4 x i32>* %b
+ store <4 x i32> %2, ptr %b
ret void
}
; N64-LE-NEXT: fill.d $w0, $1
; N64-LE-NEXT: jr $ra
; N64-LE-NEXT: st.b $w0, 0($2)
- store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
- store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
+ store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, ptr @v16i8
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, ptr @v16i8
ret void
}
; N64-LE-NEXT: fill.d $w0, $1
; N64-LE-NEXT: jr $ra
; N64-LE-NEXT: st.h $w0, 0($2)
- store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
- store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>*@v8i16
- store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
- store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
- store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>*@v8i16
- store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
+ store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, ptr @v8i16
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, ptr @v8i16
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, ptr @v8i16
+ store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, ptr @v8i16
+ store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, ptr @v8i16
+ store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, ptr @v8i16
ret void
}
; N64-LE-NEXT: ld.w $w0, 0($1)
; N64-LE-NEXT: jr $ra
; N64-LE-NEXT: st.w $w0, 0($2)
- store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
- store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
+ store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, ptr @v4i32
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr @v4i32
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, ptr @v4i32
+ store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, ptr @v4i32
+ store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, ptr @v4i32
+ store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, ptr @v4i32
+ store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, ptr @v4i32
ret void
}
; N64-NEXT: ld.d $w0, 0($1)
; N64-NEXT: jr $ra
; N64-NEXT: st.d $w0, 0($2)
- store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
- store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
+ store volatile <2 x i64> <i64 0, i64 0>, ptr @v2i64
+ store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, ptr @v2i64
+ store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, ptr @v2i64
+ store volatile <2 x i64> <i64 4294967297, i64 4294967297>, ptr @v2i64
+ store volatile <2 x i64> <i64 1, i64 1>, ptr @v2i64
+ store volatile <2 x i64> <i64 1, i64 31>, ptr @v2i64
+ store volatile <2 x i64> <i64 3, i64 4>, ptr @v2i64
ret void
}
%14 = insertelement <16 x i8> %13, i8 %h, i32 13
%15 = insertelement <16 x i8> %14, i8 %h, i32 14
%16 = insertelement <16 x i8> %15, i8 %h, i32 15
- store volatile <16 x i8> %16, <16 x i8>*@v16i8
+ store volatile <16 x i8> %16, ptr @v16i8
ret void
}
%6 = insertelement <8 x i16> %5, i16 %f, i32 5
%7 = insertelement <8 x i16> %6, i16 %g, i32 6
%8 = insertelement <8 x i16> %7, i16 %h, i32 7
- store volatile <8 x i16> %8, <8 x i16>*@v8i16
+ store volatile <8 x i16> %8, ptr @v8i16
ret void
}
%2 = insertelement <4 x i32> %1, i32 %b, i32 1
%3 = insertelement <4 x i32> %2, i32 %c, i32 2
%4 = insertelement <4 x i32> %3, i32 %d, i32 3
- store volatile <4 x i32> %4, <4 x i32>*@v4i32
+ store volatile <4 x i32> %4, ptr @v4i32
ret void
}
; N64-NEXT: st.d $w0, 0($1)
%1 = insertelement <2 x i64> undef, i64 %a, i32 0
%2 = insertelement <2 x i64> %1, i64 %b, i32 1
- store volatile <2 x i64> %2, <2 x i64>*@v2i64
+ store volatile <2 x i64> %2, ptr @v2i64
ret void
}
; N64-NEXT: copy_s.b $1, $w0[1]
; N64-NEXT: jr $ra
; N64-NEXT: seb $2, $1
- %1 = load <16 x i8>, <16 x i8>* @v16i8
+ %1 = load <16 x i8>, ptr @v16i8
%2 = add <16 x i8> %1, %1
%3 = extractelement <16 x i8> %2, i32 1
%4 = sext i8 %3 to i32
; N64-NEXT: copy_s.h $1, $w0[1]
; N64-NEXT: jr $ra
; N64-NEXT: seh $2, $1
- %1 = load <8 x i16>, <8 x i16>* @v8i16
+ %1 = load <8 x i16>, ptr @v8i16
%2 = add <8 x i16> %1, %1
%3 = extractelement <8 x i16> %2, i32 1
%4 = sext i16 %3 to i32
; N64-NEXT: addv.w $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_s.w $2, $w0[1]
- %1 = load <4 x i32>, <4 x i32>* @v4i32
+ %1 = load <4 x i32>, ptr @v4i32
%2 = add <4 x i32> %1, %1
%3 = extractelement <4 x i32> %2, i32 1
ret i32 %3
; N64-NEXT: addv.d $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_s.d $2, $w0[1]
- %1 = load <2 x i64>, <2 x i64>* @v2i64
+ %1 = load <2 x i64>, ptr @v2i64
%2 = add <2 x i64> %1, %1
%3 = extractelement <2 x i64> %2, i32 1
ret i64 %3
; N64-NEXT: addv.b $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_u.b $2, $w0[1]
- %1 = load <16 x i8>, <16 x i8>* @v16i8
+ %1 = load <16 x i8>, ptr @v16i8
%2 = add <16 x i8> %1, %1
%3 = extractelement <16 x i8> %2, i32 1
%4 = zext i8 %3 to i32
; N64-NEXT: addv.h $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_u.h $2, $w0[1]
- %1 = load <8 x i16>, <8 x i16>* @v8i16
+ %1 = load <8 x i16>, ptr @v8i16
%2 = add <8 x i16> %1, %1
%3 = extractelement <8 x i16> %2, i32 1
%4 = zext i16 %3 to i32
; N64-NEXT: addv.w $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_s.w $2, $w0[1]
- %1 = load <4 x i32>, <4 x i32>* @v4i32
+ %1 = load <4 x i32>, ptr @v4i32
%2 = add <4 x i32> %1, %1
%3 = extractelement <4 x i32> %2, i32 1
ret i32 %3
; N64-NEXT: addv.d $w0, $w0, $w0
; N64-NEXT: jr $ra
; N64-NEXT: copy_s.d $2, $w0[1]
- %1 = load <2 x i64>, <2 x i64>* @v2i64
+ %1 = load <2 x i64>, ptr @v2i64
%2 = add <2 x i64> %1, %1
%3 = extractelement <2 x i64> %2, i32 1
ret i64 %3
; N64-NEXT: sra $1, $1, 24
; N64-NEXT: jr $ra
; N64-NEXT: seb $2, $1
- %1 = load <16 x i8>, <16 x i8>* @v16i8
+ %1 = load <16 x i8>, ptr @v16i8
%2 = add <16 x i8> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <16 x i8> %2, i32 %3
%5 = sext i8 %4 to i32
ret i32 %5
; N64-NEXT: sra $1, $1, 16
; N64-NEXT: jr $ra
; N64-NEXT: seh $2, $1
- %1 = load <8 x i16>, <8 x i16>* @v8i16
+ %1 = load <8 x i16>, ptr @v8i16
%2 = add <8 x i16> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <8 x i16> %2, i32 %3
%5 = sext i16 %4 to i32
ret i32 %5
; N64-NEXT: splat.w $w0, $w0[$1]
; N64-NEXT: jr $ra
; N64-NEXT: mfc1 $2, $f0
- %1 = load <4 x i32>, <4 x i32>* @v4i32
+ %1 = load <4 x i32>, ptr @v4i32
%2 = add <4 x i32> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <4 x i32> %2, i32 %3
ret i32 %4
}
; N64-NEXT: splat.d $w0, $w0[$1]
; N64-NEXT: jr $ra
; N64-NEXT: dmfc1 $2, $f0
- %1 = load <2 x i64>, <2 x i64>* @v2i64
+ %1 = load <2 x i64>, ptr @v2i64
%2 = add <2 x i64> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <2 x i64> %2, i32 %3
ret i64 %4
}
; N64-NEXT: mfc1 $1, $f0
; N64-NEXT: jr $ra
; N64-NEXT: srl $2, $1, 24
- %1 = load <16 x i8>, <16 x i8>* @v16i8
+ %1 = load <16 x i8>, ptr @v16i8
%2 = add <16 x i8> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <16 x i8> %2, i32 %3
%5 = zext i8 %4 to i32
ret i32 %5
; N64-NEXT: mfc1 $1, $f0
; N64-NEXT: jr $ra
; N64-NEXT: srl $2, $1, 16
- %1 = load <8 x i16>, <8 x i16>* @v8i16
+ %1 = load <8 x i16>, ptr @v8i16
%2 = add <8 x i16> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <8 x i16> %2, i32 %3
%5 = zext i16 %4 to i32
ret i32 %5
; N64-NEXT: splat.w $w0, $w0[$1]
; N64-NEXT: jr $ra
; N64-NEXT: mfc1 $2, $f0
- %1 = load <4 x i32>, <4 x i32>* @v4i32
+ %1 = load <4 x i32>, ptr @v4i32
%2 = add <4 x i32> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <4 x i32> %2, i32 %3
ret i32 %4
}
; N64-NEXT: splat.d $w0, $w0[$1]
; N64-NEXT: jr $ra
; N64-NEXT: dmfc1 $2, $f0
- %1 = load <2 x i64>, <2 x i64>* @v2i64
+ %1 = load <2 x i64>, ptr @v2i64
%2 = add <2 x i64> %1, %1
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
%4 = extractelement <2 x i64> %2, i32 %3
ret i64 %4
}
; N64-NEXT: insert.b $w0[1], $4
; N64-NEXT: jr $ra
; N64-NEXT: st.b $w0, 0($1)
- %1 = load <16 x i8>, <16 x i8>* @v16i8
+ %1 = load <16 x i8>, ptr @v16i8
%a2 = trunc i32 %a to i8
%a3 = sext i8 %a2 to i32
%a4 = trunc i32 %a3 to i8
%2 = insertelement <16 x i8> %1, i8 %a4, i32 1
- store <16 x i8> %2, <16 x i8>* @v16i8
+ store <16 x i8> %2, ptr @v16i8
ret void
}
; N64-NEXT: insert.h $w0[1], $4
; N64-NEXT: jr $ra
; N64-NEXT: st.h $w0, 0($1)
- %1 = load <8 x i16>, <8 x i16>* @v8i16
+ %1 = load <8 x i16>, ptr @v8i16
%a2 = trunc i32 %a to i16
%a3 = sext i16 %a2 to i32
%a4 = trunc i32 %a3 to i16
%2 = insertelement <8 x i16> %1, i16 %a4, i32 1
- store <8 x i16> %2, <8 x i16>* @v8i16
+ store <8 x i16> %2, ptr @v8i16
ret void
}
; N64-NEXT: insert.w $w0[1], $4
; N64-NEXT: jr $ra
; N64-NEXT: st.w $w0, 0($1)
- %1 = load <4 x i32>, <4 x i32>* @v4i32
+ %1 = load <4 x i32>, ptr @v4i32
%2 = insertelement <4 x i32> %1, i32 %a, i32 1
- store <4 x i32> %2, <4 x i32>* @v4i32
+ store <4 x i32> %2, ptr @v4i32
ret void
}
; Insert a scalar i64 into lane 1 of the global vector @v2i64: load the
; vector, insertelement at constant index 1, store it back.  Expected codegen
; (per the N64 CHECK lines) is insert.d followed by st.d.
; The -/+ diff lines record the typed-pointer -> opaque-`ptr` IR migration;
; only the pointer spelling changes, not the tested behaviour.
define void @insert_v2i64(i64 signext %a) nounwind {
; N64-NEXT: insert.d $w0[1], $4
; N64-NEXT: jr $ra
; N64-NEXT: st.d $w0, 0($1)
- %1 = load <2 x i64>, <2 x i64>* @v2i64
+ %1 = load <2 x i64>, ptr @v2i64
%2 = insertelement <2 x i64> %1, i64 %a, i32 1
- store <2 x i64> %2, <2 x i64>* @v2i64
+ store <2 x i64> %2, ptr @v2i64
ret void
}
; N64-NEXT: sld.b $w0, $w0[$2]
; N64-NEXT: jr $ra
; N64-NEXT: st.b $w0, 0($1)
- %1 = load <16 x i8>, <16 x i8>* @v16i8
- %2 = load i32, i32* @i32
+ %1 = load <16 x i8>, ptr @v16i8
+ %2 = load i32, ptr @i32
%a2 = trunc i32 %a to i8
%a3 = sext i8 %a2 to i32
%a4 = trunc i32 %a3 to i8
%3 = insertelement <16 x i8> %1, i8 %a4, i32 %2
- store <16 x i8> %3, <16 x i8>* @v16i8
+ store <16 x i8> %3, ptr @v16i8
ret void
}
; N64-NEXT: sld.b $w0, $w0[$2]
; N64-NEXT: jr $ra
; N64-NEXT: st.h $w0, 0($1)
- %1 = load <8 x i16>, <8 x i16>* @v8i16
- %2 = load i32, i32* @i32
+ %1 = load <8 x i16>, ptr @v8i16
+ %2 = load i32, ptr @i32
%a2 = trunc i32 %a to i16
%a3 = sext i16 %a2 to i32
%a4 = trunc i32 %a3 to i16
%3 = insertelement <8 x i16> %1, i16 %a4, i32 %2
- store <8 x i16> %3, <8 x i16>* @v8i16
+ store <8 x i16> %3, ptr @v8i16
ret void
}
; N64-NEXT: sld.b $w0, $w0[$2]
; N64-NEXT: jr $ra
; N64-NEXT: st.w $w0, 0($1)
- %1 = load <4 x i32>, <4 x i32>* @v4i32
- %2 = load i32, i32* @i32
+ %1 = load <4 x i32>, ptr @v4i32
+ %2 = load i32, ptr @i32
%3 = insertelement <4 x i32> %1, i32 %a, i32 %2
- store <4 x i32> %3, <4 x i32>* @v4i32
+ store <4 x i32> %3, ptr @v4i32
ret void
}
; N64-NEXT: sld.b $w0, $w0[$2]
; N64-NEXT: jr $ra
; N64-NEXT: st.d $w0, 0($1)
- %1 = load <2 x i64>, <2 x i64>* @v2i64
- %2 = load i32, i32* @i32
+ %1 = load <2 x i64>, ptr @v2i64
+ %2 = load i32, ptr @i32
%3 = insertelement <2 x i64> %1, i64 %a, i32 %2
- store <2 x i64> %3, <2 x i64>* @v2i64
+ store <2 x i64> %3, ptr @v2i64
ret void
}
; N64-NEXT: sb $2, 1($1)
; N64-NEXT: jr $ra
; N64-NEXT: sb $2, 0($1)
- store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8>*@v4i8
+ store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, ptr @v4i8
ret void
}
define void @const_v4f32() nounwind {
; ALL-LABEL: const_v4f32:
- store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, ptr @v4f32
; ALL: ldi.b [[R1:\$w[0-9]+]], 0
- store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, ptr @v4f32
; ALL: lui [[R1:\$[0-9]+]], 16256
; ALL: fill.w [[R2:\$w[0-9]+]], [[R1]]
- store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, ptr @v4f32
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, ptr @v4f32
; ALL: lui [[R1:\$[0-9]+]], 18304
; ALL: ori [[R2:\$[0-9]+]], [[R1]], 128
; ALL: fill.w [[R3:\$w[0-9]+]], [[R2]]
- store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, ptr @v4f32
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
+ store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, ptr @v4f32
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
define void @const_v2f64() nounwind {
; ALL-LABEL: const_v2f64:
- store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 0.0, double 0.0>, ptr @v2f64
; ALL: ldi.b [[R1:\$w[0-9]+]], 0
- store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 1.0, double 1.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 1.0, double 31.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; ALL: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
- store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
+ store volatile <2 x double> <double 3.0, double 4.0>, ptr @v2f64
; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
define void @nonconst_v4f32() nounwind {
; ALL-LABEL: nonconst_v4f32:
- %1 = load float , float *@f32
+ %1 = load float , ptr @f32
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
%5 = insertelement <4 x float> %4, float %1, i32 3
- store volatile <4 x float> %5, <4 x float>*@v4f32
+ store volatile <4 x float> %5, ptr @v4f32
; ALL: lwc1 $f[[R1:[0-9]+]], 0(
; ALL: splati.w [[R2:\$w[0-9]+]], $w[[R1]]
define void @nonconst_v2f64() nounwind {
; ALL-LABEL: nonconst_v2f64:
- %1 = load double , double *@f64
+ %1 = load double , ptr @f64
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
- store volatile <2 x double> %3, <2 x double>*@v2f64
+ store volatile <2 x double> %3, ptr @v2f64
; ALL: ldc1 $f[[R1:[0-9]+]], 0(
; ALL: splati.d [[R2:\$w[0-9]+]], $w[[R1]]
define float @extract_v4f32() nounwind {
; ALL-LABEL: extract_v4f32:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
define float @extract_v4f32_elt0() nounwind {
; ALL-LABEL: extract_v4f32_elt0:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
define float @extract_v4f32_elt2() nounwind {
; ALL-LABEL: extract_v4f32_elt2:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = fadd <4 x float> %1, %1
define float @extract_v4f32_vidx() nounwind {
; ALL-LABEL: extract_v4f32_vidx:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
%2 = fadd <4 x float> %1, %1
; ALL-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
define double @extract_v2f64() nounwind {
; ALL-LABEL: extract_v2f64:
- %1 = load <2 x double>, <2 x double>* @v2f64
+ %1 = load <2 x double>, ptr @v2f64
; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = fadd <2 x double> %1, %1
define double @extract_v2f64_elt0() nounwind {
; ALL-LABEL: extract_v2f64_elt0:
- %1 = load <2 x double>, <2 x double>* @v2f64
+ %1 = load <2 x double>, ptr @v2f64
; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = fadd <2 x double> %1, %1
define double @extract_v2f64_vidx() nounwind {
; ALL-LABEL: extract_v2f64_vidx:
- %1 = load <2 x double>, <2 x double>* @v2f64
+ %1 = load <2 x double>, ptr @v2f64
; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
%2 = fadd <2 x double> %1, %1
; ALL-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
- %3 = load i32, i32* @i32
+ %3 = load i32, ptr @i32
; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
define void @insert_v4f32(float %a) nounwind {
; ALL-LABEL: insert_v4f32:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
%2 = insertelement <4 x float> %1, float %a, i32 1
; float argument passed in $f12
; ALL-DAG: insve.w [[R1]][1], $w12[0]
- store <4 x float> %2, <4 x float>* @v4f32
+ store <4 x float> %2, ptr @v4f32
; ALL-DAG: st.w [[R1]]
ret void
define void @insert_v2f64(double %a) nounwind {
; ALL-LABEL: insert_v2f64:
- %1 = load <2 x double>, <2 x double>* @v2f64
+ %1 = load <2 x double>, ptr @v2f64
; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
%2 = insertelement <2 x double> %1, double %a, i32 1
; double argument passed in $f12
; ALL-DAG: insve.d [[R1]][1], $w12[0]
- store <2 x double> %2, <2 x double>* @v2f64
+ store <2 x double> %2, ptr @v2f64
; ALL-DAG: st.d [[R1]]
ret void
define void @insert_v4f32_vidx(float %a) nounwind {
; ALL-LABEL: insert_v4f32_vidx:
- %1 = load <4 x float>, <4 x float>* @v4f32
+ %1 = load <4 x float>, ptr @v4f32
; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
; ALL-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
- %2 = load i32, i32* @i32
+ %2 = load i32, ptr @i32
; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; ALL-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
; ALL-DAG: sld.b [[R1]], [[R1]][[[NIDX]]]
- store <4 x float> %3, <4 x float>* @v4f32
+ store <4 x float> %3, ptr @v4f32
; ALL-DAG: st.w [[R1]]
ret void
define void @insert_v2f64_vidx(double %a) nounwind {
; ALL-LABEL: insert_v2f64_vidx:
- %1 = load <2 x double>, <2 x double>* @v2f64
+ %1 = load <2 x double>, ptr @v2f64
; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
; ALL-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
- %2 = load i32, i32* @i32
+ %2 = load i32, ptr @i32
; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
; ALL-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
; ALL-DAG: sld.b [[R1]], [[R1]][[[NIDX]]]
- store <2 x double> %3, <2 x double>* @v2f64
+ store <2 x double> %3, ptr @v2f64
; ALL-DAG: st.d [[R1]]
ret void
; ============================================================================
; MSA saturate-immediate intrinsic tests: llvm.mips.sat.{s,u}.{b,h,w,d}.
; Pattern for every test: load the vector operand from the global @..._ARG1,
; apply the intrinsic with immediate operand 7, store the result to @..._RES.
; The .s./.u. infix distinguishes the signed/unsigned intrinsic forms (per the
; intrinsic naming); element types are b=<16 x i8>, h=<8 x i16>, w=<4 x i32>,
; d=<2 x i64>.  The -/+ diff lines record the typed-pointer -> opaque-`ptr`
; IR migration; only the pointer spelling changes.
; ============================================================================
; sat.s.b: <16 x i8> operand.
define void @llvm_mips_sat_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_sat_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_sat_s_b_RES
ret void
}
; sat.s.h: <8 x i16> operand.
define void @llvm_mips_sat_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_sat_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_sat_s_h_RES
ret void
}
; sat.s.w: <4 x i32> operand.
define void @llvm_mips_sat_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_sat_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_sat_s_w_RES
ret void
}
; sat.s.d: <2 x i64> operand.
define void @llvm_mips_sat_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_sat_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_sat_s_d_RES
ret void
}
; sat.u.b: <16 x i8> operand.
define void @llvm_mips_sat_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_sat_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_sat_u_b_RES
ret void
}
; sat.u.h: <8 x i16> operand.
define void @llvm_mips_sat_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_sat_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_sat_u_h_RES
ret void
}
; sat.u.w: <4 x i32> operand.
define void @llvm_mips_sat_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_sat_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_sat_u_w_RES
ret void
}
; sat.u.d: <2 x i64> operand.
define void @llvm_mips_sat_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_sat_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_sat_u_d_RES
ret void
}
; ============================================================================
; MSA shift-immediate intrinsic tests: llvm.mips.slli.{b,h,w,d}.
; Same load-ARG1 / intrinsic-with-immediate-7 / store-RES pattern as the
; surrounding intrinsic tests, one function per element width
; (b=<16 x i8>, h=<8 x i16>, w=<4 x i32>, d=<2 x i64>).
; The -/+ diff lines record the typed-pointer -> opaque-`ptr` migration.
; ============================================================================
define void @llvm_mips_slli_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_slli_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_slli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_slli_b_RES
ret void
}
define void @llvm_mips_slli_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_slli_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_slli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_slli_h_RES
ret void
}
define void @llvm_mips_slli_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_slli_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_slli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_slli_w_RES
ret void
}
define void @llvm_mips_slli_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_slli_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_slli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_slli_d_RES
ret void
}
; ============================================================================
; MSA shift-immediate intrinsic tests: llvm.mips.srai.{b,h,w,d}.
; Load @..._ARG1, apply the intrinsic with immediate 7, store to @..._RES;
; one function per element width.  The -/+ diff lines record the
; typed-pointer -> opaque-`ptr` migration.
; ============================================================================
define void @llvm_mips_srai_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srai_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_srai_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_srai_b_RES
ret void
}
define void @llvm_mips_srai_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srai_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_srai_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_srai_h_RES
ret void
}
define void @llvm_mips_srai_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srai_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_srai_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_srai_w_RES
ret void
}
define void @llvm_mips_srai_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srai_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_srai_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_srai_d_RES
ret void
}
; ============================================================================
; MSA shift-immediate intrinsic tests: llvm.mips.srari.{b,h,w,d}.
; Same load / intrinsic-with-immediate-7 / store pattern, one function per
; element width.  The -/+ diff lines record the typed-pointer -> opaque-`ptr`
; migration.
; ============================================================================
define void @llvm_mips_srari_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srari_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_srari_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_srari_b_RES
ret void
}
define void @llvm_mips_srari_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srari_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_srari_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_srari_h_RES
ret void
}
define void @llvm_mips_srari_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srari_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_srari_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_srari_w_RES
ret void
}
define void @llvm_mips_srari_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srari_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_srari_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_srari_d_RES
ret void
}
; ============================================================================
; MSA shift-immediate intrinsic tests: llvm.mips.srli.{b,h,w,d}.
; Same load / intrinsic-with-immediate-7 / store pattern, one function per
; element width.  The -/+ diff lines record the typed-pointer -> opaque-`ptr`
; migration.
; ============================================================================
define void @llvm_mips_srli_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srli_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_srli_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_srli_b_RES
ret void
}
define void @llvm_mips_srli_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srli_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_srli_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_srli_h_RES
ret void
}
define void @llvm_mips_srli_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srli_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_srli_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_srli_w_RES
ret void
}
define void @llvm_mips_srli_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srli_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_srli_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_srli_d_RES
ret void
}
; ============================================================================
; MSA shift-immediate intrinsic tests: llvm.mips.srlri.{b,h,w,d}.
; Same load / intrinsic-with-immediate-7 / store pattern, one function per
; element width.  The -/+ diff lines record the typed-pointer -> opaque-`ptr`
; migration.
; ============================================================================
define void @llvm_mips_srlri_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_srlri_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_srlri_b_RES
ret void
}
define void @llvm_mips_srlri_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_srlri_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_srlri_h_RES
ret void
}
define void @llvm_mips_srlri_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_srlri_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_srlri_w_RES
ret void
}
define void @llvm_mips_srlri_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_srlri_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_srlri_d_RES
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck -check-prefix=BIGENDIAN %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck -check-prefix=LITENDIAN %s
; ============================================================================
; Bitcast tests, <16 x i8> source.  Each test volatile-loads the source
; vector, feeds it through an MSA addv/fadd intrinsic, bitcasts to the target
; vector type, feeds the result through the target-type intrinsic, and stores
; to %dst.  Per the in-file comments, the intrinsic calls on each side of the
; bitcast exist to keep DAG combines from folding the cast away; v8f16 has no
; such operations, so its test has no post-cast intrinsic.  The -/+ diff
; lines record the typed-pointer -> opaque-`ptr` migration.
; ============================================================================
-define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
+define void @v16i8_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R3]],
; BIGENDIAN: .size v16i8_to_v16i8
-define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
+define void @v16i8_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
+define void @v16i8_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R2]],
; BIGENDIAN: .size v16i8_to_v8f16
-define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
+define void @v16i8_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v16i8_to_v4i32
-define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
+define void @v16i8_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v16i8_to_v4f32
-define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
+define void @v16i8_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v16i8_to_v2i64
-define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
+define void @v16i8_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <16 x i8>, <16 x i8>* %src
+ %0 = load volatile <16 x i8>, ptr %src
%1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
%2 = bitcast <16 x i8> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v16i8_to_v2f64
; ============================================================================
; Bitcast tests, <8 x i16> source.  Same structure as the v16i8_to_* group:
; volatile load, addv.h intrinsic, bitcast to the target type, target-type
; intrinsic (omitted for v8f16, which has no operations to block the DAG
; combine — see in-file comment), store to %dst.  The -/+ diff lines record
; the typed-pointer -> opaque-`ptr` migration.
; ============================================================================
-define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
+define void @v8i16_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R4]],
; BIGENDIAN: .size v8i16_to_v16i8
-define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
+define void @v8i16_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
+define void @v8i16_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.h [[R2]],
; BIGENDIAN: .size v8i16_to_v8f16
-define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
+define void @v8i16_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v8i16_to_v4i32
-define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
+define void @v8i16_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v8i16_to_v4f32
-define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
+define void @v8i16_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v8i16_to_v2i64
-define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
+define void @v8i16_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x i16>, <8 x i16>* %src
+ %0 = load volatile <8 x i16>, ptr %src
%1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
%2 = bitcast <8 x i16> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
;----
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
+define void @v8f16_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <16 x i8>
%2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %dst
+ store <16 x i8> %2, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
+define void @v8f16_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <8 x i16>
%2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %dst
+ store <8 x i16> %2, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
+define void @v8f16_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <8 x half>
- store <8 x half> %1, <8 x half>* %dst
+ store <8 x half> %1, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
+define void @v8f16_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <4 x i32>
%2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %dst
+ store <4 x i32> %2, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
+define void @v8f16_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <4 x float>
%2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
- store <4 x float> %2, <4 x float>* %dst
+ store <4 x float> %2, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
+define void @v8f16_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <2 x i64>
%2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %dst
+ store <2 x i64> %2, ptr %dst
ret void
}
; We can't prevent the (bitcast (load X)) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
+define void @v8f16_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <8 x half>, <8 x half>* %src
+ %0 = load volatile <8 x half>, ptr %src
%1 = bitcast <8 x half> %0 to <2 x double>
%2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
- store <2 x double> %2, <2 x double>* %dst
+ store <2 x double> %2, ptr %dst
ret void
}
; BIGENDIAN: .size v8f16_to_v2f64
;----
-define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
+define void @v4i32_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R4]],
; BIGENDIAN: .size v4i32_to_v16i8
-define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
+define void @v4i32_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
+define void @v4i32_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R2]],
; BIGENDIAN: .size v4i32_to_v8f16
-define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
+define void @v4i32_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R3]],
; BIGENDIAN: .size v4i32_to_v4i32
-define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
+define void @v4i32_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R3]],
; BIGENDIAN: .size v4i32_to_v4f32
-define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
+define void @v4i32_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v4i32_to_v2i64
-define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
+define void @v4i32_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x i32>, <4 x i32>* %src
+ %0 = load volatile <4 x i32>, ptr %src
%1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
%2 = bitcast <4 x i32> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v4i32_to_v2f64
-define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
+define void @v4f32_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R4]],
; BIGENDIAN: .size v4f32_to_v16i8
-define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
+define void @v4f32_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
+define void @v4f32_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R2]],
; BIGENDIAN: .size v4f32_to_v8f16
-define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
+define void @v4f32_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R3]],
; BIGENDIAN: .size v4f32_to_v4i32
-define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
+define void @v4f32_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R3]],
; BIGENDIAN: .size v4f32_to_v4f32
-define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
+define void @v4f32_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v4f32_to_v2i64
-define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
+define void @v4f32_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <4 x float>, <4 x float>* %src
+ %0 = load volatile <4 x float>, ptr %src
%1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
%2 = bitcast <4 x float> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R4]],
; BIGENDIAN: .size v4f32_to_v2f64
-define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
+define void @v2i64_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R4]],
; BIGENDIAN: .size v2i64_to_v16i8
-define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
+define void @v2i64_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
+define void @v2i64_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R2]],
; BIGENDIAN: .size v2i64_to_v8f16
-define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
+define void @v2i64_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v2i64_to_v4i32
-define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
+define void @v2i64_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v2i64_to_v4f32
-define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
+define void @v2i64_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R3]],
; BIGENDIAN: .size v2i64_to_v2i64
-define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
+define void @v2i64_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x i64>, <2 x i64>* %src
+ %0 = load volatile <2 x i64>, ptr %src
%1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
%2 = bitcast <2 x i64> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R3]],
; BIGENDIAN: .size v2i64_to_v2f64
-define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
+define void @v2f64_to_v16i8(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <16 x i8>
%3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
- store <16 x i8> %3, <16 x i8>* %dst
+ store <16 x i8> %3, ptr %dst
ret void
}
; BIGENDIAN: st.b [[R4]],
; BIGENDIAN: .size v2f64_to_v16i8
-define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
+define void @v2f64_to_v8i16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <8 x i16>
%3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
- store <8 x i16> %3, <8 x i16>* %dst
+ store <8 x i16> %3, ptr %dst
ret void
}
; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
; are no operations for v8f16 to put in the way.
-define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
+define void @v2f64_to_v8f16(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <8 x half>
- store <8 x half> %2, <8 x half>* %dst
+ store <8 x half> %2, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R2]],
; BIGENDIAN: .size v2f64_to_v8f16
-define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
+define void @v2f64_to_v4i32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <4 x i32>
%3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
- store <4 x i32> %3, <4 x i32>* %dst
+ store <4 x i32> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v2f64_to_v4i32
-define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
+define void @v2f64_to_v4f32(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <4 x float>
%3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
- store <4 x float> %3, <4 x float>* %dst
+ store <4 x float> %3, ptr %dst
ret void
}
; BIGENDIAN: st.w [[R4]],
; BIGENDIAN: .size v2f64_to_v4f32
-define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
+define void @v2f64_to_v2i64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <2 x i64>
%3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
- store <2 x i64> %3, <2 x i64>* %dst
+ store <2 x i64> %3, ptr %dst
ret void
}
; BIGENDIAN: st.d [[R3]],
; BIGENDIAN: .size v2f64_to_v2i64
-define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
+define void @v2f64_to_v2f64(ptr %src, ptr %dst) nounwind {
entry:
- %0 = load volatile <2 x double>, <2 x double>* %src
+ %0 = load volatile <2 x double>, ptr %src
%1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
%2 = bitcast <2 x double> %1 to <2 x double>
%3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
- store <2 x double> %3, <2 x double>* %dst
+ store <2 x double> %3, ptr %dst
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=CHECK,MIPS
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=CHECK,MIPSEL
-define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @and_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: and_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: and.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = and <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @and_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: and_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: and.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = and <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @and_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: and_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: and.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = and <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @and_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: and_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: and.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = and <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @and_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: and_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: andi.b $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @and_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: and_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: and.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @and_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: and_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: and.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @and_v2i64_i(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: and_v2i64_i:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.d $w0, 1
; MIPSEL-NEXT: and.v $w0, $w1, $w0
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = and <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @or_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: or_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: or.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = or <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @or_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: or_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: or.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = or <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @or_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: or_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: or.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = or <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @or_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: or_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: or.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = or <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @or_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: or_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: ori.b $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @or_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: or_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: or.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @or_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: or_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: or.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @or_v2i64_i(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: or_v2i64_i:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.d $w0, 3
; MIPSEL-NEXT: or.v $w0, $w1, $w0
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = or <2 x i64> %1, <i64 3, i64 3>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @nor_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: nor_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: nor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = or <16 x i8> %1, %2
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
ret void
}
-define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @nor_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: nor_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: nor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = or <8 x i16> %1, %2
%4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
ret void
}
-define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @nor_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: nor_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: nor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = or <4 x i32> %1, %2
%4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
ret void
}
-define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @nor_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: nor_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: nor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = or <2 x i64> %1, %2
%4 = xor <2 x i64> %3, <i64 -1, i64 -1>
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
ret void
}
-define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @nor_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: nor_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: nori.b $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @nor_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: nor_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: nor.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @nor_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: nor_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: nor.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1>
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @nor_v2i64_i(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: nor_v2i64_i:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.d $w0, 1
; MIPSEL-NEXT: nor.v $w0, $w1, $w0
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = or <2 x i64> %1, <i64 1, i64 1>
%3 = xor <2 x i64> %2, <i64 -1, i64 -1>
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @xor_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: xor_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: xor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = xor <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @xor_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: xor_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: xor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = xor <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @xor_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: xor_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: xor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = xor <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @xor_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: xor_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: xor.v $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = xor <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @xor_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: xor_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: xori.b $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @xor_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: xor_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: xor.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @xor_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: xor_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: xor.v $w0, $w0, $w1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @xor_v2i64_i(ptr %c, ptr %a) nounwind {
; MIPS-LABEL: xor_v2i64_i:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.d $w0, 3
; MIPSEL-NEXT: xor.v $w0, $w1, $w0
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = xor <2 x i64> %1, <i64 3, i64 3>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
; sll_* : vector shl must select sll.{b,h,w,d} (slli.* for splat-constant
; shift amounts). sra_* below: vector ashr -> sra.* / srai.*.
; Code below is part of a generated test diff and is left byte-identical.
-define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sll_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sll_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: sll.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = shl <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sll_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sll_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: sll.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = shl <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sll_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sll_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: sll.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = shl <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sll_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sll_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: sll.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = shl <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sll_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sll_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: slli.b $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sll_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sll_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: slli.h $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sll_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sll_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: slli.w $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sll_v2i64_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sll_v2i64_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: slli.d $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = shl <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
; sra_* : arithmetic shift right -> sra.* / srai.*.
-define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sra_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sra_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: sra.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = ashr <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sra_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sra_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: sra.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = ashr <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sra_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sra_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: sra.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = ashr <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sra_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: sra_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: sra.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = ashr <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sra_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sra_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: srai.b $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sra_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sra_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: srai.h $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sra_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sra_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: srai.w $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sra_v2i64_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: sra_v2i64_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: srai.d $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = ashr <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
; srl_* : logical shift right (lshr) must select srl.{b,h,w,d}, or srli.* for
; splat-constant shift amounts. Part of a generated test diff; code unchanged.
-define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @srl_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: srl_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: srl.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = lshr <16 x i8> %1, %2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @srl_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: srl_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: srl.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = lshr <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @srl_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: srl_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: srl.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = lshr <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @srl_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: srl_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: srl.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = lshr <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @srl_v16i8_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: srl_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: srli.b $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @srl_v8i16_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: srl_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: srli.h $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @srl_v4i32_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: srl_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: srli.w $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @srl_v2i64_i(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: srl_v2i64_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: srli.d $w0, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = lshr <2 x i64> %1, <i64 1, i64 1>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
; ctpop_* : llvm.ctpop.* must select pcnt.{b,h,w,d}.
; ctlz_*  : llvm.ctlz.* must select nlzc.{b,h,w,d}.
; NOTE(review): the ctlz calls below appear without the usual i1 zero-undef
; operand of llvm.ctlz — possibly elided in extraction; verify against upstream.
; Part of a generated test diff; code unchanged.
-define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ctpop_v16i8(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctpop_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: pcnt.b $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = tail call <16 x i8> @llvm.ctpop.v16i8 (<16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ctpop_v8i16(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctpop_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: pcnt.h $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = tail call <8 x i16> @llvm.ctpop.v8i16 (<8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ctpop_v4i32(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctpop_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: pcnt.w $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = tail call <4 x i32> @llvm.ctpop.v4i32 (<4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ctpop_v2i64(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctpop_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: pcnt.d $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = tail call <2 x i64> @llvm.ctpop.v2i64 (<2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ctlz_v16i8(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctlz_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: nlzc.b $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ctlz_v8i16(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctlz_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: nlzc.h $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ctlz_v4i32(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctlz_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: nlzc.w $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ctlz_v2i64(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: ctlz_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: nlzc.d $w0, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
; bsel_*  : and/or masking patterns that must select the MSA bit-select family
;           (bsel.v, bseli.b, bmnz.v).
; binsl_* : mask-insert with a left-aligned constant mask -> binsli.{b,h,w,d}.
; binsr_* : mask-insert with a right-aligned constant mask -> binsri.{b,h,w,d}.
; bclr_*  : and with ~(1 << n) per lane -> bclr.{b,h,w,d}.
; Part of a generated test diff; code unchanged. Several constant-vector spans
; in this region are visibly truncated (see NOTE(review) markers).
-define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
+define void @bsel_v16i8(ptr %c, ptr %a, ptr %b, ptr %m) nounwind {
; CHECK-LABEL: bsel_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($7)
; CHECK-NEXT: bmnz.v $w2, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w2, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
- %3 = load <16 x i8>, <16 x i8>* %m
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
+ %3 = load <16 x i8>, ptr %m
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
; NOTE(review): the xor constant above shows only 12 of 16 lanes and the
; defs of %5/%6 are missing — extraction truncation; verify against upstream.
%7 = or <16 x i8> %5, %6
; bmnz is the same operation
; (vselect Mask, IfSet, IfClr) -> (BMNZ IfClr, IfSet, Mask)
- store <16 x i8> %7, <16 x i8>* %c
+ store <16 x i8> %7, ptr %c
ret void
}
-define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
+define void @bsel_v16i8_i(ptr %c, ptr %a, ptr %m) nounwind {
; CHECK-LABEL: bsel_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: bseli.b $w1, $w0, 6
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w1, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %m
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %m
%3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
i8 -1, i8 -1, i8 -1, i8 -1,
; NOTE(review): constant vectors here are truncated and %4/%5 defs are
; partially missing — extraction artifact; verify against upstream.
i8 6, i8 6, i8 6, i8 6,
i8 6, i8 6, i8 6, i8 6>, %2
%6 = or <16 x i8> %4, %5
- store <16 x i8> %6, <16 x i8>* %c
+ store <16 x i8> %6, ptr %c
ret void
}
-define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bsel_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bsel_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: bsel.v $w2, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w2, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6,
i16 6, i16 6, i16 6, i16 6>
%4 = and <8 x i16> %2, <i16 65529, i16 65529, i16 65529, i16 65529,
i16 65529, i16 65529, i16 65529, i16 65529>
%5 = or <8 x i16> %3, %4
- store <8 x i16> %5, <8 x i16>* %c
+ store <8 x i16> %5, ptr %c
ret void
}
-define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bsel_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bsel_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: bsel.v $w2, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w2, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
%4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
%5 = or <4 x i32> %3, %4
- store <4 x i32> %5, <4 x i32>* %c
+ store <4 x i32> %5, ptr %c
ret void
}
-define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bsel_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; MIPS-LABEL: bsel_v2i64:
; MIPS: # %bb.0:
; MIPS-NEXT: ldi.d $w0, 6
; MIPSEL-NEXT: bsel.v $w0, $w2, $w1
; MIPSEL-NEXT: jr $ra
; MIPSEL-NEXT: st.d $w0, 0($4)
; NOTE(review): prefix jumps MIPS -> MIPSEL mid-sequence — likely elided lines;
; verify against upstream.
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = and <2 x i64> %1, <i64 6, i64 6>
%4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
%5 = or <2 x i64> %3, %4
- store <2 x i64> %5, <2 x i64>* %c
+ store <2 x i64> %5, ptr %c
ret void
}
-define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @binsl_v16i8_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsl_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: binsli.b $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w1, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192,
i8 192, i8 192, i8 192, i8 192,
i8 192, i8 192, i8 192, i8 192,
; NOTE(review): constant vectors truncated (12 of 16 lanes shown) and the %4
; def line is missing — extraction artifact; verify against upstream.
i8 63, i8 63, i8 63, i8 63,
i8 63, i8 63, i8 63, i8 63>
%5 = or <16 x i8> %3, %4
- store <16 x i8> %5, <16 x i8>* %c
+ store <16 x i8> %5, ptr %c
ret void
}
-define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @binsl_v8i16_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsl_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: binsli.h $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w1, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152,
i16 49152, i16 49152, i16 49152, i16 49152>
%4 = and <8 x i16> %2, <i16 16383, i16 16383, i16 16383, i16 16383,
i16 16383, i16 16383, i16 16383, i16 16383>
%5 = or <8 x i16> %3, %4
- store <8 x i16> %5, <8 x i16>* %c
+ store <8 x i16> %5, ptr %c
ret void
}
-define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @binsl_v4i32_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsl_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: binsli.w $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w1, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
%4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
%5 = or <4 x i32> %3, %4
- store <4 x i32> %5, <4 x i32>* %c
+ store <4 x i32> %5, ptr %c
ret void
}
-define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @binsl_v2i64_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsl_v2i64_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: binsli.d $w1, $w0, 60
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w1, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
%4 = and <2 x i64> %2, <i64 7, i64 7>
%5 = or <2 x i64> %3, %4
; issue. If the mask doesn't fit within a 10-bit immediate, it gets
; legalized into a constant pool. We should add a test to cover the
; other cases once they correctly select binsli.d.
- store <2 x i64> %5, <2 x i64>* %c
+ store <2 x i64> %5, ptr %c
ret void
}
-define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @binsr_v16i8_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsr_v16i8_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: binsri.b $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w1, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
%4 = and <16 x i8> %2, <i8 252, i8 252, i8 252, i8 252,
i8 252, i8 252, i8 252, i8 252,
i8 252, i8 252, i8 252, i8 252>
; NOTE(review): the %4 constant shows only 12 of 16 lanes for a <16 x i8>
; splat of 252 — extraction truncation; verify against upstream.
%5 = or <16 x i8> %3, %4
- store <16 x i8> %5, <16 x i8>* %c
+ store <16 x i8> %5, ptr %c
ret void
}
-define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @binsr_v8i16_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsr_v8i16_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: binsri.h $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w1, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3,
i16 3, i16 3, i16 3, i16 3>
%4 = and <8 x i16> %2, <i16 65532, i16 65532, i16 65532, i16 65532,
i16 65532, i16 65532, i16 65532, i16 65532>
%5 = or <8 x i16> %3, %4
- store <8 x i16> %5, <8 x i16>* %c
+ store <8 x i16> %5, ptr %c
ret void
}
-define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @binsr_v4i32_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsr_v4i32_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: binsri.w $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w1, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
%4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
%5 = or <4 x i32> %3, %4
- store <4 x i32> %5, <4 x i32>* %c
+ store <4 x i32> %5, ptr %c
ret void
}
-define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @binsr_v2i64_i(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: binsr_v2i64_i:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: binsri.d $w1, $w0, 1
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w1, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = and <2 x i64> %1, <i64 3, i64 3>
%4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
%5 = or <2 x i64> %3, %4
- store <2 x i64> %5, <2 x i64>* %c
+ store <2 x i64> %5, ptr %c
ret void
}
-define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bclr_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bclr_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: bclr.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%5 = and <16 x i8> %1, %4
- store <16 x i8> %5, <16 x i8>* %c
+ store <16 x i8> %5, ptr %c
ret void
}
-define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bclr_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bclr_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: bclr.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%5 = and <8 x i16> %1, %4
- store <8 x i16> %5, <8 x i16>* %c
+ store <8 x i16> %5, ptr %c
ret void
}
-define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bclr_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bclr_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: bclr.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
%5 = and <4 x i32> %1, %4
- store <4 x i32> %5, <4 x i32>* %c
+ store <4 x i32> %5, ptr %c
ret void
}
-define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bclr_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bclr_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: bclr.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %3, <i64 -1, i64 -1>
%5 = and <2 x i64> %1, %4
- store <2 x i64> %5, <2 x i64>* %c
+ store <2 x i64> %5, ptr %c
ret void
}
-define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bset_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bset_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: bset.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = or <16 x i8> %1, %3
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
ret void
}
-define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bset_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bset_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: bset.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = or <8 x i16> %1, %3
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
ret void
}
-define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bset_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bset_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: bset.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = or <4 x i32> %1, %3
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
ret void
}
-define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bset_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bset_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: bset.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = or <2 x i64> %1, %3
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
ret void
}
-define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bneg_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bneg_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($6)
; CHECK-NEXT: bneg.b $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
%3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
%4 = xor <16 x i8> %1, %3
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
ret void
}
-define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bneg_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bneg_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($6)
; CHECK-NEXT: bneg.h $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
%3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
%4 = xor <8 x i16> %1, %3
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
ret void
}
-define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bneg_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bneg_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($6)
; CHECK-NEXT: bneg.w $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
%3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
%4 = xor <4 x i32> %1, %3
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
ret void
}
-define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bneg_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: bneg_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($6)
; CHECK-NEXT: bneg.d $w0, $w1, $w0
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
%3 = shl <2 x i64> <i64 1, i64 1>, %2
%4 = xor <2 x i64> %1, %3
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
ret void
}
-define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bclri_v16i8(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bclri_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: andi.b $w0, $w0, 247
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = xor <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>,
<i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%3 = and <16 x i8> %1, %2
; bclri.b and andi.b are exactly equivalent.
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
ret void
}
-define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bclri_v8i16(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bclri_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: bclri.h $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = xor <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>,
<i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%3 = and <8 x i16> %1, %2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
ret void
}
-define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bclri_v4i32(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bclri_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: bclri.w $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = xor <4 x i32> <i32 8, i32 8, i32 8, i32 8>,
<i32 -1, i32 -1, i32 -1, i32 -1>
%3 = and <4 x i32> %1, %2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
ret void
}
-define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bclri_v2i64(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bclri_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: bclri.d $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = xor <2 x i64> <i64 8, i64 8>,
<i64 -1, i64 -1>
%3 = and <2 x i64> %1, %2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
ret void
}
-define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bseti_v16i8(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bseti_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: bseti.b $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bseti_v8i16(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bseti_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: bseti.h $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bseti_v4i32(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bseti_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: bseti.w $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bseti_v2i64(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bseti_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: bseti.d $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = or <2 x i64> %1, <i64 8, i64 8>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
-define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bnegi_v16i8(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bnegi_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.b $w0, 0($5)
; CHECK-NEXT: bnegi.b $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($4)
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
%2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
ret void
}
-define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bnegi_v8i16(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bnegi_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.h $w0, 0($5)
; CHECK-NEXT: bnegi.h $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($4)
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
%2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
ret void
}
-define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bnegi_v4i32(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bnegi_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.w $w0, 0($5)
; CHECK-NEXT: bnegi.w $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($4)
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
%2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
ret void
}
-define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bnegi_v2i64(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: bnegi_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: ld.d $w0, 0($5)
; CHECK-NEXT: bnegi.d $w0, $w0, 3
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($4)
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
%2 = xor <2 x i64> %1, <i64 8, i64 8>
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
ret void
}
define void @llvm_mips_bmnzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
- store volatile <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store volatile <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
%3 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
- store volatile <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store volatile <16 x i8> %3, ptr @llvm_mips_bmnzi_b_RES
%4 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
- store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store <16 x i8> %4, ptr @llvm_mips_bmnzi_b_RES
ret void
}
; CHECK-LABEL: llvm_mips_bmnzi_b_test:
define void @llvm_mips_bmzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
- store volatile <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store volatile <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
%3 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
- store volatile <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store volatile <16 x i8> %3, ptr @llvm_mips_bmnzi_b_RES
%4 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
- store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store <16 x i8> %4, ptr @llvm_mips_bmnzi_b_RES
ret void
}
; CHECK-LABEL: llvm_mips_bmzi_b_test:
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
-define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ceq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ceq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size ceq_v16i8
}
-define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ceq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ceq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size ceq_v8i16
}
-define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ceq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ceq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ceq_v4i32
}
-define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ceq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ceq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp eq <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ceq_v2i64
}
-define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cle_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: cle_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size cle_s_v16i8
}
-define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cle_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: cle_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size cle_s_v8i16
}
-define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cle_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: cle_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size cle_s_v4i32
}
-define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cle_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: cle_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size cle_s_v2i64
}
-define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cle_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: cle_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size cle_u_v16i8
}
-define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cle_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: cle_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size cle_u_v8i16
}
-define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cle_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: cle_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size cle_u_v4i32
}
-define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cle_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cle_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: cle_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size cle_u_v2i64
}
-define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @clt_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: clt_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clt_s_v16i8
}
-define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @clt_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: clt_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clt_s_v8i16
}
-define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @clt_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: clt_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clt_s_v4i32
}
-define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @clt_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: clt_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clt_s_v2i64
}
-define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @clt_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: clt_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clt_u_v16i8
}
-define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @clt_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: clt_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clt_u_v8i16
}
-define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @clt_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: clt_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clt_u_v4i32
}
-define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @clt_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: clt_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: clt_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clt_u_v2i64
}
; There is no != comparison, but test it anyway since we've had legalizer
; issues in this area.
-define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cne_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cne_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <16 x i8> %1, %2
%4 = sext <16 x i1> %3 to <16 x i8>
; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; CHECK-DAG: xori.b [[R3]], [[R3]], 255
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size cne_v16i8
}
; There is no != comparison, but test it anyway since we've had legalizer
; issues in this area.
-define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cne_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cne_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <8 x i16> %1, %2
%4 = sext <8 x i1> %3 to <8 x i16>
; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but thats an optimisation issue
; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size cne_v8i16
}
; There is no != comparison, but test it anyway since we've had legalizer
; issues in this area.
-define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cne_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cne_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <4 x i32> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but thats an optimisation issue
; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size cne_v4i32
}
; There is no != comparison, but test it anyway since we've had legalizer
; issues in this area.
-define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cne_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: cne_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ne <2 x i64> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but thats an optimisation issue
; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size cne_v2i64
}
; icmp eq against a splat-of-1 constant: expects the immediate form ceqi.b.
-define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ceqi_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: ceqi_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
; CHECK-DAG: ceqi.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size ceqi_v16i8
}
; Same eq-with-immediate pattern for <8 x i16>: expects ceqi.h.
-define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ceqi_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: ceqi_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
; CHECK-DAG: ceqi.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size ceqi_v8i16
}
; Same eq-with-immediate pattern for <4 x i32>: expects ceqi.w.
-define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ceqi_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: ceqi_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
; CHECK-DAG: ceqi.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ceqi_v4i32
}
; Same eq-with-immediate pattern for <2 x i64>: expects ceqi.d.
-define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ceqi_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: ceqi_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
; CHECK-DAG: ceqi.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ceqi_v2i64
}
; Signed sle against splat-of-1: expects the immediate form clei_s.b.
-define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clei_s_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: clei_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
; CHECK-DAG: clei_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clei_s_v16i8
}
; Same signed sle-with-immediate for <8 x i16>: expects clei_s.h.
-define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clei_s_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: clei_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
; CHECK-DAG: clei_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clei_s_v8i16
}
; Same signed sle-with-immediate for <4 x i32>: expects clei_s.w.
-define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clei_s_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: clei_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
; CHECK-DAG: clei_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clei_s_v4i32
}
; Same signed sle-with-immediate for <2 x i64>: expects clei_s.d.
-define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clei_s_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: clei_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
; CHECK-DAG: clei_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clei_s_v2i64
}
; Unsigned ule against splat-of-1: expects the immediate form clei_u.b.
-define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clei_u_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: clei_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
; CHECK-DAG: clei_u.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clei_u_v16i8
}
; Same unsigned ule-with-immediate for <8 x i16>: expects clei_u.h.
-define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clei_u_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: clei_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
; CHECK-DAG: clei_u.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clei_u_v8i16
}
; Same unsigned ule-with-immediate for <4 x i32>: expects clei_u.w.
-define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clei_u_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: clei_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
; CHECK-DAG: clei_u.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clei_u_v4i32
}
; Same unsigned ule-with-immediate for <2 x i64>: expects clei_u.d.
-define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clei_u_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: clei_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
; CHECK-DAG: clei_u.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clei_u_v2i64
}
; Signed slt against splat-of-1: expects the immediate form clti_s.b.
-define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clti_s_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: clti_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = sext <16 x i1> %2 to <16 x i8>
; CHECK-DAG: clti_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clti_s_v16i8
}
; Same signed slt-with-immediate for <8 x i16>: expects clti_s.h.
-define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clti_s_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: clti_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = sext <8 x i1> %2 to <8 x i16>
; CHECK-DAG: clti_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clti_s_v8i16
}
; Same signed slt-with-immediate for <4 x i32>: expects clti_s.w.
-define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clti_s_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: clti_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = sext <4 x i1> %2 to <4 x i32>
; CHECK-DAG: clti_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clti_s_v4i32
}
; Same signed slt-with-immediate for <2 x i64>: expects clti_s.d.
-define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clti_s_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: clti_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
%3 = sext <2 x i1> %2 to <2 x i64>
; CHECK-DAG: clti_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clti_s_v2i64
}
; Unsigned ult against splat-of-2: expects the immediate form clti_u.b with
; the constant kept as 2 (not canonicalised to ule 1).
-define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clti_u_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: clti_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <16 x i8> %1, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
%3 = sext <16 x i1> %2 to <16 x i8>
; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 2
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size clti_u_v16i8
}
; Same unsigned ult-with-immediate for <8 x i16>: expects clti_u.h.
-define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clti_u_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: clti_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <8 x i16> %1, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
%3 = sext <8 x i1> %2 to <8 x i16>
; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 2
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size clti_u_v8i16
}
; Same unsigned ult-with-immediate for <4 x i32>: expects clti_u.w.
-define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clti_u_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: clti_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
%3 = sext <4 x i1> %2 to <4 x i32>
; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 2
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size clti_u_v4i32
}
; Same unsigned ult-with-immediate for <2 x i64>: expects clti_u.d.
-define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clti_u_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: clti_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <2 x i64> %1, <i64 2, i64 2>
%3 = sext <2 x i1> %2 to <2 x i64>
; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 2
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size clti_u_v2i64
}
; Signed compare feeding a select: expects clt_s + a bit-select (bmnz.v for
; the byte case, since bmnz.v performs the same operation as bsel.v here).
-define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                          <16 x i8>* %c) nounwind {
+define void @bsel_s_v16i8(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>, <16 x i8>* %c
+ %3 = load <16 x i8>, ptr %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
; bmnz.v is the same operation
; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
- store <16 x i8> %5, <16 x i8>* %d
+ store <16 x i8> %5, ptr %d
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size bsel_s_v16i8
}
; Same signed compare+select for <8 x i16>: expects clt_s.h then bsel.v.
-define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                          <8 x i16>* %c) nounwind {
+define void @bsel_s_v8i16(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>, <8 x i16>* %c
+ %3 = load <8 x i16>, ptr %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <8 x i16> %5, <8 x i16>* %d
+ store <8 x i16> %5, ptr %d
; CHECK-DAG: st.h [[R4]], 0($4)
ret void
; CHECK: .size bsel_s_v8i16
}
; Same signed compare+select for <4 x i32>: expects clt_s.w then bsel.v.
-define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                          <4 x i32>* %c) nounwind {
+define void @bsel_s_v4i32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>, <4 x i32>* %c
+ %3 = load <4 x i32>, ptr %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <4 x i32> %5, <4 x i32>* %d
+ store <4 x i32> %5, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bsel_s_v4i32
}
; Same signed compare+select for <2 x i64>: expects clt_s.d then bsel.v.
-define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                          <2 x i64>* %c) nounwind {
+define void @bsel_s_v2i64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>, <2 x i64>* %c
+ %3 = load <2 x i64>, ptr %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <2 x i64> %5, <2 x i64>* %d
+ store <2 x i64> %5, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bsel_s_v2i64
}
; Unsigned compare feeding a select: expects clt_u + a bit-select (bmnz.v for
; the byte case, which performs the same operation as bsel.v here).
-define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                          <16 x i8>* %c) nounwind {
+define void @bsel_u_v16i8(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>, <16 x i8>* %c
+ %3 = load <16 x i8>, ptr %c
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
; bmnz.v is the same operation
; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
- store <16 x i8> %5, <16 x i8>* %d
+ store <16 x i8> %5, ptr %d
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size bsel_u_v16i8
}
; Same unsigned compare+select for <8 x i16>: expects clt_u.h then bsel.v.
-define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                          <8 x i16>* %c) nounwind {
+define void @bsel_u_v8i16(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
- %3 = load <8 x i16>, <8 x i16>* %c
+ %3 = load <8 x i16>, ptr %c
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <8 x i16> %5, <8 x i16>* %d
+ store <8 x i16> %5, ptr %d
; CHECK-DAG: st.h [[R4]], 0($4)
ret void
; CHECK: .size bsel_u_v8i16
}
; Same unsigned compare+select for <4 x i32>: expects clt_u.w then bsel.v.
-define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                          <4 x i32>* %c) nounwind {
+define void @bsel_u_v4i32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x i32>, <4 x i32>* %c
+ %3 = load <4 x i32>, ptr %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <4 x i32> %5, <4 x i32>* %d
+ store <4 x i32> %5, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bsel_u_v4i32
}
; Same unsigned compare+select for <2 x i64>: expects clt_u.d then bsel.v.
-define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                          <2 x i64>* %c) nounwind {
+define void @bsel_u_v2i64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bsel_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x i64>, <2 x i64>* %c
+ %3 = load <2 x i64>, ptr %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <2 x i64> %5, <2 x i64>* %d
+ store <2 x i64> %5, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bsel_u_v2i64
}
; Signed compare selecting a splat-of-1 constant: the byte case folds the
; constant into bseli.b; wider element types materialise it with ldi + bsel.v.
-define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                          <16 x i8>* %c) nounwind {
+define void @bseli_s_v16i8(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
- store <16 x i8> %4, <16 x i8>* %d
+ store <16 x i8> %4, ptr %d
; CHECK-DAG: st.b [[R4]], 0($4)
ret void
; CHECK: .size bseli_s_v16i8
}
; <8 x i16> case: no bseli.h, so the splat is loaded with ldi.h then bsel.v.
-define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                          <8 x i16>* %c) nounwind {
+define void @bseli_s_v8i16(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <8 x i16> %4, <8 x i16>* %d
+ store <8 x i16> %4, ptr %d
; CHECK-DAG: st.h [[R4]], 0($4)
ret void
; CHECK: .size bseli_s_v8i16
}
; <4 x i32> case: splat materialised with ldi.w then bsel.v.
-define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                          <4 x i32>* %c) nounwind {
+define void @bseli_s_v4i32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <4 x i32> %4, <4 x i32>* %d
+ store <4 x i32> %4, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bseli_s_v4i32
}
; <2 x i64> case: splat materialised with ldi.d then bsel.v.
-define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                          <2 x i64>* %c) nounwind {
+define void @bseli_s_v2i64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <2 x i64> %4, <2 x i64>* %d
+ store <2 x i64> %4, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bseli_s_v2i64
}
; Unsigned compare selecting a splat-of-1 constant: byte case folds the
; constant into bseli.b; wider element types use ldi + bsel.v.
-define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                          <16 x i8>* %c) nounwind {
+define void @bseli_u_v16i8(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
- store <16 x i8> %4, <16 x i8>* %d
+ store <16 x i8> %4, ptr %d
; CHECK-DAG: st.b [[R4]], 0($4)
ret void
; CHECK: .size bseli_u_v16i8
}
; <8 x i16> case: splat materialised with ldi.h then bsel.v.
-define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                          <8 x i16>* %c) nounwind {
+define void @bseli_u_v8i16(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <8 x i16> %4, <8 x i16>* %d
+ store <8 x i16> %4, ptr %d
; CHECK-DAG: st.h [[R4]], 0($4)
ret void
; CHECK: .size bseli_u_v8i16
}
; <4 x i32> case: splat materialised with ldi.w then bsel.v.
-define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                          <4 x i32>* %c) nounwind {
+define void @bseli_u_v4i32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <4 x i32> %4, <4 x i32>* %d
+ store <4 x i32> %4, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bseli_u_v4i32
}
; <2 x i64> case: splat materialised with ldi.d then bsel.v.
-define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                          <2 x i64>* %c) nounwind {
+define void @bseli_u_v2i64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
; CHECK: bseli_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
- store <2 x i64> %4, <2 x i64>* %d
+ store <2 x i64> %4, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bseli_u_v2i64
}
; icmp sgt + select of the same operands: expects the single max_s instruction.
-define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size max_s_v16i8
}
; Same signed-max pattern for <8 x i16>: expects max_s.h.
-define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size max_s_v8i16
}
; Same signed-max pattern for <4 x i32>: expects max_s.w.
-define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size max_s_v4i32
}
; Same signed-max pattern for <2 x i64>: expects max_s.d.
-define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size max_s_v2i64
}
; icmp ugt + select of the same operands: expects the single max_u instruction.
-define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size max_u_v16i8
}
; Same unsigned-max pattern for <8 x i16>: expects max_u.h.
-define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size max_u_v8i16
}
; Same unsigned-max pattern for <4 x i32>: expects max_u.w.
-define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size max_u_v4i32
}
; Same unsigned-max pattern for <2 x i64>: expects max_u.d.
-define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size max_u_v2i64
}
; icmp sge + select: the or-equal variant must still select the same
; max_s instruction (sge and sgt produce identical max semantics).
-define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_s_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size max_s_eq_v16i8
}
; Same sge-based max for <8 x i16>: expects max_s.h.
-define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_s_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size max_s_eq_v8i16
}
; Same sge-based max for <4 x i32>: expects max_s.w.
-define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_s_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size max_s_eq_v4i32
}
; Same sge-based max for <2 x i64>: expects max_s.d.
-define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_s_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_s_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sge <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size max_s_eq_v2i64
}
; max_u_eq_*: a vector unsigned >= compare (uge) feeding a select of the same
; two operands must be matched to the MSA max_u.[bhwd] instruction.
-define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_u_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size max_u_eq_v16i8
}
-define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_u_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size max_u_eq_v8i16
}
-define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_u_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size max_u_eq_v4i32
}
-define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_u_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_u_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp uge <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size max_u_eq_v2i64
}
; maxi_s_*: a signed > compare against a splat-of-1 constant feeding a select
; of (vector, splat-1) must be matched to the immediate form maxi_s.[bhwd]
; with immediate operand 1.
-define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_s_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_v16i8
}
-define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_s_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_v8i16
}
-define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_s_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_v4i32
}
-define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_s_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sgt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_v2i64
}
; maxi_u_*: an unsigned > compare against a splat-of-1 constant feeding a
; select of (vector, splat-1) must be matched to maxi_u.[bhwd] with
; immediate operand 1.
-define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_u_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_v16i8
}
-define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_u_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_v8i16
}
-define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_u_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_v4i32
}
-define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_u_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_v2i64
}
; maxi_s_eq_*: same as maxi_s_* but with the inclusive predicate (sge);
; must still select maxi_s.[bhwd] with immediate 1.
-define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_s_eq_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_eq_v16i8
}
-define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_s_eq_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_eq_v8i16
}
-define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_s_eq_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_eq_v4i32
}
-define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_s_eq_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: maxi_s_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sge <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size maxi_s_eq_v2i64
}
; maxi_u_eq_*: same as maxi_u_* but with the inclusive predicate (uge);
; must still select maxi_u.[bhwd] with immediate 1.
-define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_u_eq_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_eq_v16i8
}
-define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_u_eq_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_eq_v8i16
}
-define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_u_eq_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_eq_v4i32
}
-define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_u_eq_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: maxi_u_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp uge <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size maxi_u_eq_v2i64
}
; min_s_*: a vector signed < compare feeding a select of the same two
; operands must be matched to the MSA min_s.[bhwd] instruction.
; NOTE(review): min_s_v16i8 uses `sle` while the other min_s_* cases use
; `slt` (both lower to min_s, but `sle` duplicates the min_s_eq_v16i8
; predicate) — confirm whether `slt` was intended here.
-define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size min_s_v16i8
}
-define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size min_s_v8i16
}
-define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size min_s_v4i32
}
-define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp slt <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size min_s_v2i64
}
; min_u_*: a vector unsigned < compare (ult) feeding a select of the same
; two operands must be matched to the MSA min_u.[bhwd] instruction.
-define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size min_u_v16i8
}
-define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size min_u_v8i16
}
-define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size min_u_v4i32
}
-define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ult <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size min_u_v2i64
}
; min_s_eq_*: a vector signed <= compare (sle) feeding a select of the same
; two operands must be matched to the MSA min_s.[bhwd] instruction.
-define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_s_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size min_s_eq_v16i8
}
-define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_s_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size min_s_eq_v8i16
}
-define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_s_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size min_s_eq_v4i32
}
-define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_s_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_s_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sle <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size min_s_eq_v2i64
}
; min_u_eq_*: a vector unsigned <= compare (ule) feeding a select of the same
; two operands must be matched to the MSA min_u.[bhwd] instruction.
-define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_u_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %4, <16 x i8>* %c
+ store <16 x i8> %4, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size min_u_eq_v16i8
}
-define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_u_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <8 x i16> %1, %2
%4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %4, <8 x i16>* %c
+ store <8 x i16> %4, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size min_u_eq_v8i16
}
-define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_u_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <4 x i32> %1, %2
%4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size min_u_eq_v4i32
}
-define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_u_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_u_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ule <2 x i64> %1, %2
%4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size min_u_eq_v2i64
}
; mini_s_*: a signed < compare against a splat-of-1 constant feeding a select
; of (vector, splat-1) must be matched to the immediate form mini_s.[bhwd]
; with immediate operand 1.
-define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_s_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size mini_s_v16i8
}
-define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_s_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size mini_s_v8i16
}
-define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_s_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size mini_s_v4i32
}
-define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_s_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size mini_s_v2i64
}
; mini_u_*: an unsigned < compare against a splat-of-1 constant feeding a
; select of (vector, splat-1) must be matched to mini_u.[bhwd] with
; immediate operand 1.
-define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_u_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size mini_u_v16i8
}
-define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_u_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size mini_u_v8i16
}
-define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_u_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size mini_u_v4i32
}
-define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_u_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size mini_u_v2i64
}
; mini_s_eq_*: same as mini_s_* but with the inclusive predicate (sle);
; must still select mini_s.[bhwd] with immediate 1.
-define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_s_eq_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size mini_s_eq_v16i8
}
-define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_s_eq_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size mini_s_eq_v8i16
}
-define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_s_eq_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size mini_s_eq_v4i32
}
-define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_s_eq_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: mini_s_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size mini_s_eq_v2i64
}
; mini_u_eq_*: same as mini_u_* but with the inclusive predicate (ule);
; must still select mini_u.[bhwd] with immediate 1.
-define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_u_eq_v16i8(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_eq_v16i8:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size mini_u_eq_v16i8
}
-define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_u_eq_v8i16(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_eq_v8i16:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size mini_u_eq_v8i16
}
-define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_u_eq_v4i32(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_eq_v4i32:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
%3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size mini_u_eq_v4i32
}
-define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_u_eq_v2i64(ptr %c, ptr %a) nounwind {
; CHECK: mini_u_eq_v2i64:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
%3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
-define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @false_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: false_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
- %2 = load <4 x float>, <4 x float>* %b
+ %1 = load <4 x float>, ptr %a
+ %2 = load <4 x float>, ptr %b
%3 = fcmp false <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
ret void
; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf:
; CHECK: .size false_v4f32
}
-define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @false_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: false_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
- %2 = load <2 x double>, <2 x double>* %b
+ %1 = load <2 x double>, ptr %a
+ %2 = load <2 x double>, ptr %b
%3 = fcmp false <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
ret void
; (setcc $a, $b, SETFALSE) is always folded
; CHECK: .size false_v2f64
}
-define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @oeq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: oeq_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oeq <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size oeq_v4f32
}
-define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @oeq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: oeq_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oeq <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size oeq_v2f64
}
-define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @oge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: oge_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oge <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size oge_v4f32
}
-define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @oge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: oge_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp oge <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size oge_v2f64
}
-define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ogt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ogt_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ogt_v4f32
}
-define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ogt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ogt_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ogt_v2f64
}
-define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ole_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ole_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ole <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ole_v4f32
}
-define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ole_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ole_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ole <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ole_v2f64
}
-define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @olt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: olt_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp olt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size olt_v4f32
}
-define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @olt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: olt_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp olt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size olt_v2f64
}
-define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @one_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: one_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp one <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcne.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size one_v4f32
}
-define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @one_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: one_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp one <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcne.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size one_v2f64
}
-define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ord_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ord_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ord <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcor.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ord_v4f32
}
-define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ord_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ord_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ord <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcor.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ord_v2f64
}
-define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ueq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ueq_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ueq <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcueq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ueq_v4f32
}
-define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ueq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ueq_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ueq <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcueq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ueq_v2f64
}
-define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @uge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: uge_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uge <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size uge_v4f32
}
-define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @uge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: uge_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uge <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size uge_v2f64
}
-define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ugt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ugt_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ugt <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ugt_v4f32
}
-define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ugt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ugt_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ugt <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ugt_v2f64
}
-define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ule_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ule_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ule <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ule_v4f32
}
-define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ule_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ule_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ule <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ule_v2f64
}
-define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ult_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ult_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ult <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size ult_v4f32
}
-define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ult_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: ult_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ult <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size ult_v2f64
}
-define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @uno_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: uno_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uno <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
; CHECK-DAG: fcun.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size uno_v4f32
}
-define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @uno_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: uno_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp uno <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
; CHECK-DAG: fcun.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size uno_v2f64
}
-define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @true_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: true_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
- %2 = load <4 x float>, <4 x float>* %b
+ %1 = load <4 x float>, ptr %a
+ %2 = load <4 x float>, ptr %b
%3 = fcmp true <4 x float> %1, %2
%4 = sext <4 x i1> %3 to <4 x i32>
- store <4 x i32> %4, <4 x i32>* %c
+ store <4 x i32> %4, ptr %c
ret void
; (setcc $a, $b, SETTRUE) is always folded, so we won't get fcaf:
; CHECK: .size true_v4f32
}
-define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @true_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: true_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
- %2 = load <2 x double>, <2 x double>* %b
+ %1 = load <2 x double>, ptr %a
+ %2 = load <2 x double>, ptr %b
%3 = fcmp true <2 x double> %1, %2
%4 = sext <2 x i1> %3 to <2 x i64>
- store <2 x i64> %4, <2 x i64>* %c
+ store <2 x i64> %4, ptr %c
ret void
; (setcc $a, $b, SETTRUE) is always folded.
; CHECK: .size true_v2f64
}
-define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
- <4 x float>* %c) nounwind {
+define void @bsel_v4f32(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: bsel_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
- %3 = load <4 x float>, <4 x float>* %c
+ %3 = load <4 x float>, ptr %c
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
%4 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <4 x float> %5, <4 x float>* %d
+ store <4 x float> %5, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bsel_v4f32
}
-define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
- <2 x double>* %c) nounwind {
+define void @bsel_v2f64(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: bsel_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
- %3 = load <2 x double>, <2 x double>* %c
+ %3 = load <2 x double>, ptr %c
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
%4 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
- store <2 x double> %5, <2 x double>* %d
+ store <2 x double> %5, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bsel_v2f64
}
-define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
- <4 x float>* %c) nounwind {
+define void @bseli_v4f32(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: bseli_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
- store <4 x float> %4, <4 x float>* %d
+ store <4 x float> %4, ptr %d
; CHECK-DAG: st.w [[R4]], 0($4)
ret void
; CHECK: .size bseli_v4f32
}
-define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
- <2 x double>* %c) nounwind {
+define void @bseli_v2f64(ptr %d, ptr %a, ptr %b,
+ ptr %c) nounwind {
; CHECK: bseli_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
; Note that IfSet and IfClr are swapped since the condition is inverted
; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
- store <2 x double> %4, <2 x double>* %d
+ store <2 x double> %4, ptr %d
; CHECK-DAG: st.d [[R4]], 0($4)
ret void
; CHECK: .size bseli_v2f64
}
-define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @max_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size max_v4f32
}
-define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @max_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: max_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size max_v2f64
}
-define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @min_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_v4f32:
- %1 = load <4 x float>, <4 x float>* %a
+ %1 = load <4 x float>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x float>, <4 x float>* %b
+ %2 = load <4 x float>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x float> %3, <4 x float>* %c
+ store <4 x float> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size min_v4f32
}
-define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @min_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK: min_v2f64:
- %1 = load <2 x double>, <2 x double>* %a
+ %1 = load <2 x double>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x double>, <2 x double>* %b
+ %2 = load <2 x double>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x double> %3, <2 x double>* %c
+ store <2 x double> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
define void @llvm_mips_copy_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_copy_s_b_ARG1
%1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_s_b_RES
+ store i32 %1, ptr @llvm_mips_copy_s_b_RES
ret void
}
define void @llvm_mips_copy_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_copy_s_h_ARG1
%1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_s_h_RES
+ store i32 %1, ptr @llvm_mips_copy_s_h_RES
ret void
}
define void @llvm_mips_copy_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_copy_s_w_ARG1
%1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_s_w_RES
+ store i32 %1, ptr @llvm_mips_copy_s_w_RES
ret void
}
define void @llvm_mips_copy_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_copy_s_d_ARG1
%1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
- store i64 %1, i64* @llvm_mips_copy_s_d_RES
+ store i64 %1, ptr @llvm_mips_copy_s_d_RES
ret void
}
define void @llvm_mips_copy_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_copy_u_b_ARG1
%1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_u_b_RES
+ store i32 %1, ptr @llvm_mips_copy_u_b_RES
ret void
}
define void @llvm_mips_copy_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_copy_u_h_ARG1
%1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_u_h_RES
+ store i32 %1, ptr @llvm_mips_copy_u_h_RES
ret void
}
define void @llvm_mips_copy_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_copy_u_w_ARG1
%1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1)
- store i32 %1, i32* @llvm_mips_copy_u_w_RES
+ store i32 %1, ptr @llvm_mips_copy_u_w_RES
ret void
}
define void @llvm_mips_copy_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_copy_u_d_ARG1
%1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
- store i64 %1, i64* @llvm_mips_copy_u_d_RES
+ store i64 %1, ptr @llvm_mips_copy_u_d_RES
ret void
}
define void @llvm_mips_insert_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insert_b_ARG1
- %1 = load i32, i32* @llvm_mips_insert_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_insert_b_ARG1
+ %1 = load i32, ptr @llvm_mips_insert_b_ARG3
%2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_insert_b_RES
ret void
}
define void @llvm_mips_insert_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insert_h_ARG1
- %1 = load i32, i32* @llvm_mips_insert_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_insert_h_ARG1
+ %1 = load i32, ptr @llvm_mips_insert_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_insert_h_RES
ret void
}
define void @llvm_mips_insert_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insert_w_ARG1
- %1 = load i32, i32* @llvm_mips_insert_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_insert_w_ARG1
+ %1 = load i32, ptr @llvm_mips_insert_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_insert_w_RES
ret void
}
define void @llvm_mips_insert_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insert_d_ARG1
- %1 = load i64, i64* @llvm_mips_insert_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_insert_d_ARG1
+ %1 = load i64, ptr @llvm_mips_insert_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_insert_d_RES
ret void
}
define void @llvm_mips_insve_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_insve_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_insve_b_ARG3
%2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_insve_b_RES
ret void
}
define void @llvm_mips_insve_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_insve_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_insve_h_ARG3
%2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_insve_h_RES
ret void
}
define void @llvm_mips_insve_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_insve_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_insve_w_ARG3
%2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_insve_w_RES
ret void
}
define void @llvm_mips_insve_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_insve_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_insve_d_ARG3
%2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_insve_d_RES
ret void
}
define void @llvm_mips_move_vb_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_move_vb_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_move_vb_ARG1
%1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
+ store <16 x i8> %1, ptr @llvm_mips_move_vb_RES
ret void
}
define void @llvm_mips_sldi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sldi_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sldi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sldi_b_RES
ret void
}
define void @llvm_mips_sldi_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sldi_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sldi_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sldi_h_RES
ret void
}
define void @llvm_mips_sldi_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sldi_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sldi_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sldi_w_RES
ret void
}
define void @llvm_mips_sldi_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sldi_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sldi_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sldi_d_RES
ret void
}
define void @llvm_mips_splati_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splati_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_splati_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_splati_b_RES
ret void
}
define void @llvm_mips_splati_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splati_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_splati_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_splati_h_RES
ret void
}
define void @llvm_mips_splati_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splati_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_splati_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_splati_w_RES
ret void
}
define void @llvm_mips_splati_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splati_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_splati_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_splati_d_RES
ret void
}
; BIGENDIAN: .byte 15
; BIGENDIAN: const_v16i8:
- store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>*@v16i8
+ store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr @v16i8
ret void
}
; BIGENDIAN: .2byte 7
; BIGENDIAN: const_v8i16:
- store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16>*@v8i16
+ store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, ptr @v8i16
ret void
}
; BIGENDIAN: .4byte 3
; BIGENDIAN: const_v4i32:
- store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>*@v4i32
+ store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr @v4i32
ret void
}
; BIGENDIAN: .4byte 2
; BIGENDIAN: const_v2i64:
- store volatile <2 x i64> <i64 1, i64 2>, <2 x i64>*@v2i64
+ store volatile <2 x i64> <i64 1, i64 2>, ptr @v2i64
ret void
}
@k = external global float
-declare float @k2(half *)
+declare float @k2(ptr)
define void @f3(i16 %b) {
; MIPS32-LABEL: f3:
entry:
%0 = alloca half
%1 = bitcast i16 %b to half
- store half %1, half * %0
- %2 = call float @k2(half * %0)
- store float %2, float * @k
+ store half %1, ptr %0
+ %2 = call float @k2(ptr %0)
+ store float %2, ptr @k
ret void
}
; MIPS64-N64-NEXT: daddiu $sp, $sp, 16
%1 = bitcast i16 %b to half
%2 = fpext half %1 to float
- store float %2, float * @k
+ store float %2, ptr @k
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load half, half * @h, align 2
+ %0 = load half, ptr @h, align 2
%1 = fpext half %0 to double
- %2 = load half, half * @h, align 2
+ %2 = load half, ptr @h, align 2
%3 = fpext half %2 to double
%add = fadd double %1, %3
%4 = fptrunc double %add to half
- store half %4, half * @h, align 2
+ store half %4, ptr @h, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: mfc1 $2, $f0
entry:
- %0 = load half, half * @h, align 2
+ %0 = load half, ptr @h, align 2
%1 = fptoui half %0 to i32
ret i32 %1
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: mfc1 $2, $f0
entry:
- %0 = load half, half * @h, align 2
+ %0 = load half, ptr @h, align 2
%1 = fptosi half %0 to i32
ret i32 %1
%0 = uitofp i32 %a to half
- store half %0, half * @h, align 2
+ store half %0, ptr @h, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%add = fadd float %1, %3
%4 = call i16 @llvm.convert.to.fp16.f32(float %add)
- store i16 %4, i16* @g, align 2
+ store i16 %4, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%sub = fsub float %1, %3
%4 = call i16 @llvm.convert.to.fp16.f32(float %sub)
- store i16 %4, i16* @g, align 2
+ store i16 %4, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%mul = fmul float %1, %3
%4 = call i16 @llvm.convert.to.fp16.f32(float %mul)
- store i16 %4, i16* @g, align 2
+ store i16 %4, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%div = fdiv float %1, %3
%4 = call i16 @llvm.convert.to.fp16.f32(float %div)
- store i16 %4, i16* @g, align 2
+ store i16 %4, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%rem = frem float %1, %3
%4 = call i16 @llvm.convert.to.fp16.f32(float %rem)
- store i16 %4, i16* @g, align 2
+ store i16 %4, ptr @g, align 2
ret void
}
; MIPSR6-N64-NEXT: jr $ra
; MIPSR6-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
- %2 = load i16, i16* @g, align 2
+ %2 = load i16, ptr @g, align 2
%3 = call float @llvm.convert.from.fp16.f32(i16 %2)
%fcmp = fcmp oeq float %1, %3
%4 = zext i1 %fcmp to i16
- store i16 %4, i16* @i1, align 2
+ store i16 %4, ptr @i1, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %log2)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %log10)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %sqrt)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %sin)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %cos)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%exp = call float @llvm.exp.f32(float %1)
%2 = call i16 @llvm.convert.to.fp16.f32(float %exp)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %exp2)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %fma)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPSR6-N64-NEXT: jr $ra
; MIPSR6-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
; MIPS32-N32: madd.s $f[[F1:[0-9]]], $f13, $f[[F0]], $f12
%2 = call i16 @llvm.convert.to.fp16.f32(float %fmuladd)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %fabs)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %minnum)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %maxnum)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: sh $2, 0($1)
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %copysign)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %floor)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %ceil)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %trunc)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%rint = call float @llvm.rint.f32(float %1)
%2 = call i16 @llvm.convert.to.fp16.f32(float %rint)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %nearbyint)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
; MIPS64-N64-NEXT: jr $ra
; MIPS64-N64-NEXT: daddiu $sp, $sp, 32
entry:
- %0 = load i16, i16* @g, align 2
+ %0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
%2 = call i16 @llvm.convert.to.fp16.f32(float %round)
- store i16 %2, i16* @g, align 2
+ store i16 %2, ptr @g, align 2
ret void
}
define i32 @test() local_unnamed_addr {
entry:
- %0 = load <8 x half>, <8 x half>* @g, align 16
+ %0 = load <8 x half>, ptr @g, align 16
%1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
- store <4 x float> %1, <4 x float>* @i, align 16
+ store <4 x float> %1, ptr @i, align 16
; CHECK: ld.h $w[[W0:[0-9]+]], 0(${{[0-9]+}})
; CHECK: fexupl.w $w[[W1:[0-9]+]], $w[[W0]]
; CHECK: st.w $w[[W1]], 0(${{[0-9]+}})
%2 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
- store <4 x float> %2, <4 x float>* @j, align 16
+ store <4 x float> %2, ptr @j, align 16
; CHECK: fexupr.w $w[[W2:[0-9]+]], $w[[W0]]
; CHECK: st.w $w[[W2]], 0(${{[0-9]+}})
; CHECK: loadstore_v16i8_near:
%1 = alloca <16 x i8>
- %2 = load volatile <16 x i8>, <16 x i8>* %1
+ %2 = load volatile <16 x i8>, ptr %1
; CHECK: ld.b [[R1:\$w[0-9]+]], 0($sp)
- store volatile <16 x i8> %2, <16 x i8>* %1
+ store volatile <16 x i8> %2, ptr %1
; CHECK: st.b [[R1]], 0($sp)
ret void
%2 = alloca [492 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 512 bytes
- %3 = load volatile <16 x i8>, <16 x i8>* %1
+ %3 = load volatile <16 x i8>, ptr %1
; CHECK: ld.b [[R1:\$w[0-9]+]], 496($sp)
- store volatile <16 x i8> %3, <16 x i8>* %1
+ store volatile <16 x i8> %3, ptr %1
; CHECK: st.b [[R1]], 496($sp)
ret void
%2 = alloca [497 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 512 bytes
- %3 = load volatile <16 x i8>, <16 x i8>* %1
+ %3 = load volatile <16 x i8>, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <16 x i8> %3, <16 x i8>* %1
+ store volatile <16 x i8> %3, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
; CHECK: st.b [[R1]], 0([[BASE]])
%2 = alloca [32752 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 32768 bytes
- %3 = load volatile <16 x i8>, <16 x i8>* %1
+ %3 = load volatile <16 x i8>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <16 x i8> %3, <16 x i8>* %1
+ store volatile <16 x i8> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.b [[R1]], 0([[BASE]])
%2 = alloca [32753 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 32768 bytes
- %3 = load volatile <16 x i8>, <16 x i8>* %1
+ %3 = load volatile <16 x i8>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <16 x i8> %3, <16 x i8>* %1
+ store volatile <16 x i8> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.b [[R1]], 0([[BASE]])
; CHECK: loadstore_v8i16_near:
%1 = alloca <8 x i16>
- %2 = load volatile <8 x i16>, <8 x i16>* %1
+ %2 = load volatile <8 x i16>, ptr %1
; CHECK: ld.h [[R1:\$w[0-9]+]], 0($sp)
- store volatile <8 x i16> %2, <8 x i16>* %1
+ store volatile <8 x i16> %2, ptr %1
; CHECK: st.h [[R1]], 0($sp)
ret void
; CHECK: loadstore_v8i16_unaligned:
%1 = alloca [2 x <8 x i16>]
- %2 = bitcast [2 x <8 x i16>]* %1 to i8*
- %3 = getelementptr i8, i8* %2, i32 1
- %4 = bitcast i8* %3 to [2 x <8 x i16>]*
- %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0
+ %2 = getelementptr i8, ptr %1, i32 1
- %6 = load volatile <8 x i16>, <8 x i16>* %5
+ %3 = load volatile <8 x i16>, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <8 x i16> %6, <8 x i16>* %5
+ store volatile <8 x i16> %3, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: st.h [[R1]], 0([[BASE]])
%2 = alloca [1004 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 1024 bytes
- %3 = load volatile <8 x i16>, <8 x i16>* %1
+ %3 = load volatile <8 x i16>, ptr %1
; CHECK: ld.h [[R1:\$w[0-9]+]], 1008($sp)
- store volatile <8 x i16> %3, <8 x i16>* %1
+ store volatile <8 x i16> %3, ptr %1
; CHECK: st.h [[R1]], 1008($sp)
ret void
%2 = alloca [1009 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 1024 bytes
- %3 = load volatile <8 x i16>, <8 x i16>* %1
+ %3 = load volatile <8 x i16>, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <8 x i16> %3, <8 x i16>* %1
+ store volatile <8 x i16> %3, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
; CHECK: st.h [[R1]], 0([[BASE]])
%2 = alloca [32752 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 32768 bytes
- %3 = load volatile <8 x i16>, <8 x i16>* %1
+ %3 = load volatile <8 x i16>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <8 x i16> %3, <8 x i16>* %1
+ store volatile <8 x i16> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.h [[R1]], 0([[BASE]])
%2 = alloca [32753 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 32768 bytes
- %3 = load volatile <8 x i16>, <8 x i16>* %1
+ %3 = load volatile <8 x i16>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <8 x i16> %3, <8 x i16>* %1
+ store volatile <8 x i16> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.h [[R1]], 0([[BASE]])
; CHECK: loadstore_v4i32_near:
%1 = alloca <4 x i32>
- %2 = load volatile <4 x i32>, <4 x i32>* %1
+ %2 = load volatile <4 x i32>, ptr %1
; CHECK: ld.w [[R1:\$w[0-9]+]], 0($sp)
- store volatile <4 x i32> %2, <4 x i32>* %1
+ store volatile <4 x i32> %2, ptr %1
; CHECK: st.w [[R1]], 0($sp)
ret void
; CHECK: loadstore_v4i32_unaligned:
%1 = alloca [2 x <4 x i32>]
- %2 = bitcast [2 x <4 x i32>]* %1 to i8*
- %3 = getelementptr i8, i8* %2, i32 1
- %4 = bitcast i8* %3 to [2 x <4 x i32>]*
- %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0
+ %2 = getelementptr i8, ptr %1, i32 1
- %6 = load volatile <4 x i32>, <4 x i32>* %5
+ %3 = load volatile <4 x i32>, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <4 x i32> %6, <4 x i32>* %5
+ store volatile <4 x i32> %3, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: st.w [[R1]], 0([[BASE]])
%2 = alloca [2028 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 2048 bytes
- %3 = load volatile <4 x i32>, <4 x i32>* %1
+ %3 = load volatile <4 x i32>, ptr %1
; CHECK: ld.w [[R1:\$w[0-9]+]], 2032($sp)
- store volatile <4 x i32> %3, <4 x i32>* %1
+ store volatile <4 x i32> %3, ptr %1
; CHECK: st.w [[R1]], 2032($sp)
ret void
%2 = alloca [2033 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 2048 bytes
- %3 = load volatile <4 x i32>, <4 x i32>* %1
+ %3 = load volatile <4 x i32>, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <4 x i32> %3, <4 x i32>* %1
+ store volatile <4 x i32> %3, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
; CHECK: st.w [[R1]], 0([[BASE]])
%2 = alloca [32752 x i8] ; Push the frame--acounting for the emergency spill
; slot-- right up to 32768 bytes
- %3 = load volatile <4 x i32>, <4 x i32>* %1
+ %3 = load volatile <4 x i32>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <4 x i32> %3, <4 x i32>* %1
+ store volatile <4 x i32> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.w [[R1]], 0([[BASE]])
%2 = alloca [32753 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 32768 bytes
- %3 = load volatile <4 x i32>, <4 x i32>* %1
+ %3 = load volatile <4 x i32>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <4 x i32> %3, <4 x i32>* %1
+ store volatile <4 x i32> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.w [[R1]], 0([[BASE]])
; CHECK: loadstore_v2i64_near:
%1 = alloca <2 x i64>
- %2 = load volatile <2 x i64>, <2 x i64>* %1
+ %2 = load volatile <2 x i64>, ptr %1
; CHECK: ld.d [[R1:\$w[0-9]+]], 0($sp)
- store volatile <2 x i64> %2, <2 x i64>* %1
+ store volatile <2 x i64> %2, ptr %1
; CHECK: st.d [[R1]], 0($sp)
ret void
; CHECK: loadstore_v2i64_unaligned:
%1 = alloca [2 x <2 x i64>]
- %2 = bitcast [2 x <2 x i64>]* %1 to i8*
- %3 = getelementptr i8, i8* %2, i32 1
- %4 = bitcast i8* %3 to [2 x <2 x i64>]*
- %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0
+ %2 = getelementptr i8, ptr %1, i32 1
- %6 = load volatile <2 x i64>, <2 x i64>* %5
+ %3 = load volatile <2 x i64>, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <2 x i64> %6, <2 x i64>* %5
+ store volatile <2 x i64> %3, ptr %2
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
; CHECK: st.d [[R1]], 0([[BASE]])
%1 = alloca <2 x i64>
%2 = alloca [4076 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 4096 bytes
- %3 = load volatile <2 x i64>, <2 x i64>* %1
+ %3 = load volatile <2 x i64>, ptr %1
; CHECK: ld.d [[R1:\$w[0-9]+]], 4080($sp)
- store volatile <2 x i64> %3, <2 x i64>* %1
+ store volatile <2 x i64> %3, ptr %1
; CHECK: st.d [[R1]], 4080($sp)
ret void
%2 = alloca [4081 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 4096 bytes
- %3 = load volatile <2 x i64>, <2 x i64>* %1
+ %3 = load volatile <2 x i64>, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <2 x i64> %3, <2 x i64>* %1
+ store volatile <2 x i64> %3, ptr %1
; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
; CHECK: st.d [[R1]], 0([[BASE]])
%2 = alloca [32752 x i8] ; Push the frame--acounting for the emergency spill
; slot--right up to 32768 bytes
- %3 = load volatile <2 x i64>, <2 x i64>* %1
+ %3 = load volatile <2 x i64>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <2 x i64> %3, <2 x i64>* %1
+ store volatile <2 x i64> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.d [[R1]], 0([[BASE]])
%2 = alloca [32753 x i8] ; Push the frame--acounting for the emergency spill
; slot--just over 32768 bytes
- %3 = load volatile <2 x i64>, <2 x i64>* %1
+ %3 = load volatile <2 x i64>, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
- store volatile <2 x i64> %3, <2 x i64>* %1
+ store volatile <2 x i64> %3, ptr %1
; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
; CHECK: st.d [[R1]], 0([[BASE]])
define i32 @llvm_mips_bnz_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bnz_b_ARG1
%1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define i32 @llvm_mips_bnz_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnz_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_bnz_h_ARG1
%1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define i32 @llvm_mips_bnz_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnz_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_bnz_w_ARG1
%1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define i32 @llvm_mips_bnz_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnz_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_bnz_d_ARG1
%1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define void @llvm_mips_ldi_b_test() nounwind {
entry:
%0 = call <16 x i8> @llvm.mips.ldi.b(i32 3)
- store <16 x i8> %0, <16 x i8>* @llvm_mips_ldi_b_RES1
+ store <16 x i8> %0, ptr @llvm_mips_ldi_b_RES1
%1 = call <16 x i8> @llvm.mips.ldi.b(i32 -3)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ldi_b_RES2
+ store <16 x i8> %1, ptr @llvm_mips_ldi_b_RES2
ret void
}
define void @llvm_mips_ldi_h_test() nounwind {
entry:
%0 = call <8 x i16> @llvm.mips.ldi.h(i32 3)
- store <8 x i16> %0, <8 x i16>* @llvm_mips_ldi_h_RES1
+ store <8 x i16> %0, ptr @llvm_mips_ldi_h_RES1
%1 = call <8 x i16> @llvm.mips.ldi.h(i32 -3)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ldi_h_RES2
+ store <8 x i16> %1, ptr @llvm_mips_ldi_h_RES2
ret void
}
define void @llvm_mips_ldi_w_test() nounwind {
entry:
%0 = call <4 x i32> @llvm.mips.ldi.w(i32 3)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_ldi_w_RES1
+ store <4 x i32> %0, ptr @llvm_mips_ldi_w_RES1
%1 = call <4 x i32> @llvm.mips.ldi.w(i32 -3)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ldi_w_RES2
+ store <4 x i32> %1, ptr @llvm_mips_ldi_w_RES2
ret void
}
define void @llvm_mips_ldi_d_test() nounwind {
entry:
%0 = call <2 x i64> @llvm.mips.ldi.d(i32 3)
- store <2 x i64> %0, <2 x i64>* @llvm_mips_ldi_d_RES1
+ store <2 x i64> %0, ptr @llvm_mips_ldi_d_RES1
%1 = call <2 x i64> @llvm.mips.ldi.d(i32 -3)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ldi_d_RES2
+ store <2 x i64> %1, ptr @llvm_mips_ldi_d_RES2
ret void
}
; llvm.mips.addvi.[bhwd] tests: load a vector operand from a global,
; add the immediate 14 to every element, and store the result.
define void @llvm_mips_addvi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addvi_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_addvi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_addvi_b_RES
ret void
}
define void @llvm_mips_addvi_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addvi_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_addvi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_addvi_h_RES
ret void
}
define void @llvm_mips_addvi_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addvi_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_addvi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_addvi_w_RES
ret void
}
define void @llvm_mips_addvi_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addvi_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_addvi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_addvi_d_RES
ret void
}
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclri_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bclri_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_bclri_b_RES
ret void
}
declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclri_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_bclri_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_bclri_h_RES
ret void
}
declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclri_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_bclri_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_bclri_w_RES
ret void
}
declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclri_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_bclri_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_bclri_d_RES
ret void
}
declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w1, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_binsli_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_binsli_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 6)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_binsli_b_RES
ret void
}
declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w1, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_binsli_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_binsli_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_binsli_h_RES
ret void
}
declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w1, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_binsli_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_binsli_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_binsli_w_RES
ret void
}
declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w1, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_binsli_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_binsli_d_ARG2
; TODO: We use a particularly wide mask here to work around a legalization
; issue. If the mask doesn't fit within a 10-bit immediate, it gets
; legalized into a constant pool. We should add a test to cover the
; other cases once they correctly select binsli.d.
%2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 61)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_binsli_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_binsli_d_RES
ret void
}
declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w1, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_binsri_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_binsri_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 6)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_binsri_b_RES
ret void
}
declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w1, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_binsri_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_binsri_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_binsri_h_RES
ret void
}
declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w1, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_binsri_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_binsri_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_binsri_w_RES
ret void
}
declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w1, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_binsri_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_binsri_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_binsri_d_RES
ret void
}
declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnegi_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bnegi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_bnegi_b_RES
ret void
}
declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnegi_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_bnegi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_bnegi_h_RES
ret void
}
declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnegi_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_bnegi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_bnegi_w_RES
ret void
}
declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnegi_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_bnegi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_bnegi_d_RES
ret void
}
declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseti_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bseti_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_bseti_b_RES
ret void
}
declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bseti_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_bseti_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_bseti_h_RES
ret void
}
declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bseti_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_bseti_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_bseti_w_RES
ret void
}
declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bseti_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_bseti_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_bseti_d_RES
ret void
}
declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32) nounwind
; llvm.mips.ceqi.[bhwd] tests: element-wise compare-equal against an
; immediate; both a positive (14) and a negative (-14) immediate are
; exercised, with the two results stored to separate globals.
define void @llvm_mips_ceqi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceqi_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_ceqi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES1
+ store <16 x i8> %1, ptr @llvm_mips_ceqi_b_RES1
%2 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 -14)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ceqi_b_RES2
+ store <16 x i8> %2, ptr @llvm_mips_ceqi_b_RES2
ret void
}
define void @llvm_mips_ceqi_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceqi_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_ceqi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES1
+ store <8 x i16> %1, ptr @llvm_mips_ceqi_h_RES1
%2 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 -14)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ceqi_h_RES2
+ store <8 x i16> %2, ptr @llvm_mips_ceqi_h_RES2
ret void
}
define void @llvm_mips_ceqi_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceqi_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_ceqi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES1
+ store <4 x i32> %1, ptr @llvm_mips_ceqi_w_RES1
%2 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 -14)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ceqi_w_RES2
+ store <4 x i32> %2, ptr @llvm_mips_ceqi_w_RES2
ret void
}
define void @llvm_mips_ceqi_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceqi_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_ceqi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES1
+ store <2 x i64> %1, ptr @llvm_mips_ceqi_d_RES1
%2 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 -14)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ceqi_d_RES2
+ store <2 x i64> %2, ptr @llvm_mips_ceqi_d_RES2
ret void
}
; llvm.mips.clei.s.[bhwd] tests: signed compare-less-than-or-equal
; against both a positive (14) and a negative (-14) immediate.
define void @llvm_mips_clei_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_clei_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES1
+ store <16 x i8> %1, ptr @llvm_mips_clei_s_b_RES1
%2 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 -14)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_clei_s_b_RES2
+ store <16 x i8> %2, ptr @llvm_mips_clei_s_b_RES2
ret void
}
define void @llvm_mips_clei_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_clei_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES1
+ store <8 x i16> %1, ptr @llvm_mips_clei_s_h_RES1
%2 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 -14)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_clei_s_h_RES2
+ store <8 x i16> %2, ptr @llvm_mips_clei_s_h_RES2
ret void
}
define void @llvm_mips_clei_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_clei_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES1
+ store <4 x i32> %1, ptr @llvm_mips_clei_s_w_RES1
%2 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 -14)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_clei_s_w_RES2
+ store <4 x i32> %2, ptr @llvm_mips_clei_s_w_RES2
ret void
}
define void @llvm_mips_clei_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_clei_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES1
+ store <2 x i64> %1, ptr @llvm_mips_clei_s_d_RES1
%2 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 -14)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_clei_s_d_RES2
+ store <2 x i64> %2, ptr @llvm_mips_clei_s_d_RES2
ret void
}
; llvm.mips.clei.u.[bhwd] tests: unsigned compare-less-than-or-equal
; against the immediate 14 (unsigned form, so only a non-negative
; immediate is exercised).
define void @llvm_mips_clei_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_clei_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_clei_u_b_RES
ret void
}
define void @llvm_mips_clei_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_clei_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_clei_u_h_RES
ret void
}
define void @llvm_mips_clei_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_clei_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_clei_u_w_RES
ret void
}
define void @llvm_mips_clei_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_clei_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_clei_u_d_RES
ret void
}
; llvm.mips.clti.s.[bhwd] tests: signed compare-less-than against both
; a positive (14) and a negative (-14) immediate.
define void @llvm_mips_clti_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_clti_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES1
+ store <16 x i8> %1, ptr @llvm_mips_clti_s_b_RES1
%2 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 -14)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_clti_s_b_RES2
+ store <16 x i8> %2, ptr @llvm_mips_clti_s_b_RES2
ret void
}
define void @llvm_mips_clti_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_clti_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES1
+ store <8 x i16> %1, ptr @llvm_mips_clti_s_h_RES1
%2 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 -14)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_clti_s_h_RES2
+ store <8 x i16> %2, ptr @llvm_mips_clti_s_h_RES2
ret void
}
define void @llvm_mips_clti_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_clti_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES1
+ store <4 x i32> %1, ptr @llvm_mips_clti_s_w_RES1
%2 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 -14)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_clti_s_w_RES2
+ store <4 x i32> %2, ptr @llvm_mips_clti_s_w_RES2
ret void
}
define void @llvm_mips_clti_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_clti_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES1
+ store <2 x i64> %1, ptr @llvm_mips_clti_s_d_RES1
%2 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 -14)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_clti_s_d_RES2
+ store <2 x i64> %2, ptr @llvm_mips_clti_s_d_RES2
ret void
}
; llvm.mips.clti.u.[bhwd] tests: unsigned compare-less-than against the
; immediate 14.
define void @llvm_mips_clti_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_clti_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_clti_u_b_RES
ret void
}
define void @llvm_mips_clti_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_clti_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_clti_u_h_RES
ret void
}
define void @llvm_mips_clti_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_clti_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_clti_u_w_RES
ret void
}
define void @llvm_mips_clti_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_clti_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_clti_u_d_RES
ret void
}
; llvm.mips.maxi.s.[bhwd] tests: signed element-wise maximum with both
; a positive (14) and a negative (-14) immediate.
define void @llvm_mips_maxi_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_maxi_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES1
+ store <16 x i8> %1, ptr @llvm_mips_maxi_s_b_RES1
%2 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 -14)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_maxi_s_b_RES2
+ store <16 x i8> %2, ptr @llvm_mips_maxi_s_b_RES2
ret void
}
define void @llvm_mips_maxi_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_maxi_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES1
+ store <8 x i16> %1, ptr @llvm_mips_maxi_s_h_RES1
%2 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 -14)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_maxi_s_h_RES2
+ store <8 x i16> %2, ptr @llvm_mips_maxi_s_h_RES2
ret void
}
define void @llvm_mips_maxi_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_maxi_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES1
+ store <4 x i32> %1, ptr @llvm_mips_maxi_s_w_RES1
%2 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 -14)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_maxi_s_w_RES2
+ store <4 x i32> %2, ptr @llvm_mips_maxi_s_w_RES2
ret void
}
define void @llvm_mips_maxi_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_maxi_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES1
+ store <2 x i64> %1, ptr @llvm_mips_maxi_s_d_RES1
%2 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 -14)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_maxi_s_d_RES2
+ store <2 x i64> %2, ptr @llvm_mips_maxi_s_d_RES2
ret void
}
; llvm.mips.maxi.u.[bhwd] tests: unsigned element-wise maximum with the
; immediate 14.
define void @llvm_mips_maxi_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_maxi_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_maxi_u_b_RES
ret void
}
define void @llvm_mips_maxi_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_maxi_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_maxi_u_h_RES
ret void
}
define void @llvm_mips_maxi_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_maxi_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_maxi_u_w_RES
ret void
}
define void @llvm_mips_maxi_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_maxi_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_maxi_u_d_RES
ret void
}
; llvm.mips.mini.s.[bhwd] tests: signed element-wise minimum with both
; a positive (14) and a negative (-14) immediate.
define void @llvm_mips_mini_s_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_s_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_mini_s_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES1
+ store <16 x i8> %1, ptr @llvm_mips_mini_s_b_RES1
%2 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 -14)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_mini_s_b_RES2
+ store <16 x i8> %2, ptr @llvm_mips_mini_s_b_RES2
ret void
}
define void @llvm_mips_mini_s_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_s_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_mini_s_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES1
+ store <8 x i16> %1, ptr @llvm_mips_mini_s_h_RES1
%2 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 -14)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_mini_s_h_RES2
+ store <8 x i16> %2, ptr @llvm_mips_mini_s_h_RES2
ret void
}
define void @llvm_mips_mini_s_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_s_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_mini_s_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES1
+ store <4 x i32> %1, ptr @llvm_mips_mini_s_w_RES1
%2 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 -14)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_mini_s_w_RES2
+ store <4 x i32> %2, ptr @llvm_mips_mini_s_w_RES2
ret void
}
define void @llvm_mips_mini_s_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_s_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_mini_s_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES1
+ store <2 x i64> %1, ptr @llvm_mips_mini_s_d_RES1
%2 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 -14)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_mini_s_d_RES2
+ store <2 x i64> %2, ptr @llvm_mips_mini_s_d_RES2
ret void
}
; llvm.mips.mini.u.[bhwd] tests: unsigned element-wise minimum with the
; immediate 14.
define void @llvm_mips_mini_u_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_u_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_mini_u_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_mini_u_b_RES
ret void
}
define void @llvm_mips_mini_u_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_u_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_mini_u_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_mini_u_h_RES
ret void
}
define void @llvm_mips_mini_u_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_u_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_mini_u_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_mini_u_w_RES
ret void
}
define void @llvm_mips_mini_u_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_u_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_mini_u_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_mini_u_d_RES
ret void
}
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.b $w0, 0($1)
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_subvi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_subvi_b_RES
ret void
}
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.h $w0, 0($1)
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_subvi_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_subvi_h_RES
ret void
}
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.w $w0, 0($1)
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_subvi_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_subvi_w_RES
ret void
}
; CHECK-NEXT: jr $ra
; CHECK-NEXT: st.d $w0, 0($1)
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1
+ %0 = load <2 x i64>, ptr @llvm_mips_subvi_d_ARG1
%1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
+ store <2 x i64> %1, ptr @llvm_mips_subvi_d_RES
ret void
}
; llvm.mips.ld.b tests: vector load from a global base plus a byte
; offset. The offset immediates probe aligned (16), unaligned (9),
; boundary in-range (-512/511) and out-of-range (-513/512) cases;
; presumably the in/out-of-range split tracks the instruction's signed
; immediate offset field — confirm against the MSA ISA manual.
define void @llvm_mips_ld_b_test() nounwind {
entry:
- %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
- %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+ %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 16)
+ store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
ret void
}
-declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
+declare <16 x i8> @llvm.mips.ld.b(ptr, i32) nounwind
; CHECK: llvm_mips_ld_b_test:
; CHECK: ld.b [[R1:\$w[0-9]+]], 16(
define void @llvm_mips_ld_b_unaligned_test() nounwind {
entry:
- %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
- %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 9)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+ %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 9)
+ store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
ret void
}
define void @llvm_mips_ld_b_valid_range_tests() nounwind {
entry:
- %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
- %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 -512)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
- %2 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 511)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ld_b_RES
+ %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 -512)
+ store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
+ %1 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 511)
+ store <16 x i8> %1, ptr @llvm_mips_ld_b_RES
ret void
}
define void @llvm_mips_ld_b_invalid_range_tests() nounwind {
entry:
- %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
- %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 -513)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
- %2 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 512)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_ld_b_RES
+ %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 -513)
+ store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
+ %1 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 512)
+ store <16 x i8> %1, ptr @llvm_mips_ld_b_RES
ret void
}
; llvm.mips.ld.h tests: same structure as the ld.b tests, but with
; halfword elements, so the boundary offsets scale by the element size
; (-1024/1022 in range, -1026/1024 out of range).
define void @llvm_mips_ld_h_test() nounwind {
entry:
- %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
- %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 16)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+ %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 16)
+ store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
ret void
}
-declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
+declare <8 x i16> @llvm.mips.ld.h(ptr, i32) nounwind
; CHECK: llvm_mips_ld_h_test:
; CHECK: ld.h [[R1:\$w[0-9]+]], 16(
define void @llvm_mips_ld_h_unaligned_test() nounwind {
entry:
- %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
- %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 9)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+ %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 9)
+ store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
ret void
}
define void @llvm_mips_ld_h_valid_range_tests() nounwind {
entry:
- %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
- %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 -1024)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
- %2 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 1022)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ld_h_RES
+ %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 -1024)
+ store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
+ %1 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 1022)
+ store <8 x i16> %1, ptr @llvm_mips_ld_h_RES
ret void
}
define void @llvm_mips_ld_h_invalid_range_tests() nounwind {
entry:
- %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
- %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 -1026)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
- %2 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 1024)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_ld_h_RES
+ %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 -1026)
+ store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
+ %1 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 1024)
+ store <8 x i16> %1, ptr @llvm_mips_ld_h_RES
ret void
}
; llvm.mips.ld.w tests: word-element variant of the ld.b tests; the
; boundary offsets again scale by the element size (-2048/2044 in range).
define void @llvm_mips_ld_w_test() nounwind {
entry:
- %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
- %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 16)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+ %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 16)
+ store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
ret void
}
-declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
+declare <4 x i32> @llvm.mips.ld.w(ptr, i32) nounwind
; CHECK: llvm_mips_ld_w_test:
; CHECK: ld.w [[R1:\$w[0-9]+]], 16(
define void @llvm_mips_ld_w_unaligned_test() nounwind {
entry:
- %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
- %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 9)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+ %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 9)
+ store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
ret void
}
define void @llvm_mips_ld_w_valid_range_tests() nounwind {
entry:
- %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
- %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 -2048)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
- %2 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 2044)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ld_w_RES
+ %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 -2048)
+ store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
+ %1 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 2044)
+ store <4 x i32> %1, ptr @llvm_mips_ld_w_RES
ret void
}
define void @llvm_mips_ld_w_invalid_range_tests() nounwind {
entry:
- %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
- %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 -2052)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
- %2 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 2048)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_ld_w_RES
+ %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 -2052)
+ store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
+ %1 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 2048)
+ store <4 x i32> %1, ptr @llvm_mips_ld_w_RES
ret void
}
define void @llvm_mips_ld_d_test() nounwind {
entry:
- %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
- %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 16)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+ %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 16)
+ store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
ret void
}
-declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
+declare <2 x i64> @llvm.mips.ld.d(ptr, i32) nounwind
; CHECK: llvm_mips_ld_d_test:
; CHECK: ld.d [[R1:\$w[0-9]+]], 16(
define void @llvm_mips_ld_d_unaligned_test() nounwind {
entry:
- %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
- %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 9)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+ %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 9)
+ store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
ret void
}
define void @llvm_mips_ld_d_valid_range_tests() nounwind {
entry:
- %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
- %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 -4096)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
- %2 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 4088)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ld_d_RES
+ %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 -4096)
+ store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
+ %1 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 4088)
+ store <2 x i64> %1, ptr @llvm_mips_ld_d_RES
ret void
}
define void @llvm_mips_ld_d_invalid_range_tests() nounwind {
entry:
- %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
- %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 -4104)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
- %2 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 4096)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_ld_d_RES
+ %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 -4104)
+ store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
+ %1 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 4096)
+ store <2 x i64> %1, ptr @llvm_mips_ld_d_RES
ret void
}
define void @llvm_mips_st_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
- %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
+ %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 16)
ret void
}
-declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
+declare void @llvm.mips.st.b(<16 x i8>, ptr, i32) nounwind
; CHECK: llvm_mips_st_b_test:
; CHECK: ld.b
define void @llvm_mips_st_b_unaligned_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
- %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 9)
+ %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 9)
ret void
}
define void @llvm_mips_st_b_valid_range_tests() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
- %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 -512)
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 511)
+ %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 -512)
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 511)
ret void
}
define void @llvm_mips_st_b_invalid_range_tests() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
- %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 -513)
- tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 512)
+ %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 -513)
+ tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 512)
ret void
}
define void @llvm_mips_st_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
- %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
+ %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 16)
ret void
}
-declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
+declare void @llvm.mips.st.h(<8 x i16>, ptr, i32) nounwind
; CHECK: llvm_mips_st_h_test:
; CHECK: ld.h
define void @llvm_mips_st_h_unaligned_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
- %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 9)
+ %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 9)
ret void
}
define void @llvm_mips_st_h_valid_range_tests() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
- %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 -1024)
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 1022)
+ %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 -1024)
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 1022)
ret void
}
define void @llvm_mips_st_h_invalid_range_tests() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
- %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 -1026)
- tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 1024)
+ %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 -1026)
+ tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 1024)
ret void
}
define void @llvm_mips_st_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
- %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
+ %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 16)
ret void
}
-declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
+declare void @llvm.mips.st.w(<4 x i32>, ptr, i32) nounwind
; CHECK: llvm_mips_st_w_test:
; CHECK: ld.w
define void @llvm_mips_st_w_unaligned_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
- %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 9)
+ %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 9)
ret void
}
define void @llvm_mips_st_w_valid_range_tests() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
- %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 -2048)
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 2044)
+ %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 -2048)
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 2044)
ret void
}
define void @llvm_mips_st_w_invalid_range_tests() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
- %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 -2052)
- tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 2048)
+ %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 -2052)
+ tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 2048)
ret void
}
define void @llvm_mips_st_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
- %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
+ %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 16)
ret void
}
-declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
+declare void @llvm.mips.st.d(<2 x i64>, ptr, i32) nounwind
; CHECK: llvm_mips_st_d_test:
; CHECK: ld.d
define void @llvm_mips_st_d_unaligned_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
- %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 9)
+ %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 9)
ret void
}
define void @llvm_mips_st_d_valid_range_tests() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
- %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 -4096)
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 4088)
+ %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 -4096)
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 4088)
ret void
}
define void @llvm_mips_st_d_invalid_range_tests() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
- %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 -4104)
- tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 4096)
+ %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 -4104)
+ tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 4096)
ret void
}
define void @llvm_mips_andi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_andi_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_andi_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_andi_b_RES
ret void
}
define void @llvm_mips_bmnzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
ret void
}
define void @llvm_mips_bmzi_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bmzi_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmzi_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bmzi_b_RES
ret void
}
define void @llvm_mips_bseli_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bseli_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bseli_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bseli_b_RES
ret void
}
define void @llvm_mips_nori_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nori_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_nori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_nori_b_RES
ret void
}
define void @llvm_mips_ori_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ori_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_ori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_ori_b_RES
ret void
}
define void @llvm_mips_shf_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_shf_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_shf_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_shf_b_RES
ret void
}
define void @llvm_mips_shf_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_shf_h_ARG1
+ %0 = load <8 x i16>, ptr @llvm_mips_shf_h_ARG1
%1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
+ store <8 x i16> %1, ptr @llvm_mips_shf_h_RES
ret void
}
define void @llvm_mips_shf_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_shf_w_ARG1
+ %0 = load <4 x i32>, ptr @llvm_mips_shf_w_ARG1
%1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
+ store <4 x i32> %1, ptr @llvm_mips_shf_w_RES
ret void
}
define void @llvm_mips_xori_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xori_b_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_xori_b_ARG1
%1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
+ store <16 x i8> %1, ptr @llvm_mips_xori_b_RES
ret void
}
; Test that the immediate intrinsics with out of range values trigger an error.
-define void @binsli_b(<16 x i8> * %ptr) {
+define void @binsli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
; CHECK: LLVM ERROR: Immediate out of range
-define void @binsri_b(<16 x i8> * %ptr) {
+define void @binsri_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bmnzi_b(<16 x i8> * %ptr) {
+define void @bmnzi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bmzi_b(<16 x i8> * %ptr) {
+define void @bmzi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_b(<16 x i8> * %ptr) {
+define void @bnegi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bseli_b(<16 x i8> * %ptr) {
+define void @bseli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 63)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_b(<16 x i8> * %ptr) {
+define void @bseti_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 9)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_b(<16 x i8> * %ptr) {
+define void @clei_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 152)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_b(<16 x i8> * %ptr) {
+define void @clei_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_b(<16 x i8> * %ptr) {
+define void @clti_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 129)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_b(<16 x i8> * %ptr) {
+define void @clti_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_b(<16 x i8> * %ptr) {
+define void @ldi_b(ptr %ptr) {
entry:
%r = call <16 x i8> @llvm.mips.ldi.b(i32 1025)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_b(<16 x i8> * %ptr) {
+define void @maxi_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_b(<16 x i8> * %ptr) {
+define void @maxi_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_b(<16 x i8> * %ptr) {
+define void @mini_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_b(<16 x i8> * %ptr) {
+define void @mini_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 163)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @nori_b(<16 x i8> * %ptr) {
+define void @nori_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 63)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @ori_b(<16 x i8> * %ptr) {
+define void @ori_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 63)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_b(<16 x i8> * %ptr) {
+define void @sldi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @slli_b(<16 x i8> * %ptr) {
+define void @slli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @splati_b(<16 x i8> * %ptr) {
+define void @splati_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srai_b(<16 x i8> * %ptr) {
+define void @srai_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srari_b(<16 x i8> * %ptr) {
+define void @srari_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srli_b(<16 x i8> * %ptr) {
+define void @srli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_b(<16 x i8> * %ptr) {
+define void @srlri_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 65)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @addvi_w(<4 x i32> * %ptr) {
+define void @addvi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_w(<4 x i32> * %ptr) {
+define void @bclri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_w(<4 x i32> * %ptr) {
+define void @binsli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_w(<4 x i32> * %ptr) {
+define void @binsri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_w(<4 x i32> * %ptr) {
+define void @bnegi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_w(<4 x i32> * %ptr) {
+define void @bseti_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_w(<4 x i32> * %ptr) {
+define void @clei_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_w(<4 x i32> * %ptr) {
+define void @clei_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_w(<4 x i32> * %ptr) {
+define void @clti_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_w(<4 x i32> * %ptr) {
+define void @clti_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_w(<4 x i32> * %ptr) {
+define void @maxi_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_w(<4 x i32> * %ptr) {
+define void @maxi_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_w(<4 x i32> * %ptr) {
+define void @mini_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_w(<4 x i32> * %ptr) {
+define void @mini_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_w(<4 x i32> * %ptr) {
+define void @ldi_w(ptr %ptr) {
entry:
%r = call <4 x i32> @llvm.mips.ldi.w(i32 1024)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_w(<4 x i32> * %ptr) {
+define void @sldi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 63)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @slli_w(<4 x i32> * %ptr) {
+define void @slli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @splati_w(<4 x i32> * %ptr) {
+define void @splati_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srai_w(<4 x i32> * %ptr) {
+define void @srai_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srari_w(<4 x i32> * %ptr) {
+define void @srari_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srli_w(<4 x i32> * %ptr) {
+define void @srli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_w(<4 x i32> * %ptr) {
+define void @srlri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 65)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @addvi_h(<8 x i16> * %ptr) {
+define void @addvi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_h(<8 x i16> * %ptr) {
+define void @bclri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 16)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_h(<8 x i16> * %ptr) {
+define void @binsli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 17)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_h(<8 x i16> * %ptr) {
+define void @binsri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 19)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_h(<8 x i16> * %ptr) {
+define void @bnegi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 19)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_h(<8 x i16> * %ptr) {
+define void @bseti_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 19)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_h(<8 x i16> * %ptr) {
+define void @clei_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 63)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_h(<8 x i16> * %ptr) {
+define void @clei_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 130)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_h(<8 x i16> * %ptr) {
+define void @clti_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 63)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_h(<8 x i16> * %ptr) {
+define void @clti_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 63)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_h(<8 x i16> * %ptr) {
+define void @maxi_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 63)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_h(<8 x i16> * %ptr) {
+define void @maxi_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 130)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_h(<8 x i16> * %ptr) {
+define void @mini_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 63)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_h(<8 x i16> * %ptr) {
+define void @mini_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 130)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_h(<8 x i16> * %ptr) {
+define void @ldi_h(ptr %ptr) {
entry:
%r = call <8 x i16> @llvm.mips.ldi.h(i32 1024)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_h(<8 x i16> * %ptr) {
+define void @sldi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @slli_h(<8 x i16> * %ptr) {
+define void @slli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @splati_h(<8 x i16> * %ptr) {
+define void @splati_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srai_h(<8 x i16> * %ptr) {
+define void @srai_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srari_h(<8 x i16> * %ptr) {
+define void @srari_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srli_h(<8 x i16> * %ptr) {
+define void @srli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_h(<8 x i16> * %ptr) {
+define void @srlri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 65)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define i32 @copy_s_b(<16 x i8> * %ptr) {
+define i32 @copy_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 17)
ret i32 %r
}
-define i32 @copy_s_h(<8 x i16> * %ptr) {
+define i32 @copy_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 9)
ret i32 %r
}
-define i32 @copy_s_w(<4 x i32> * %ptr) {
+define i32 @copy_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 5)
ret i32 %r
}
-define i32 @copy_u_b(<16 x i8> * %ptr) {
+define i32 @copy_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 16)
ret i32 %r
}
-define i32 @copy_u_h(<8 x i16> * %ptr) {
+define i32 @copy_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 9)
ret i32 %r
}
-define i32 @copy_u_w(<4 x i32> * %ptr) {
+define i32 @copy_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 5)
ret i32 %r
}
-define i64 @copy_s_d(<2 x i64> * %ptr) {
-entry: %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @copy_s_d(ptr %ptr) {
+entry: %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 3)
ret i64 %r
}
-define i64 @copy_u_d(<2 x i64> * %ptr) {
-entry: %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @copy_u_d(ptr %ptr) {
+entry: %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 3)
ret i64 %r
}
-define void @addvi_d(<2 x i64> * %ptr) {
+define void @addvi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_d(<2 x i64> * %ptr) {
+define void @bclri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 64)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_d(<2 x i64> * %ptr) {
+define void @binsli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_d(<2 x i64> * %ptr) {
+define void @binsri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_d(<2 x i64> * %ptr) {
+define void @bnegi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_d(<2 x i64> * %ptr) {
+define void @bseti_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_d(<2 x i64> * %ptr) {
+define void @clei_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_d(<2 x i64> * %ptr) {
+define void @clei_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_d(<2 x i64> * %ptr) {
+define void @clti_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_d(<2 x i64> * %ptr) {
+define void @clti_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_d(<2 x i64> * %ptr) {
+define void @ldi_d(ptr %ptr) {
entry:
%r = call <2 x i64> @llvm.mips.ldi.d(i32 1024)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_d(<2 x i64> * %ptr) {
+define void @maxi_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_d(<2 x i64> * %ptr) {
+define void @maxi_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_d(<2 x i64> * %ptr) {
+define void @mini_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_d(<2 x i64> * %ptr) {
+define void @mini_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 63)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_d(<2 x i64> * %ptr) {
+define void @sldi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @slli_d(<2 x i64> * %ptr) {
+define void @slli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @srai_d(<2 x i64> * %ptr) {
+define void @srai_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @srari_d(<2 x i64> * %ptr) {
+define void @srari_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @srli_d(<2 x i64> * %ptr) {
+define void @srli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_d(<2 x i64> * %ptr) {
+define void @srlri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 65)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; Negative numbers
-define void @neg_addvi_b(<16 x i8> * %ptr) {
+define void @neg_addvi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_andi_b(<16 x i8> * %ptr) {
+define void @neg_andi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bclri_b(<16 x i8> * %ptr) {
+define void @neg_bclri_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsli_b(<16 x i8> * %ptr) {
+define void @neg_binsli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsri_b(<16 x i8> * %ptr) {
+define void @neg_binsri_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bmnzi_b(<16 x i8> * %ptr) {
+define void @neg_bmnzi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bmzi_b(<16 x i8> * %ptr) {
+define void @neg_bmzi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bnegi_b(<16 x i8> * %ptr) {
+define void @neg_bnegi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bseli_b(<16 x i8> * %ptr) {
+define void @neg_bseli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bseti_b(<16 x i8> * %ptr) {
+define void @neg_bseti_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 -5)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_s_b(<16 x i8> * %ptr) {
+define void @neg_clei_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 -120)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_u_b(<16 x i8> * %ptr) {
+define void @neg_clei_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_s_b(<16 x i8> * %ptr) {
+define void @neg_clti_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 -35)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_u_b(<16 x i8> * %ptr) {
+define void @neg_clti_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_ldi_b(<16 x i8> * %ptr) {
+define void @neg_ldi_b(ptr %ptr) {
entry:
%r = call <16 x i8> @llvm.mips.ldi.b(i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_s_b(<16 x i8> * %ptr) {
+define void @neg_maxi_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_u_b(<16 x i8> * %ptr) {
+define void @neg_maxi_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_s_b(<16 x i8> * %ptr) {
+define void @neg_mini_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_u_b(<16 x i8> * %ptr) {
+define void @neg_mini_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_nori_b(<16 x i8> * %ptr) {
+define void @neg_nori_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_ori_b(<16 x i8> * %ptr) {
+define void @neg_ori_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 -25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_sldi_b(<16 x i8> * %ptr) {
+define void @neg_sldi_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 -7)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_slli_b(<16 x i8> * %ptr) {
+define void @neg_slli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_splati_b(<16 x i8> * %ptr) {
+define void @neg_splati_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srai_b(<16 x i8> * %ptr) {
+define void @neg_srai_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srari_b(<16 x i8> * %ptr) {
+define void @neg_srari_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srli_b(<16 x i8> * %ptr) {
+define void @neg_srli_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srlri_b(<16 x i8> * %ptr) {
+define void @neg_srlri_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 -3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @neg_addvi_w(<4 x i32> * %ptr) {
+define void @neg_addvi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bclri_w(<4 x i32> * %ptr) {
+define void @neg_bclri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsli_w(<4 x i32> * %ptr) {
+define void @neg_binsli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsri_w(<4 x i32> * %ptr) {
+define void @neg_binsri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bnegi_w(<4 x i32> * %ptr) {
+define void @neg_bnegi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bseti_w(<4 x i32> * %ptr) {
+define void @neg_bseti_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_s_w(<4 x i32> * %ptr) {
+define void @neg_clei_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 -140)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_u_w(<4 x i32> * %ptr) {
+define void @neg_clei_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_s_w(<4 x i32> * %ptr) {
+define void @neg_clti_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 -150)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_u_w(<4 x i32> * %ptr) {
+define void @neg_clti_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 -25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_s_w(<4 x i32> * %ptr) {
+define void @neg_maxi_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 -200)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_u_w(<4 x i32> * %ptr) {
+define void @neg_maxi_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 -200)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_s_w(<4 x i32> * %ptr) {
+define void @neg_mini_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 -200)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_u_w(<4 x i32> * %ptr) {
+define void @neg_mini_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 -200)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_ldi_w(<4 x i32> * %ptr) {
+define void @neg_ldi_w(ptr %ptr) {
entry:
%r = call <4 x i32> @llvm.mips.ldi.w(i32 -300)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_sldi_w(<4 x i32> * %ptr) {
+define void @neg_sldi_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 -20)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_slli_w(<4 x i32> * %ptr) {
+define void @neg_slli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_splati_w(<4 x i32> * %ptr) {
+define void @neg_splati_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srai_w(<4 x i32> * %ptr) {
+define void @neg_srai_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srari_w(<4 x i32> * %ptr) {
+define void @neg_srari_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srli_w(<4 x i32> * %ptr) {
+define void @neg_srli_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srlri_w(<4 x i32> * %ptr) {
+define void @neg_srlri_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 -3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @neg_addvi_h(<8 x i16> * %ptr) {
+define void @neg_addvi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 -25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bclri_h(<8 x i16> * %ptr) {
+define void @neg_bclri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 -8)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsli_h(<8 x i16> * %ptr) {
+define void @neg_binsli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 -8)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsri_h(<8 x i16> * %ptr) {
+define void @neg_binsri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 -15)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bnegi_h(<8 x i16> * %ptr) {
+define void @neg_bnegi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 -14)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bseti_h(<8 x i16> * %ptr) {
+define void @neg_bseti_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 -15)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_s_h(<8 x i16> * %ptr) {
+define void @neg_clei_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 -25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_u_h(<8 x i16> * %ptr) {
+define void @neg_clei_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 -25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_s_h(<8 x i16> * %ptr) {
+define void @neg_clti_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 -150)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_u_h(<8 x i16> * %ptr) {
+define void @neg_clti_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 -25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_s_h(<8 x i16> * %ptr) {
+define void @neg_maxi_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 -200)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_u_h(<8 x i16> * %ptr) {
+define void @neg_maxi_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 -200)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_s_h(<8 x i16> * %ptr) {
+define void @neg_mini_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 -200)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_u_h(<8 x i16> * %ptr) {
+define void @neg_mini_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 -2)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_ldi_h(<8 x i16> * %ptr) {
+define void @neg_ldi_h(ptr %ptr) {
entry:
%r = call <8 x i16> @llvm.mips.ldi.h(i32 -300)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_sldi_h(<8 x i16> * %ptr) {
+define void @neg_sldi_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_slli_h(<8 x i16> * %ptr) {
+define void @neg_slli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_splati_h(<8 x i16> * %ptr) {
+define void @neg_splati_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srai_h(<8 x i16> * %ptr) {
+define void @neg_srai_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srari_h(<8 x i16> * %ptr) {
+define void @neg_srari_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srli_h(<8 x i16> * %ptr) {
+define void @neg_srli_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srlri_h(<8 x i16> * %ptr) {
+define void @neg_srlri_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 -3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define i32 @neg_copy_s_b(<16 x i8> * %ptr) {
+define i32 @neg_copy_s_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 -1)
ret i32 %r
}
-define i32 @neg_copy_s_h(<8 x i16> * %ptr) {
+define i32 @neg_copy_s_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 -1)
ret i32 %r
}
-define i32 @neg_copy_s_w(<4 x i32> * %ptr) {
+define i32 @neg_copy_s_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 -1)
ret i32 %r
}
-define i32 @neg_copy_u_b(<16 x i8> * %ptr) {
+define i32 @neg_copy_u_b(ptr %ptr) {
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 -1)
ret i32 %r
}
-define i32 @neg_copy_u_h(<8 x i16> * %ptr) {
+define i32 @neg_copy_u_h(ptr %ptr) {
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 -1)
ret i32 %r
}
-define i32 @neg_copy_u_w(<4 x i32> * %ptr) {
+define i32 @neg_copy_u_w(ptr %ptr) {
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 -1)
ret i32 %r
}
-define i64 @neg_copy_s_d(<2 x i64> * %ptr) {
-entry: %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @neg_copy_s_d(ptr %ptr) {
+entry: %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 -1)
ret i64 %r
}
-define i64 @neg_copy_u_d(<2 x i64> * %ptr) {
-entry: %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @neg_copy_u_d(ptr %ptr) {
+entry: %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 -1)
ret i64 %r
}
-define void @neg_addvi_d(<2 x i64> * %ptr) {
+define void @neg_addvi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bclri_d(<2 x i64> * %ptr) {
+define void @neg_bclri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsli_d(<2 x i64> * %ptr) {
+define void @neg_binsli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_binsri_d(<2 x i64> * %ptr) {
+define void @neg_binsri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bnegi_d(<2 x i64> * %ptr) {
+define void @neg_bnegi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_bseti_d(<2 x i64> * %ptr) {
+define void @neg_bseti_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_s_d(<2 x i64> * %ptr) {
+define void @neg_clei_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 -45)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clei_u_d(<2 x i64> * %ptr) {
+define void @neg_clei_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_s_d(<2 x i64> * %ptr) {
+define void @neg_clti_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 -32)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_clti_u_d(<2 x i64> * %ptr) {
+define void @neg_clti_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 -25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_ldi_d(<2 x i64> * %ptr) {
+define void @neg_ldi_d(ptr %ptr) {
entry:
%r = call <2 x i64> @llvm.mips.ldi.d(i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_s_d(<2 x i64> * %ptr) {
+define void @neg_maxi_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 -202)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_maxi_u_d(<2 x i64> * %ptr) {
+define void @neg_maxi_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 -2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_s_d(<2 x i64> * %ptr) {
+define void @neg_mini_s_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 -202)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_mini_u_d(<2 x i64> * %ptr) {
+define void @neg_mini_u_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 -2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_sldi_d(<2 x i64> * %ptr) {
+define void @neg_sldi_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 -1)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_slli_d(<2 x i64> * %ptr) {
+define void @neg_slli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srai_d(<2 x i64> * %ptr) {
+define void @neg_srai_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srari_d(<2 x i64> * %ptr) {
+define void @neg_srari_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srli_d(<2 x i64> * %ptr) {
+define void @neg_srli_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @neg_srlri_d(<2 x i64> * %ptr) {
+define void @neg_srlri_d(ptr %ptr) {
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 -3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; Some of the intrinsics lower to equivalent forms.
-define void @addvi_b(<16 x i8> * %ptr) {
+define void @addvi_b(ptr %ptr) {
; MSA-LABEL: addvi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @andi_b(<16 x i8> * %ptr) {
+define void @andi_b(ptr %ptr) {
; MSA-LABEL: andi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_b(<16 x i8> * %ptr) {
+define void @bclri_b(ptr %ptr) {
; MSA-LABEL: bclri_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @binsli_b(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w1, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
- %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
+ %b = load <16 x i8>, ptr %ptr2, align 16
%r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @binsri_b(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsri_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w1, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
- %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
+ %b = load <16 x i8>, ptr %ptr2, align 16
%r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %b, i32 5)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bmnzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @bmnzi_b(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: bmnzi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w1, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
- %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
+ %b = load <16 x i8>, ptr %ptr2, align 16
%r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bmzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @bmzi_b(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: bmzi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w1, 0($2)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
- %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
+ %b = load <16 x i8>, ptr %ptr2, align 16
%r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_b(<16 x i8> * %ptr) {
+define void @bnegi_b(ptr %ptr) {
; MSA-LABEL: bnegi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bseli_b(<16 x i8> * %ptr) {
+define void @bseli_b(ptr %ptr) {
; MSA-LABEL: bseli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_b(<16 x i8> * %ptr) {
+define void @bseti_b(ptr %ptr) {
; MSA-LABEL: bseti_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 5)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_b(<16 x i8> * %ptr) {
+define void @clei_s_b(ptr %ptr) {
; MSA-LABEL: clei_s_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 12)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_b(<16 x i8> * %ptr) {
+define void @clei_u_b(ptr %ptr) {
; MSA-LABEL: clei_u_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_b(<16 x i8> * %ptr) {
+define void @clti_s_b(ptr %ptr) {
; MSA-LABEL: clti_s_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 15)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_b(<16 x i8> * %ptr) {
+define void @clti_u_b(ptr %ptr) {
; MSA-LABEL: clti_u_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_b(<16 x i8> * %ptr) {
+define void @ldi_b(ptr %ptr) {
; MSA-LABEL: ldi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.b $w0, 3
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
%r = call <16 x i8> @llvm.mips.ldi.b(i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_b(<16 x i8> * %ptr) {
+define void @maxi_s_b(ptr %ptr) {
; MSA-LABEL: maxi_s_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_b(<16 x i8> * %ptr) {
+define void @maxi_u_b(ptr %ptr) {
; MSA-LABEL: maxi_u_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_b(<16 x i8> * %ptr) {
+define void @mini_s_b(ptr %ptr) {
; MSA-LABEL: mini_s_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_b(<16 x i8> * %ptr) {
+define void @mini_u_b(ptr %ptr) {
; MSA-LABEL: mini_u_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @nori_b(<16 x i8> * %ptr) {
+define void @nori_b(ptr %ptr) {
; MSA-LABEL: nori_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @ori_b(<16 x i8> * %ptr) {
+define void @ori_b(ptr %ptr) {
; MSA-LABEL: ori_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 25)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_b(<16 x i8> * %ptr) {
+define void @sldi_b(ptr %ptr) {
; MSA-LABEL: sldi_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @slli_b(<16 x i8> * %ptr) {
+define void @slli_b(ptr %ptr) {
; MSA-LABEL: slli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @splati_b(<16 x i8> * %ptr) {
+define void @splati_b(ptr %ptr) {
; MSA-LABEL: splati_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srai_b(<16 x i8> * %ptr) {
+define void @srai_b(ptr %ptr) {
; MSA-LABEL: srai_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srari_b(<16 x i8> * %ptr) {
+define void @srari_b(ptr %ptr) {
; MSA-LABEL: srari_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srli_b(<16 x i8> * %ptr) {
+define void @srli_b(ptr %ptr) {
; MSA-LABEL: srli_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_b(<16 x i8> * %ptr) {
+define void @srlri_b(ptr %ptr) {
; MSA-LABEL: srlri_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.b $w0, 0($1)
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 3)
- store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ store <16 x i8> %r, ptr %ptr, align 16
ret void
}
-define void @addvi_w(<4 x i32> * %ptr) {
+define void @addvi_w(ptr %ptr) {
; MSA-LABEL: addvi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_w(<4 x i32> * %ptr) {
+define void @bclri_w(ptr %ptr) {
; MSA-LABEL: bclri_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
+define void @binsli_w(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsli_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w1, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
- %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
+ %b = load <4 x i32>, ptr %ptr2, align 16
%r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %b, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
+define void @binsri_w(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsri_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w1, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
- %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
+ %b = load <4 x i32>, ptr %ptr2, align 16
%r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %b, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_w(<4 x i32> * %ptr) {
+define void @bnegi_w(ptr %ptr) {
; MSA-LABEL: bnegi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_w(<4 x i32> * %ptr) {
+define void @bseti_w(ptr %ptr) {
; MSA-LABEL: bseti_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_w(<4 x i32> * %ptr) {
+define void @clei_s_w(ptr %ptr) {
; MSA-LABEL: clei_s_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 14)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_w(<4 x i32> * %ptr) {
+define void @clei_u_w(ptr %ptr) {
; MSA-LABEL: clei_u_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_w(<4 x i32> * %ptr) {
+define void @clti_s_w(ptr %ptr) {
; MSA-LABEL: clti_s_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 15)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_w(<4 x i32> * %ptr) {
+define void @clti_u_w(ptr %ptr) {
; MSA-LABEL: clti_u_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 25)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_w(<4 x i32> * %ptr) {
+define void @maxi_s_w(ptr %ptr) {
; MSA-LABEL: maxi_s_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 2)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_w(<4 x i32> * %ptr) {
+define void @maxi_u_w(ptr %ptr) {
; MSA-LABEL: maxi_u_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 2)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_w(<4 x i32> * %ptr) {
+define void @mini_s_w(ptr %ptr) {
; MSA-LABEL: mini_s_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 2)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_w(<4 x i32> * %ptr) {
+define void @mini_u_w(ptr %ptr) {
; MSA-LABEL: mini_u_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 2)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_w(<4 x i32> * %ptr) {
+define void @ldi_w(ptr %ptr) {
; MSA-LABEL: ldi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.w $w0, 3
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
%r = call <4 x i32> @llvm.mips.ldi.w(i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_w(<4 x i32> * %ptr) {
+define void @sldi_w(ptr %ptr) {
; MSA-LABEL: sldi_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 2)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @slli_w(<4 x i32> * %ptr) {
+define void @slli_w(ptr %ptr) {
; MSA-LABEL: slli_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @splati_w(<4 x i32> * %ptr) {
+define void @splati_w(ptr %ptr) {
; MSA-LABEL: splati_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srai_w(<4 x i32> * %ptr) {
+define void @srai_w(ptr %ptr) {
; MSA-LABEL: srai_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srari_w(<4 x i32> * %ptr) {
+define void @srari_w(ptr %ptr) {
; MSA-LABEL: srari_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srli_w(<4 x i32> * %ptr) {
+define void @srli_w(ptr %ptr) {
; MSA-LABEL: srli_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_w(<4 x i32> * %ptr) {
+define void @srlri_w(ptr %ptr) {
; MSA-LABEL: srlri_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.w $w0, 0($1)
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 3)
- store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ store <4 x i32> %r, ptr %ptr, align 16
ret void
}
-define void @addvi_h(<8 x i16> * %ptr) {
+define void @addvi_h(ptr %ptr) {
; MSA-LABEL: addvi_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_h(<8 x i16> * %ptr) {
+define void @bclri_h(ptr %ptr) {
; MSA-LABEL: bclri_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 8)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
+define void @binsli_h(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsli_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w1, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
- %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
+ %b = load <8 x i16>, ptr %ptr2, align 16
%r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %b, i32 8)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
+define void @binsri_h(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsri_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w1, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
- %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
+ %b = load <8 x i16>, ptr %ptr2, align 16
%r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 14)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_h(<8 x i16> * %ptr) {
+define void @bnegi_h(ptr %ptr) {
; MSA-LABEL: bnegi_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 14)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_h(<8 x i16> * %ptr) {
+define void @bseti_h(ptr %ptr) {
; MSA-LABEL: bseti_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 15)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_h(<8 x i16> * %ptr) {
+define void @clei_s_h(ptr %ptr) {
; MSA-LABEL: clei_s_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 13)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_h(<8 x i16> * %ptr) {
+define void @clei_u_h(ptr %ptr) {
; MSA-LABEL: clei_u_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_h(<8 x i16> * %ptr) {
+define void @clti_s_h(ptr %ptr) {
; MSA-LABEL: clti_s_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 15)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_h(<8 x i16> * %ptr) {
+define void @clti_u_h(ptr %ptr) {
; MSA-LABEL: clti_u_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 25)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_h(<8 x i16> * %ptr) {
+define void @maxi_s_h(ptr %ptr) {
; MSA-LABEL: maxi_s_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 2)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_h(<8 x i16> * %ptr) {
+define void @maxi_u_h(ptr %ptr) {
; MSA-LABEL: maxi_u_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 2)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_h(<8 x i16> * %ptr) {
+define void @mini_s_h(ptr %ptr) {
; MSA-LABEL: mini_s_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 2)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_h(<8 x i16> * %ptr) {
+define void @mini_u_h(ptr %ptr) {
; MSA-LABEL: mini_u_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 2)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_h(<8 x i16> * %ptr) {
+define void @ldi_h(ptr %ptr) {
; MSA-LABEL: ldi_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ldi.h $w0, 3
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
%r = call <8 x i16> @llvm.mips.ldi.h(i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_h(<8 x i16> * %ptr) {
+define void @sldi_h(ptr %ptr) {
; MSA-LABEL: sldi_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @slli_h(<8 x i16> * %ptr) {
+define void @slli_h(ptr %ptr) {
; MSA-LABEL: slli_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @splati_h(<8 x i16> * %ptr) {
+define void @splati_h(ptr %ptr) {
; MSA-LABEL: splati_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srai_h(<8 x i16> * %ptr) {
+define void @srai_h(ptr %ptr) {
; MSA-LABEL: srai_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srari_h(<8 x i16> * %ptr) {
+define void @srari_h(ptr %ptr) {
; MSA-LABEL: srari_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srli_h(<8 x i16> * %ptr) {
+define void @srli_h(ptr %ptr) {
; MSA-LABEL: srli_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define void @srlri_h(<8 x i16> * %ptr) {
+define void @srlri_h(ptr %ptr) {
; MSA-LABEL: srlri_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.h $w0, 0($1)
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 3)
- store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ store <8 x i16> %r, ptr %ptr, align 16
ret void
}
-define i32 @copy_s_b(<16 x i8> * %ptr) {
+define i32 @copy_s_b(ptr %ptr) {
; MSA-LABEL: copy_s_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: copy_s.b $2, $w0[1]
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 1)
ret i32 %r
}
-define i32 @copy_s_h(<8 x i16> * %ptr) {
+define i32 @copy_s_h(ptr %ptr) {
; MSA-LABEL: copy_s_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: copy_s.h $2, $w0[1]
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 1)
ret i32 %r
}
-define i32 @copy_s_w(<4 x i32> * %ptr) {
+define i32 @copy_s_w(ptr %ptr) {
; MSA-LABEL: copy_s_w:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.w $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: copy_s.w $2, $w0[1]
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 1)
ret i32 %r
}
-define i32 @copy_u_b(<16 x i8> * %ptr) {
+define i32 @copy_u_b(ptr %ptr) {
; MSA-LABEL: copy_u_b:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.b $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: copy_u.b $2, $w0[1]
entry:
- %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %a = load <16 x i8>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 1)
ret i32 %r
}
-define i32 @copy_u_h(<8 x i16> * %ptr) {
+define i32 @copy_u_h(ptr %ptr) {
; MSA-LABEL: copy_u_h:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.h $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: copy_u.h $2, $w0[1]
entry:
- %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %a = load <8 x i16>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 1)
ret i32 %r
}
-define i32 @copy_u_w(<4 x i32> * %ptr) {
+define i32 @copy_u_w(ptr %ptr) {
; MSA32-LABEL: copy_u_w:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: ld.w $w0, 0($4)
; MSA64N64-NEXT: jr $ra
; MSA64N64-NEXT: copy_u.w $2, $w0[1]
entry:
- %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %a = load <4 x i32>, ptr %ptr, align 16
%r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 1)
ret i32 %r
}
-define i64 @copy_s_d(<2 x i64> * %ptr) {
+define i64 @copy_s_d(ptr %ptr) {
; MSA32-LABEL: copy_s_d:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: ld.w $w0, 0($4)
; MSA64N64-NEXT: jr $ra
; MSA64N64-NEXT: copy_s.d $2, $w0[1]
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 1)
ret i64 %r
}
-define i64 @copy_u_d(<2 x i64> * %ptr) {
+define i64 @copy_u_d(ptr %ptr) {
; MSA32-LABEL: copy_u_d:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: ld.w $w0, 0($4)
; MSA64N64-NEXT: jr $ra
; MSA64N64-NEXT: copy_s.d $2, $w0[1]
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 1)
ret i64 %r
}
-define void @addvi_d(<2 x i64> * %ptr) {
+define void @addvi_d(ptr %ptr) {
; MSA-LABEL: addvi_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bclri_d(<2 x i64> * %ptr) {
+define void @bclri_d(ptr %ptr) {
; MSA-LABEL: bclri_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 16)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @binsli_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
+define void @binsli_d(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsli_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w1, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
- %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
+ %b = load <2 x i64>, ptr %ptr2, align 16
%r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 4)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @binsri_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
+define void @binsri_d(ptr %ptr, ptr %ptr2) {
; MSA-LABEL: binsri_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($5)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w1, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
- %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
+ %b = load <2 x i64>, ptr %ptr2, align 16
%r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %b, i32 5)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bnegi_d(<2 x i64> * %ptr) {
+define void @bnegi_d(ptr %ptr) {
; MSA-LABEL: bnegi_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 9)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @bseti_d(<2 x i64> * %ptr) {
+define void @bseti_d(ptr %ptr) {
; MSA-LABEL: bseti_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clei_s_d(<2 x i64> * %ptr) {
+define void @clei_s_d(ptr %ptr) {
; MSA-LABEL: clei_s_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 15)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clei_u_d(<2 x i64> * %ptr) {
+define void @clei_u_d(ptr %ptr) {
; MSA-LABEL: clei_u_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clti_s_d(<2 x i64> * %ptr) {
+define void @clti_s_d(ptr %ptr) {
; MSA-LABEL: clti_s_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 15)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @clti_u_d(<2 x i64> * %ptr) {
+define void @clti_u_d(ptr %ptr) {
; MSA-LABEL: clti_u_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 25)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @ldi_d(<2 x i64> * %ptr) {
+define void @ldi_d(ptr %ptr) {
; MSA32-LABEL: ldi_d:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: ldi.d $w0, 3
; MSA64N64-NEXT: st.d $w0, 0($4)
entry:
%r = call <2 x i64> @llvm.mips.ldi.d(i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_s_d(<2 x i64> * %ptr) {
+define void @maxi_s_d(ptr %ptr) {
; MSA-LABEL: maxi_s_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @maxi_u_d(<2 x i64> * %ptr) {
+define void @maxi_u_d(ptr %ptr) {
; MSA-LABEL: maxi_u_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @mini_s_d(<2 x i64> * %ptr) {
+define void @mini_s_d(ptr %ptr) {
; MSA-LABEL: mini_s_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @mini_u_d(<2 x i64> * %ptr) {
+define void @mini_u_d(ptr %ptr) {
; MSA-LABEL: mini_u_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 2)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @sldi_d(<2 x i64> * %ptr) {
+define void @sldi_d(ptr %ptr) {
; MSA-LABEL: sldi_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
-define void @slli_d(<2 x i64> * %ptr) {
+define void @slli_d(ptr %ptr) {
; MSA-LABEL: slli_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; NOTE(review): checks @llvm.mips.srai.d (vector arithmetic shift-right by
; immediate 3). Same load/op/store shape as the sibling tests; pointer-type diff only.
-define void @srai_d(<2 x i64> * %ptr) {
+define void @srai_d(ptr %ptr) {
; MSA-LABEL: srai_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; NOTE(review): checks @llvm.mips.srari.d (rounding arithmetic shift-right by
; immediate 3). Pointer-type diff only.
-define void @srari_d(<2 x i64> * %ptr) {
+define void @srari_d(ptr %ptr) {
; MSA-LABEL: srari_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; NOTE(review): checks @llvm.mips.srli.d (vector logical shift-right by
; immediate 3). Pointer-type diff only.
-define void @srli_d(<2 x i64> * %ptr) {
+define void @srli_d(ptr %ptr) {
; MSA-LABEL: srli_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; NOTE(review): checks @llvm.mips.srlri.d (rounding logical shift-right by
; immediate 3), last of the immediate-shift family above. Pointer-type diff only.
-define void @srlri_d(<2 x i64> * %ptr) {
+define void @srlri_d(ptr %ptr) {
; MSA-LABEL: srlri_d:
; MSA: # %bb.0: # %entry
; MSA-NEXT: ld.d $w0, 0($4)
; MSA64N32-NEXT: jr $ra
; MSA64N32-NEXT: st.d $w0, 0($1)
entry:
- %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %a = load <2 x i64>, ptr %ptr, align 16
%r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 3)
- store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ store <2 x i64> %r, ptr %ptr, align 16
ret void
}
; NOTE(review): checks @llvm.mips.ld.d with a 4096-byte offset that does not
; fit the instruction's immediate field — the MSA32 CHECK line shows the offset
; being materialized with addiu first. Pointer-type diff only.
-define void @ld_d2(<2 x i64> * %ptr, i8 * %ldptr) {
+define void @ld_d2(ptr %ptr, ptr %ldptr) {
; MSA32-LABEL: ld_d2:
; MSA32: # %bb.0: # %entry
; MSA32-NEXT: addiu $1, $5, 4096
; MSA64N64-NEXT: jr $ra
; MSA64N64-NEXT: st.d $w0, 0($4)
entry:
- %a = call <2 x i64> @llvm.mips.ld.d(i8* %ldptr, i32 4096)
- store <2 x i64> %a, <2 x i64> * %ptr, align 16
+ %a = call <2 x i64> @llvm.mips.ld.d(ptr %ldptr, i32 4096)
+ store <2 x i64> %a, ptr %ptr, align 16
ret void
}
; NOTE(review): intrinsic declarations used by the MSA tests above. The diff
; rewrites every i8* parameter of the ld/st family to opaque 'ptr'; the
; copy/bmzi declarations take no pointers and are unchanged.
declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32)
declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32)
declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32)
-declare <16 x i8> @llvm.mips.ld.b(i8*, i32)
-declare <8 x i16> @llvm.mips.ld.h(i8*, i32)
-declare <4 x i32> @llvm.mips.ld.w(i8*, i32)
-declare <2 x i64> @llvm.mips.ld.d(i8*, i32)
-declare void @llvm.mips.st.b(<16 x i8>, i8*, i32)
-declare void @llvm.mips.st.h(<8 x i16>, i8*, i32)
-declare void @llvm.mips.st.w(<4 x i32>, i8*, i32)
-declare void @llvm.mips.st.d(<2 x i64>, i8*, i32)
+declare <16 x i8> @llvm.mips.ld.b(ptr, i32)
+declare <8 x i16> @llvm.mips.ld.h(ptr, i32)
+declare <4 x i32> @llvm.mips.ld.w(ptr, i32)
+declare <2 x i64> @llvm.mips.ld.d(ptr, i32)
+declare void @llvm.mips.st.b(<16 x i8>, ptr, i32)
+declare void @llvm.mips.st.h(<8 x i16>, ptr, i32)
+declare void @llvm.mips.st.w(<4 x i32>, ptr, i32)
+declare void @llvm.mips.st.d(<2 x i64>, ptr, i32)
; CHECK-LABEL: test1:
%0 = call <4 x i32> asm "ldi.w ${0:w}, 1", "=f"()
; CHECK: ldi.w $w{{[1-3]?[0-9]}}, 1
- store <4 x i32> %0, <4 x i32>* @v4i32_r
+ store <4 x i32> %0, ptr @v4i32_r
ret void
}
; NOTE(review): inline-asm constraint test — "=f,f" ties both operands to MSA
; register class, and the CHECK accepts any $w0..$w31. Only the loads/stores of
; global @v4i32_r change to opaque 'ptr'.
define void @test2() nounwind {
entry:
; CHECK-LABEL: test2:
- %0 = load <4 x i32>, <4 x i32>* @v4i32_r
+ %0 = load <4 x i32>, ptr @v4i32_r
%1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0)
; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1
- store <4 x i32> %1, <4 x i32>* @v4i32_r
+ store <4 x i32> %1, ptr @v4i32_r
ret void
}
; NOTE(review): like test2 but with an explicit ~{$w0} clobber, so the CHECK
; regex deliberately excludes $w0 from the allowed result registers.
define void @test3() nounwind {
entry:
; CHECK-LABEL: test3:
- %0 = load <4 x i32>, <4 x i32>* @v4i32_r
+ %0 = load <4 x i32>, ptr @v4i32_r
%1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0)
; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1
- store <4 x i32> %1, <4 x i32>* @v4i32_r
+ store <4 x i32> %1, ptr @v4i32_r
ret void
}
; Test intrinsics for 4-byte and 8-byte MSA load and stores.
; NOTE(review): ldr.d (unaligned 8-byte load into a vector) with offset 16;
; MIPS32R5 big-endian needs extra register shuffling, hence the implicit-def
; CHECK line. Diff migrates i8*/<2 x i64>* to opaque 'ptr'.
-define void @llvm_mips_ldr_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_ldr_d_test(ptr %val, ptr %ptr) nounwind {
; MIPS32R5-EB-LABEL: llvm_mips_ldr_d_test:
; MIPS32R5-EB: # %bb.0: # %entry
; MIPS32R5-EB-NEXT: # implicit-def: $v0
; MIPS64R6-NEXT: st.d $w0, 0($4)
; MIPS64R6-NEXT: jrc $ra
entry:
- %0 = tail call <2 x i64> @llvm.mips.ldr.d(i8* %ptr, i32 16)
- store <2 x i64> %0, <2 x i64>* %val
+ %0 = tail call <2 x i64> @llvm.mips.ldr.d(ptr %ptr, i32 16)
+ store <2 x i64> %0, ptr %val
ret void
}
-declare <2 x i64> @llvm.mips.ldr.d(i8*, i32) nounwind
+declare <2 x i64> @llvm.mips.ldr.d(ptr, i32) nounwind
; NOTE(review): 4-byte variant of the ldr test above (ldr.w, offset 16).
; Pointer-type diff only.
-define void @llvm_mips_ldr_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_ldr_w_test(ptr %val, ptr %ptr) nounwind {
; MIPS32R5-EB-LABEL: llvm_mips_ldr_w_test:
; MIPS32R5-EB: # %bb.0: # %entry
; MIPS32R5-EB-NEXT: # implicit-def: $at
; MIPS64R6-NEXT: st.w $w0, 0($4)
; MIPS64R6-NEXT: jrc $ra
entry:
- %0 = tail call <4 x i32> @llvm.mips.ldr.w(i8* %ptr, i32 16)
- store <4 x i32> %0, <4 x i32>* %val
+ %0 = tail call <4 x i32> @llvm.mips.ldr.w(ptr %ptr, i32 16)
+ store <4 x i32> %0, ptr %val
ret void
}
-declare <4 x i32> @llvm.mips.ldr.w(i8*, i32) nounwind
+declare <4 x i32> @llvm.mips.ldr.w(ptr, i32) nounwind
; NOTE(review): str.d — the store counterpart: load a vector from %val and
; store 8 bytes of it unaligned at %ptr+16 (MIPS64R6 lowers it to a plain sd).
; Pointer-type diff only.
-define void @llvm_mips_str_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_str_d_test(ptr %val, ptr %ptr) nounwind {
; MIPS32R5-EB-LABEL: llvm_mips_str_d_test:
; MIPS32R5-EB: # %bb.0: # %entry
; MIPS32R5-EB-NEXT: ld.d $w0, 0($4)
; MIPS64R6-NEXT: sd $1, 16($5)
; MIPS64R6-NEXT: jrc $ra
entry:
- %0 = load <2 x i64>, <2 x i64>* %val
- tail call void @llvm.mips.str.d(<2 x i64> %0, i8* %ptr, i32 16)
+ %0 = load <2 x i64>, ptr %val
+ tail call void @llvm.mips.str.d(<2 x i64> %0, ptr %ptr, i32 16)
ret void
}
-declare void @llvm.mips.str.d(<2 x i64>, i8*, i32) nounwind
+declare void @llvm.mips.str.d(<2 x i64>, ptr, i32) nounwind
; NOTE(review): 4-byte variant of the str test (str.w lowers to a plain sw on
; MIPS64R6). Pointer-type diff only.
-define void @llvm_mips_str_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_str_w_test(ptr %val, ptr %ptr) nounwind {
; MIPS32R5-EB-LABEL: llvm_mips_str_w_test:
; MIPS32R5-EB: # %bb.0: # %entry
; MIPS32R5-EB-NEXT: ld.w $w0, 0($4)
; MIPS64R6-NEXT: sw $1, 16($5)
; MIPS64R6-NEXT: jrc $ra
entry:
- %0 = load <4 x i32>, <4 x i32>* %val
- tail call void @llvm.mips.str.w(<4 x i32> %0, i8* %ptr, i32 16)
+ %0 = load <4 x i32>, ptr %val
+ tail call void @llvm.mips.str.w(<4 x i32> %0, ptr %ptr, i32 16)
ret void
}
-declare void @llvm.mips.str.w(<4 x i32>, i8*, i32) nounwind
+declare void @llvm.mips.str.w(<4 x i32>, ptr, i32) nounwind
; "Unexpected illegal type!" assertion.
; It should at least successfully build.
; NOTE(review): fuzzer-generated crash reproducer — the IR is intentionally
; meaningless; only "compiles without asserting" matters. This excerpt is
; elided (several SSA values such as %Cmp25, %Shuff28, %B are referenced but
; not defined here). Besides the i8*->ptr rewrites, the diff deletes the now
; no-op bitcasts %PC/%PC52 and folds their uses into %0 / %A3 directly.
-define void @autogen_SD1704963983(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD1704963983(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca <4 x double>
%A3 = alloca <8 x i64>
%A2 = alloca <1 x double>
%A1 = alloca double
%A = alloca i32
- %L = load i8, i8* %0
- store i8 77, i8* %0
+ %L = load i8, ptr %0
+ store i8 77, ptr %0
%E = extractelement <8 x i64> zeroinitializer, i32 2
%Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3>
%I = insertelement <8 x i64> zeroinitializer, i64 %E, i32 7
- %Sl = select i1 false, i8* %0, i8* %0
+ %Sl = select i1 false, ptr %0, ptr %0
%Cmp = icmp eq i32 434069, 272505
br label %CF
CF: ; preds = %CF, %CF78, %BB
- %L5 = load i8, i8* %Sl
- store i8 %L, i8* %Sl
+ %L5 = load i8, ptr %Sl
+ store i8 %L, ptr %Sl
%E6 = extractelement <8 x i32> zeroinitializer, i32 2
%Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
%I8 = insertelement <8 x i64> zeroinitializer, i64 %4, i32 7
%FC = sitofp <8 x i64> zeroinitializer to <8 x float>
%Sl9 = select i1 %Cmp, i8 77, i8 77
%Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer
- %L11 = load i8, i8* %0
- store i8 %Sl9, i8* %0
+ %L11 = load i8, ptr %0
+ store i8 %Sl9, ptr %0
%E12 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7>
%I14 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
%Tr = trunc <8 x i64> %Shuff to <8 x i32>
%Sl16 = select i1 %Cmp, i8 77, i8 %5
%Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10
- %L18 = load i8, i8* %Sl
- store i8 -1, i8* %Sl
+ %L18 = load i8, ptr %Sl
+ store i8 -1, ptr %Sl
%E19 = extractelement <8 x i32> zeroinitializer, i32 3
%Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef>
%I21 = insertelement <8 x i64> %Shuff13, i64 %E, i32 0
br i1 %Cmp25, label %CF, label %CF78
CF78: ; preds = %CF
- %L26 = load i8, i8* %Sl
- store i32 50347, i32* %A
+ %L26 = load i8, ptr %Sl
+ store i32 50347, ptr %A
%E27 = extractelement <8 x i1> %Cmp10, i32 2
br i1 %E27, label %CF, label %CF77
%B30 = urem <8 x i32> %Tr, zeroinitializer
%Tr31 = trunc i32 0 to i16
%Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
- %L33 = load i8, i8* %Sl
- store i8 %L26, i8* %Sl
+ %L33 = load i8, ptr %Sl
+ store i8 %L26, ptr %Sl
%E34 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef
%I36 = insertelement <8 x i64> %Shuff28, i64 %E, i32 7
%B37 = srem <1 x i16> %I29, zeroinitializer
%FC38 = sitofp <8 x i32> %B30 to <8 x double>
%Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24
- %L40 = load i8, i8* %Sl
- store i8 %Sl16, i8* %Sl
+ %L40 = load i8, ptr %Sl
+ store i8 %Sl16, ptr %Sl
%E41 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12>
%I43 = insertelement <4 x i32> zeroinitializer, i32 272505, i32 0
%B44 = urem <8 x i32> %B30, %Tr
- %PC = bitcast i8* %0 to i64*
%Sl45 = select i1 %Cmp, <8 x i1> %Cmp10, <8 x i1> %Shuff42
%Cmp46 = fcmp ugt float 0xB856238A00000000, 0x47DA795E40000000
br i1 %Cmp46, label %CF77, label %CF80
CF80: ; preds = %CF80, %CF77
- %L47 = load i64, i64* %PC
- store i8 77, i8* %Sl
+ %L47 = load i64, ptr %0
+ store i8 77, ptr %Sl
%E48 = extractelement <8 x i64> zeroinitializer, i32 2
%Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3>
%I50 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
%B51 = fdiv float 0x46CC2D8000000000, %FC23
- %PC52 = bitcast <8 x i64>* %A3 to i64*
%Sl53 = select i1 %Cmp, <8 x i64> %Shuff, <8 x i64> %Shuff
%Cmp54 = fcmp ole float 0x47DA795E40000000, 0xB856238A00000000
br i1 %Cmp54, label %CF80, label %CF81
CF81: ; preds = %CF80
- %L55 = load i8, i8* %Sl
- store i8 %Sl16, i8* %Sl
+ %L55 = load i8, ptr %Sl
+ store i8 %Sl16, ptr %Sl
%E56 = extractelement <1 x i16> %B, i32 0
%Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1>
%I58 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
%B59 = srem i32 %E19, %E19
%Sl60 = select i1 %Cmp, i8 77, i8 77
%Cmp61 = icmp ult <1 x i16> zeroinitializer, %B
- %L62 = load i8, i8* %Sl
- store i64 %L47, i64* %PC52
+ %L62 = load i8, ptr %Sl
+ store i64 %L47, ptr %A3
%E63 = extractelement <4 x i32> %I43, i32 2
%Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
%I65 = insertelement <8 x i64> %B22, i64 %L47, i32 7
br i1 %Cmp69, label %CF77, label %CF79
CF79: ; preds = %CF81
- %L70 = load i32, i32* %A
- store i64 %4, i64* %PC
+ %L70 = load i32, ptr %A
+ store i64 %4, ptr %0
%E71 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9>
%I73 = insertelement <8 x i16> zeroinitializer, i16 %E12, i32 5
%B74 = fsub double 0.000000e+00, 0.000000e+00
%Sl75 = select i1 %Cmp46, i32 %E6, i32 %E19
%Cmp76 = icmp ugt <4 x i32> %I43, zeroinitializer
- store i8 %L, i8* %Sl
- store i64 %L47, i64* %PC
- store i64 %L47, i64* %PC
- store i8 %L5, i8* %Sl
- store i8 %L5, i8* %0
+ store i8 %L, ptr %Sl
+ store i64 %L47, ptr %0
+ store i64 %L47, ptr %0
+ store i8 %L5, ptr %Sl
+ store i8 %L5, ptr %0
ret void
}
; `Opc && "Cannot copy registers"' assertion.
; It should at least successfully build.
; NOTE(review): fuzzer-generated reproducer for a register-copy assertion;
; content is meaningless by design. Excerpt is elided (e.g. %Cmp31, %B57,
; %Sl16 used without visible defs). Diff here is a pure i8*/i64* -> ptr
; rewrite; no instructions are added or removed.
-define void @autogen_SD1935737938(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD1935737938(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca i64
%A3 = alloca <4 x i32>
%A2 = alloca i64
%A1 = alloca i32
%A = alloca <2 x i64>
- %L = load i8, i8* %0
- store i8 -1, i8* %0
+ %L = load i8, ptr %0
+ store i8 -1, ptr %0
%E = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
%I = insertelement <1 x i64> <i64 -1>, i64 286689, i32 0
%B = lshr i8 %L, -69
%ZE = fpext float 0xBF2AA5FE80000000 to double
%Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1>
- %L5 = load i8, i8* %0
- store i8 -69, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 -69, ptr %0
%E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
%Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
%I8 = insertelement <2 x i32> zeroinitializer, i32 135673, i32 1
%FC = uitofp i32 %3 to double
%Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer
%Cmp = icmp ne <1 x i64> %I, <i64 -1>
- %L11 = load i8, i8* %0
- store i8 %L11, i8* %0
+ %L11 = load i8, ptr %0
+ store i8 %L11, ptr %0
%E12 = extractelement <1 x i64> <i64 -1>, i32 0
%Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1>
%I14 = insertelement <1 x i64> %I, i64 303290, i32 0
br label %CF74
CF74: ; preds = %CF74, %CF80, %CF76, %BB
- %L18 = load i8, i8* %0
- store i8 -69, i8* %0
+ %L18 = load i8, ptr %0
+ store i8 -69, ptr %0
%E19 = extractelement <1 x i64> %Sl, i32 0
%Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
%I21 = insertelement <2 x i32> %Shuff, i32 135673, i32 0
%B22 = urem i32 135673, %3
%FC23 = sitofp i8 %L to float
%Sl24 = select i1 true, i8 %B, i8 %L18
- %L25 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L25 = load i8, ptr %0
+ store i8 %L, ptr %0
%E26 = extractelement <2 x i32> %Shuff, i32 1
%Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0>
%I28 = insertelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E12, i32 8
br i1 %Cmp31, label %CF74, label %CF80
CF80: ; preds = %CF74
- %L32 = load i8, i8* %0
- store i8 -1, i8* %0
+ %L32 = load i8, ptr %0
+ store i8 -1, ptr %0
%E33 = extractelement <2 x i32> zeroinitializer, i32 1
%Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
%I35 = insertelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 -1, i32 0
%FC36 = sitofp <1 x i1> %Cmp to <1 x float>
%Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27
- %L39 = load i8, i8* %0
- store i8 %Sl24, i8* %0
+ %L39 = load i8, ptr %0
+ store i8 %Sl24, ptr %0
%E40 = extractelement <8 x i64> zeroinitializer, i32 1
%Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2>
%I42 = insertelement <4 x i32> zeroinitializer, i32 414573, i32 2
br i1 %Cmp45, label %CF74, label %CF76
CF76: ; preds = %CF80
- %L46 = load i8, i8* %0
- store i8 %L39, i8* %0
+ %L46 = load i8, ptr %0
+ store i8 %L39, ptr %0
%E47 = extractelement <2 x i32> %Shuff27, i32 0
%Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1>
%I49 = insertelement <1 x i64> <i64 -1>, i64 %E12, i32 0
br i1 %Cmp52, label %CF74, label %CF75
CF75: ; preds = %CF75, %CF76
- %L53 = load i8, i8* %0
- store i8 %L18, i8* %0
+ %L53 = load i8, ptr %0
+ store i8 %L18, ptr %0
%E54 = extractelement <8 x i8> %Shuff20, i32 5
%Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
%I56 = insertelement <4 x i32> %I42, i32 %B22, i32 2
br i1 %Cmp59, label %CF75, label %CF78
CF78: ; preds = %CF75
- %L60 = load i8, i8* %0
- store i8 -69, i8* %0
+ %L60 = load i8, ptr %0
+ store i8 -69, ptr %0
%E61 = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3>
%I63 = insertelement <1 x i1> %Sl16, i1 %Cmp45, i32 0
br label %CF
CF: ; preds = %CF, %CF78
- %L68 = load i8, i8* %0
- store i64 %B57, i64* %2
+ %L68 = load i8, ptr %0
+ store i64 %B57, ptr %2
%E69 = extractelement <2 x i1> %Shuff41, i32 1
br i1 %E69, label %CF, label %CF77
br i1 %Cmp73, label %CF77, label %CF79
CF79: ; preds = %CF77
- store i8 %L18, i8* %0
- store i8 %E54, i8* %0
- store i8 %L39, i8* %0
- store i8 %L39, i8* %0
- store i8 %B, i8* %0
+ store i8 %L18, ptr %0
+ store i8 %E54, ptr %0
+ store i8 %L39, ptr %0
+ store i8 %L39, ptr %0
+ store i8 %B, ptr %0
ret void
}
; This test originally failed for MSA after dereferencing a null this pointer.
; It should at least successfully build.
; NOTE(review): fuzzer-generated reproducer; elided excerpt (e.g. %Cmp19,
; %FC, %Cmp12 used without visible defs). Besides i8*/i32* -> ptr, the diff
; deletes the identity bitcast %PC (i32* %A3 to i32*) and rewrites all %PC
; uses to %A3 directly — a pure no-op cleanup under opaque pointers.
-define void @autogen_SD2704903805(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD2704903805(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca i32
%A3 = alloca i32
%A2 = alloca i8
%A1 = alloca i32
%A = alloca i8
- %L = load i8, i8* %0
- store i8 %5, i8* %0
+ %L = load i8, ptr %0
+ store i8 %5, ptr %0
%E = extractelement <2 x i16> zeroinitializer, i32 0
%Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef
%I = insertelement <1 x i8> <i8 -1>, i8 85, i32 0
br label %CF83
CF83: ; preds = %BB
- %L5 = load i8, i8* %0
- store i8 85, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 85, ptr %0
%E6 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
%I8 = insertelement <4 x i16> zeroinitializer, i16 %E, i32 3
br label %CF
CF: ; preds = %CF, %CF81, %CF83
- %L13 = load i8, i8* %0
- store i8 0, i8* %0
+ %L13 = load i8, ptr %0
+ store i8 0, ptr %0
%E14 = extractelement <2 x i64> zeroinitializer, i32 0
%Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
%I16 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 81222, i32 1
br i1 %Cmp19, label %CF80, label %CF81
CF81: ; preds = %CF80
- %L20 = load i8, i8* %0
- store i8 85, i8* %0
+ %L20 = load i8, ptr %0
+ store i8 85, ptr %0
%E21 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer
%I23 = insertelement <1 x i8> <i8 -1>, i8 %L5, i32 0
%FC24 = fptoui <4 x float> %FC to <4 x i16>
%Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1>
%Cmp26 = icmp ult <4 x i64> %I16, %Shuff15
- %L27 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L27 = load i8, ptr %0
+ store i8 %L, ptr %0
%E28 = extractelement <1 x i8> <i8 -1>, i32 0
%Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9>
%I30 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E14, i32 1
%B31 = mul i8 %E28, 85
- %PC = bitcast i32* %A3 to i32*
%Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000
- %L33 = load i32, i32* %PC
- store i32 %L33, i32* %PC
+ %L33 = load i32, ptr %A3
+ store i32 %L33, ptr %A3
%E34 = extractelement <2 x i16> zeroinitializer, i32 1
%Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer
%I36 = insertelement <1 x i8> <i8 -1>, i8 %L13, i32 0
br i1 %Cmp39, label %CF, label %CF77
CF77: ; preds = %CF77, %CF81
- %L40 = load i32, i32* %PC
- store i32 %3, i32* %PC
+ %L40 = load i32, ptr %A3
+ store i32 %3, ptr %A3
%E41 = extractelement <2 x i32> zeroinitializer, i32 0
%Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
%I43 = insertelement <1 x i8> <i8 -1>, i8 0, i32 0
%Se = sext i32 %3 to i64
%Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43
%Cmp46 = icmp sge <1 x i8> %I36, %Shuff
- %L47 = load i32, i32* %PC
- store i32 %L33, i32* %PC
+ %L47 = load i32, ptr %A3
+ store i32 %L33, ptr %A3
%E48 = extractelement <2 x i16> zeroinitializer, i32 0
%Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
%I50 = insertelement <2 x i32> %Sl25, i32 47963, i32 1
br i1 %Cmp54, label %CF77, label %CF78
CF78: ; preds = %CF78, %CF77
- %L55 = load i32, i32* %PC
- store i32 %L33, i32* %PC
+ %L55 = load i32, ptr %A3
+ store i32 %L33, ptr %A3
%E56 = extractelement <8 x i16> %Shuff29, i32 4
%Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
%I58 = insertelement <1 x i8> %B51, i8 %Sl53, i32 0
br i1 %Cmp60, label %CF78, label %CF79
CF79: ; preds = %CF79, %CF78
- %L61 = load i32, i32* %PC
- store i32 %L33, i32* %A3
+ %L61 = load i32, ptr %A3
+ store i32 %L33, ptr %A3
%E62 = extractelement <4 x i64> %Shuff15, i32 1
%Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6>
%I64 = insertelement <2 x i64> zeroinitializer, i64 %Se, i32 0
br i1 %Cmp68, label %CF79, label %CF82
CF82: ; preds = %CF79
- %L69 = load i32, i32* %PC
- store i32 %L33, i32* %PC
+ %L69 = load i32, ptr %A3
+ store i32 %L33, ptr %A3
%E70 = extractelement <8 x i16> zeroinitializer, i32 3
%Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4>
%I72 = insertelement <1 x i8> <i8 -1>, i8 %L, i32 0
%ZE74 = zext <4 x i1> %Cmp26 to <4 x i32>
%Sl75 = select i1 %Cmp, i32 463279, i32 %L61
%Cmp76 = icmp sgt <1 x i8> %Shuff49, %Shuff22
- store i8 %B31, i8* %0
- store i8 85, i8* %0
- store i32 %L33, i32* %PC
- store i8 %B65, i8* %0
- store i8 %L5, i8* %0
+ store i8 %B31, ptr %0
+ store i8 85, ptr %0
+ store i32 %L33, ptr %A3
+ store i8 %B65, ptr %0
+ store i8 %L5, ptr %0
ret void
}
; "Don't know how to expand this condition!" unreachable.
; It should at least successfully build.
; NOTE(review): fuzzer-generated reproducer; elided excerpt (e.g. %Cmp18,
; %Sl24, %Sl10 used without visible defs). The diff is a pure typed-pointer
; -> opaque-ptr rewrite; note %Sl78 now selects between two ptr values, and
; the trailing i64 stores go through that selected pointer.
-define void @autogen_SD3861334421(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3861334421(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca <2 x i32>
%A3 = alloca <2 x double>
%A2 = alloca i64
%A1 = alloca i64
%A = alloca double
- %L = load i8, i8* %0
- store i8 -101, i8* %0
+ %L = load i8, ptr %0
+ store i8 -101, ptr %0
%E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
%Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1>
%I = insertelement <8 x i64> zeroinitializer, i64 %4, i32 5
%B = and i64 116376, 57247
%FC = uitofp i8 7 to double
%Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- %L5 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 %L, ptr %0
%E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3
%Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
%I8 = insertelement <8 x i8> %Sl, i8 7, i32 4
br label %CF
CF: ; preds = %CF, %BB
- %L11 = load i8, i8* %0
- store i8 -87, i8* %0
+ %L11 = load i8, ptr %0
+ store i8 -87, ptr %0
%E12 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5>
%I14 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 1
br i1 %Cmp18, label %CF, label %CF80
CF80: ; preds = %CF80, %CF88, %CF
- %L19 = load i8, i8* %0
- store i8 -101, i8* %0
+ %L19 = load i8, ptr %0
+ store i8 -101, ptr %0
%E20 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
%I22 = insertelement <4 x i64> zeroinitializer, i64 127438, i32 1
br i1 %Cmp25, label %CF80, label %CF83
CF83: ; preds = %CF83, %CF80
- %L26 = load i8, i8* %0
- store i8 -87, i8* %0
+ %L26 = load i8, ptr %0
+ store i8 -87, ptr %0
%E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
%Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
%I29 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 492085, i32 1
br i1 %Cmp33, label %CF83, label %CF88
CF88: ; preds = %CF83
- %L34 = load i8, i8* %0
- store i8 -87, i8* %0
+ %L34 = load i8, ptr %0
+ store i8 -87, ptr %0
%E35 = extractelement <8 x i64> %Shuff, i32 7
%Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
%I37 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 0
br i1 %Cmp40, label %CF80, label %CF81
CF81: ; preds = %CF81, %CF85, %CF87, %CF88
- %L41 = load i8, i8* %0
- store i8 %L34, i8* %0
+ %L41 = load i8, ptr %0
+ store i8 %L34, ptr %0
%E42 = extractelement <8 x i64> %Shuff13, i32 6
%Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7>
%I44 = insertelement <4 x i64> zeroinitializer, i64 116376, i32 3
br i1 %Cmp47, label %CF81, label %CF85
CF85: ; preds = %CF81
- %L48 = load i8, i8* %0
- store i8 -101, i8* %0
+ %L48 = load i8, ptr %0
+ store i8 -101, ptr %0
%E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2
%Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
%I51 = insertelement <4 x i64> zeroinitializer, i64 %E20, i32 3
%FC53 = uitofp i8 %L48 to double
%Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24
%Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer
- %L56 = load i8, i8* %0
- store i8 %L11, i8* %0
+ %L56 = load i8, ptr %0
+ store i8 %L11, ptr %0
%E57 = extractelement <4 x i64> %Shuff21, i32 1
%Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2>
%I59 = insertelement <4 x i64> zeroinitializer, i64 %E42, i32 2
CF84: ; preds = %CF84, %CF85
%Sl62 = select i1 false, i8 %L, i8 %L48
%Cmp63 = icmp ne <8 x i64> %I, zeroinitializer
- %L64 = load i8, i8* %0
- store i8 %5, i8* %0
+ %L64 = load i8, ptr %0
+ store i8 %5, ptr %0
%E65 = extractelement <8 x i1> %Cmp55, i32 0
br i1 %E65, label %CF84, label %CF87
%ZE69 = zext <8 x i8> %Sl32 to <8 x i64>
%Sl70 = select i1 %Tr61, i64 %E20, i64 %E12
%Cmp71 = icmp slt <8 x i64> %I, %Shuff
- %L72 = load i8, i8* %0
- store i8 %L72, i8* %0
+ %L72 = load i8, ptr %0
+ store i8 %L72, ptr %0
%E73 = extractelement <8 x i1> %Cmp55, i32 6
br i1 %E73, label %CF81, label %CF82
%I75 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 3
%B76 = fsub double 0.000000e+00, %FC53
%Tr77 = trunc i32 %E to i8
- %Sl78 = select i1 %Cmp18, i64* %A2, i64* %2
+ %Sl78 = select i1 %Cmp18, ptr %A2, ptr %2
%Cmp79 = icmp eq i32 394647, 492085
br i1 %Cmp79, label %CF82, label %CF86
CF86: ; preds = %CF82
- store i64 %Sl70, i64* %Sl78
- store i64 %E57, i64* %Sl78
- store i64 %Sl70, i64* %Sl78
- store i64 %B, i64* %Sl78
- store i64 %Sl10, i64* %Sl78
+ store i64 %Sl70, ptr %Sl78
+ store i64 %E57, ptr %Sl78
+ store i64 %Sl70, ptr %Sl78
+ store i64 %B, ptr %Sl78
+ store i64 %Sl10, ptr %Sl78
ret void
}
; "Type for zero vector elements is not legal" assertion.
; It should at least successfully build.
; NOTE(review): fuzzer-generated reproducer; elided excerpt (e.g. %Cmp10, %B,
; %Sl9 used without visible defs). Diff deletes the bitcasts %PC (i1* %A4 to
; i64*) and %PC37 (i8* %0 to i1*) and folds uses into %A4 / %0 — under opaque
; pointers the i64 stores through the i1 alloca %A4 are expressed directly
; (intentionally type-punning fuzz IR, preserved as-is).
-define void @autogen_SD3926023935(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3926023935(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca i1
%A3 = alloca float
%A2 = alloca double
%A1 = alloca float
%A = alloca double
- %L = load i8, i8* %0
- store i8 -123, i8* %0
+ %L = load i8, ptr %0
+ store i8 -123, ptr %0
%E = extractelement <4 x i64> zeroinitializer, i32 1
%Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%I = insertelement <2 x i1> zeroinitializer, i1 false, i32 0
%BC = bitcast i64 181325 to double
%Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer
%Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer
- %L5 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 %L, ptr %0
%E6 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0>
%I8 = insertelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 498254, i32 4
br label %CF80
CF80: ; preds = %BB
- %L11 = load i8, i8* %0
- store i8 -123, i8* %0
+ %L11 = load i8, ptr %0
+ store i8 -123, ptr %0
%E12 = extractelement <2 x i16> zeroinitializer, i32 1
%Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%I14 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B, i32 2
%B15 = sdiv i64 334618, -1
- %PC = bitcast i1* %A4 to i64*
%Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
%Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16
- %L18 = load double, double* %A2
- store i64 498254, i64* %PC
+ %L18 = load double, ptr %A2
+ store i64 498254, ptr %A4
%E19 = extractelement <4 x i64> zeroinitializer, i32 0
%Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
%I21 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
%ZE = zext <2 x i1> %Shuff20 to <2 x i32>
%Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer
%Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer
- %L25 = load i8, i8* %0
- store i8 %L25, i8* %0
+ %L25 = load i8, ptr %0
+ store i8 %L25, ptr %0
%E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3
%Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4>
%I28 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 0
CF79: ; preds = %CF80
%Sl30 = select i1 false, i8 %B29, i8 -123
%Cmp31 = icmp sge <2 x i1> %I, %I
- %L32 = load i64, i64* %PC
- store i8 -123, i8* %0
+ %L32 = load i64, ptr %A4
+ store i8 -123, ptr %0
%E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2
%Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
%I35 = insertelement <4 x i64> zeroinitializer, i64 498254, i32 3
%B36 = sub <8 x i64> %I8, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
- %PC37 = bitcast i8* %0 to i1*
%Sl38 = select i1 %Cmp10, i8 -43, i8 %L5
%Cmp39 = icmp eq i64 498254, %B15
br label %CF
CF: ; preds = %CF, %CF79
- %L40 = load double, double* %A
- store i1 %Cmp39, i1* %PC37
+ %L40 = load double, ptr %A
+ store i1 %Cmp39, ptr %0
%E41 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef>
%I43 = insertelement <4 x i32> %Shuff, i32 %3, i32 0
br i1 %Cmp46, label %CF77, label %CF78
CF78: ; preds = %CF78, %CF83, %CF82, %CF77
- %L47 = load i64, i64* %PC
- store i8 -123, i8* %0
+ %L47 = load i64, ptr %A4
+ store i8 -123, ptr %0
%E48 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
%I50 = insertelement <2 x i1> zeroinitializer, i1 %Cmp10, i32 0
br i1 %Cmp54, label %CF78, label %CF82
CF82: ; preds = %CF83
- %L55 = load i64, i64* %PC
- store i64 %L32, i64* %PC
+ %L55 = load i64, ptr %A4
+ store i64 %L32, ptr %A4
%E56 = extractelement <2 x i16> %Shuff7, i32 1
%Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
%I58 = insertelement <2 x i32> %Sl, i32 %Tr52, i32 0
%FC = sitofp i64 498254 to double
%Sl60 = select i1 false, i64 %E6, i64 -1
%Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43
- %L62 = load i64, i64* %PC
- store i64 %Sl9, i64* %PC
+ %L62 = load i64, ptr %A4
+ store i64 %Sl9, ptr %A4
%E63 = extractelement <2 x i32> %ZE, i32 0
%Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
%I65 = insertelement <4 x i32> %Shuff, i32 %3, i32 3
CF81: ; preds = %CF82
%Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
- %L70 = load i8, i8* %0
- store i64 %L55, i64* %PC
+ %L70 = load i8, ptr %0
+ store i64 %L55, ptr %A4
%E71 = extractelement <4 x i32> %Shuff49, i32 1
%Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%I73 = insertelement <4 x i64> %Shuff64, i64 %E, i32 2
%B74 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
%Sl75 = select i1 %Sl68, i64 %B51, i64 %L55
%Cmp76 = icmp sgt <8 x i64> %B74, %B36
- store i1 %Cmp39, i1* %PC37
- store i64 %E41, i64* %PC
- store i64 %L32, i64* %PC
- store i64 %Sl75, i64* %2
- store i64 %L32, i64* %PC
+ store i1 %Cmp39, ptr %0
+ store i64 %E41, ptr %A4
+ store i64 %L32, ptr %A4
+ store i64 %Sl75, ptr %2
+ store i64 %L32, ptr %A4
ret void
}
; v4f32 on MSA.
; It should at least successfully build.
-define void @autogen_SD3997499501(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3997499501(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca <1 x double>
%A3 = alloca double
%A2 = alloca float
%A1 = alloca double
%A = alloca double
- %L = load i8, i8* %0
- store i8 97, i8* %0
+ %L = load i8, ptr %0
+ store i8 97, ptr %0
%E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
%Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
%I = insertelement <4 x i64> zeroinitializer, i64 0, i32 3
%Tr = trunc <1 x i64> zeroinitializer to <1 x i8>
- %Sl = select i1 false, double* %A1, double* %A
+ %Sl = select i1 false, ptr %A1, ptr %A
%Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer
- %L5 = load double, double* %Sl
- store float -4.374162e+06, float* %A2
+ %L5 = load double, ptr %Sl
+ store float -4.374162e+06, ptr %A2
%E6 = extractelement <4 x i64> zeroinitializer, i32 3
%Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
%I8 = insertelement <2 x i1> %Shuff, i1 false, i32 0
%B = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1>
- %PC = bitcast float* %A2 to float*
%Sl9 = select i1 false, i32 82299, i32 0
%Cmp10 = icmp slt i8 97, %5
br label %CF72
CF72: ; preds = %CF72, %CF80, %CF78, %BB
- %L11 = load double, double* %Sl
- store double 0.000000e+00, double* %Sl
+ %L11 = load double, ptr %Sl
+ store double 0.000000e+00, ptr %Sl
%E12 = extractelement <2 x i1> zeroinitializer, i32 0
br i1 %E12, label %CF72, label %CF80
br i1 %Cmp17, label %CF72, label %CF77
CF77: ; preds = %CF77, %CF80
- %L18 = load double, double* %Sl
- store double 0.000000e+00, double* %Sl
+ %L18 = load double, ptr %Sl
+ store double 0.000000e+00, ptr %Sl
%E19 = extractelement <2 x i1> zeroinitializer, i32 0
br i1 %E19, label %CF77, label %CF78
%B22 = sdiv <4 x i64> %Shuff7, zeroinitializer
%FC = uitofp i8 97 to double
%Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
- %L24 = load double, double* %Sl
- store float %Sl16, float* %PC
+ %L24 = load double, ptr %Sl
+ store float %Sl16, ptr %A2
%E25 = extractelement <2 x i1> %Shuff, i32 1
br i1 %E25, label %CF72, label %CF76
%B28 = mul <4 x i64> %I27, zeroinitializer
%ZE = zext <8 x i1> zeroinitializer to <8 x i64>
%Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06
- %L30 = load i8, i8* %0
- store double %L5, double* %Sl
+ %L30 = load i8, ptr %0
+ store double %L5, ptr %Sl
%E31 = extractelement <8 x i1> zeroinitializer, i32 5
br label %CF
br i1 %Cmp36, label %CF, label %CF74
CF74: ; preds = %CF74, %CF
- %L37 = load float, float* %PC
- store double 0.000000e+00, double* %Sl
+ %L37 = load float, ptr %A2
+ store double 0.000000e+00, ptr %Sl
%E38 = extractelement <2 x i1> %Sl23, i32 1
br i1 %E38, label %CF74, label %CF75
%I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2
%Sl41 = select i1 %Cmp10, i32 0, i32 %3
%Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer
- %L43 = load double, double* %Sl
- store i64 %4, i64* %2
+ %L43 = load double, ptr %Sl
+ store i64 %4, ptr %2
%E44 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E44, label %CF75, label %CF82
br i1 %Cmp49, label %CF75, label %CF81
CF81: ; preds = %CF82
- %L50 = load i8, i8* %0
- store double %L43, double* %Sl
+ %L50 = load i8, ptr %0
+ store double %L43, ptr %Sl
%E51 = extractelement <4 x i64> %Shuff7, i32 3
%Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
%I53 = insertelement <2 x i1> %Cmp, i1 %E25, i32 0
%B54 = fdiv double %L24, %L43
%BC55 = bitcast <4 x i64> zeroinitializer to <4 x double>
%Sl56 = select i1 false, i8 %5, i8 97
- %L57 = load i8, i8* %0
- store i8 %L50, i8* %0
+ %L57 = load i8, ptr %0
+ store i8 %L50, ptr %0
%E58 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E58, label %CF, label %CF73
%Shuff59 = shufflevector <2 x i1> %Shuff13, <2 x i1> %Shuff45, <2 x i32> <i32 undef, i32 0>
%I60 = insertelement <4 x float> %Shuff52, float -4.374162e+06, i32 0
%B61 = mul <4 x i64> %I46, zeroinitializer
- %PC62 = bitcast double* %A3 to float*
%Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer
%Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff
- %L65 = load double, double* %A1
- store float -4.374162e+06, float* %PC62
+ %L65 = load double, ptr %A1
+ store float -4.374162e+06, ptr %A3
%E66 = extractelement <8 x i1> %I21, i32 3
br i1 %E66, label %CF73, label %CF79
br i1 %Cmp71, label %CF79, label %CF83
CF83: ; preds = %CF79
- store double 0.000000e+00, double* %Sl
- store float %BC, float* %PC62
- store double %Sl48, double* %Sl
- store double %FC, double* %Sl
- store float %BC, float* %PC62
+ store double 0.000000e+00, ptr %Sl
+ store float %BC, ptr %A3
+ store double %Sl48, ptr %Sl
+ store double %FC, ptr %Sl
+ store float %BC, ptr %A3
ret void
}
; `Num < NumOperands && "Invalid child # of SDNode!"' assertion.
; It should at least successfully build.
-define void @autogen_SD525530439(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD525530439(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca i32
%A3 = alloca double
%A2 = alloca <1 x double>
%A1 = alloca <8 x double>
%A = alloca i64
- %L = load i8, i8* %0
- store i64 33695, i64* %A
+ %L = load i8, ptr %0
+ store i64 33695, ptr %A
%E = extractelement <4 x i32> zeroinitializer, i32 3
%Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0>
%I = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
%B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%ZE = fpext float 0x3B64A2B880000000 to double
%Sl = select i1 true, i16 -1, i16 -11642
- %L5 = load i8, i8* %0
- store i8 0, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 0, ptr %0
%E6 = extractelement <4 x i32> zeroinitializer, i32 2
%Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef>
%I8 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
%B9 = sub i32 71140, 439732
%BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float>
- %Sl10 = select i1 true, i32* %1, i32* %1
+ %Sl10 = select i1 true, ptr %1, ptr %1
%Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer
- %L11 = load i32, i32* %Sl10
- store <1 x double> zeroinitializer, <1 x double>* %A2
+ %L11 = load i32, ptr %Sl10
+ store <1 x double> zeroinitializer, ptr %A2
%E12 = extractelement <4 x i16> zeroinitializer, i32 0
%Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef
%I14 = insertelement <1 x i16> zeroinitializer, i16 %Sl, i32 0
br label %CF75
CF75: ; preds = %CF75, %BB
- %L19 = load i32, i32* %Sl10
- store i32 %L11, i32* %Sl10
+ %L19 = load i32, ptr %Sl10
+ store i32 %L11, ptr %Sl10
%E20 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
%I22 = insertelement <4 x float> %BC16, float 0x3EEF3D6300000000, i32 2
br i1 %Cmp26, label %CF75, label %CF76
CF76: ; preds = %CF75
- %L27 = load i32, i32* %Sl10
- store i32 439732, i32* %Sl10
+ %L27 = load i32, ptr %Sl10
+ store i32 439732, ptr %Sl10
%E28 = extractelement <4 x i32> %Shuff21, i32 3
%Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0>
%I30 = insertelement <8 x i1> %Shuff7, i1 %Cmp18, i32 4
br label %CF74
CF74: ; preds = %CF74, %CF80, %CF78, %CF76
- %L33 = load i64, i64* %2
- store i32 71140, i32* %Sl10
+ %L33 = load i64, ptr %2
+ store i32 71140, ptr %Sl10
%E34 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef
%I36 = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
br i1 %Cmp39, label %CF74, label %CF80
CF80: ; preds = %CF74
- %L40 = load i8, i8* %0
- store i32 0, i32* %Sl10
+ %L40 = load i8, ptr %0
+ store i32 0, ptr %Sl10
%E41 = extractelement <8 x i64> zeroinitializer, i32 1
%Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef
%I43 = insertelement <4 x i16> %I36, i16 -11642, i32 0
br i1 %Sl44, label %CF74, label %CF78
CF78: ; preds = %CF80
- %L45 = load i32, i32* %Sl10
- store i8 %L5, i8* %0
+ %L45 = load i32, ptr %Sl10
+ store i8 %L5, ptr %0
%E46 = extractelement <8 x i1> %Shuff7, i32 2
br i1 %E46, label %CF74, label %CF77
br i1 %Cmp52, label %CF77, label %CF79
CF79: ; preds = %CF77
- %L53 = load i32, i32* %Sl10
- store i8 %L40, i8* %0
+ %L53 = load i32, ptr %Sl10
+ store i8 %L40, ptr %0
%E54 = extractelement <4 x i32> zeroinitializer, i32 1
%Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2>
%I56 = insertelement <4 x i32> zeroinitializer, i32 %Sl51, i32 2
%Tr = trunc <1 x i64> %Shuff13 to <1 x i16>
%Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>
%Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56
- %L59 = load i8, i8* %0
- store <1 x double> zeroinitializer, <1 x double>* %A2
+ %L59 = load i8, ptr %0
+ store <1 x double> zeroinitializer, ptr %A2
%E60 = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
%I62 = insertelement <4 x i16> zeroinitializer, i16 %E12, i32 1
%B63 = and <4 x i32> %Shuff61, <i32 -1, i32 -1, i32 -1, i32 -1>
- %PC = bitcast double* %A3 to i32*
%Sl64 = select i1 %Cmp18, <4 x i32> %Shuff61, <4 x i32> %Shuff55
%Cmp65 = icmp sgt i32 439732, %3
br label %CF
CF: ; preds = %CF79
- %L66 = load i32, i32* %Sl10
- store i32 %E6, i32* %PC
+ %L66 = load i32, ptr %Sl10
+ store i32 %E6, ptr %A3
%E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
%Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef>
%I69 = insertelement <4 x i16> %Shuff47, i16 %Sl, i32 3
%FC71 = sitofp i32 %L66 to double
%Sl72 = select i1 %Cmp18, i64 %4, i64 %4
%Cmp73 = icmp eq <4 x i64> zeroinitializer, %B70
- store i32 %B23, i32* %PC
- store i32 %3, i32* %PC
- store i32 %3, i32* %Sl10
- store i32 %L27, i32* %1
- store i32 0, i32* %PC
+ store i32 %B23, ptr %A3
+ store i32 %3, ptr %A3
+ store i32 %3, ptr %Sl10
+ store i32 %L27, ptr %1
+ store i32 0, ptr %A3
ret void
}
; v2f64 on MSA.
; It should at least successfully build.
-define void @autogen_SD997348632(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD997348632(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca <2 x i32>
%A3 = alloca <16 x i16>
%A2 = alloca <4 x i1>
%A1 = alloca <4 x i16>
%A = alloca <2 x i32>
- %L = load i8, i8* %0
- store i8 %L, i8* %0
+ %L = load i8, ptr %0
+ store i8 %L, ptr %0
%E = extractelement <4 x i32> zeroinitializer, i32 0
%Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
%I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
%FC = sitofp <4 x i32> zeroinitializer to <4 x double>
%Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff
- %L5 = load i8, i8* %0
- store i8 %5, i8* %0
+ %L5 = load i8, ptr %0
+ store i8 %5, ptr %0
%E6 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef>
%I8 = insertelement <1 x i16> zeroinitializer, i16 0, i32 0
%FC9 = fptoui float 0x406DB70180000000 to i64
%Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer
- %L11 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L11 = load i8, ptr %0
+ store i8 %L, ptr %0
%E12 = extractelement <4 x i64> zeroinitializer, i32 2
%Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3>
%I14 = insertelement <8 x i32> zeroinitializer, i32 -1, i32 7
br label %CF
CF: ; preds = %CF, %CF79, %CF84, %BB
- %L18 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L18 = load i8, ptr %0
+ store i8 %L, ptr %0
%E19 = extractelement <4 x i64> %Sl, i32 3
%Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0>
%I21 = insertelement <4 x i64> zeroinitializer, i64 %FC9, i32 0
br i1 %Cmp25, label %CF, label %CF79
CF79: ; preds = %CF
- %L26 = load i8, i8* %0
- store i8 %L26, i8* %0
+ %L26 = load i8, ptr %0
+ store i8 %L26, ptr %0
%E27 = extractelement <1 x i16> zeroinitializer, i32 0
%Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
%I29 = insertelement <16 x i32> %Shuff28, i32 %B, i32 15
br i1 %Cmp32, label %CF, label %CF78
CF78: ; preds = %CF78, %CF79
- %L33 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L33 = load i8, ptr %0
+ store i8 %L, ptr %0
%E34 = extractelement <16 x i32> %Shuff28, i32 1
%Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2>
%I36 = insertelement <4 x double> %FC, double 0xA4A57F449CA36CC2, i32 2
br i1 %Cmp38, label %CF78, label %CF80
CF80: ; preds = %CF80, %CF82, %CF78
- %L39 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L39 = load i8, ptr %0
+ store i8 %L, ptr %0
%E40 = extractelement <2 x i1> %Shuff20, i32 1
br i1 %E40, label %CF80, label %CF82
%B43 = sub i32 %E, 0
%Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28
%Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21
- %L46 = load i8, i8* %0
- store i8 %L11, i8* %0
+ %L46 = load i8, ptr %0
+ store i8 %L11, ptr %0
%E47 = extractelement <8 x i32> %Sl16, i32 4
%Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1>
%I49 = insertelement <2 x i1> %Shuff48, i1 %Cmp17, i32 1
CF81: ; preds = %CF81, %CF82
%Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000
%Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1>
- %L54 = load i8, i8* %0
- store i8 %L5, i8* %0
+ %L54 = load i8, ptr %0
+ store i8 %L5, ptr %0
%E55 = extractelement <8 x i32> zeroinitializer, i32 7
%Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0>
%I57 = insertelement <2 x i1> %Shuff7, i1 false, i32 0
%FC59 = fptoui <4 x double> %I36 to <4 x i16>
%Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57
%Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
- %L62 = load i8, i8* %0
- store i8 %L33, i8* %0
+ %L62 = load i8, ptr %0
+ store i8 %L33, ptr %0
%E63 = extractelement <4 x i64> %Shuff, i32 2
%Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef>
%I65 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
br i1 %Cmp69, label %CF, label %CF77
CF77: ; preds = %CF84
- %L70 = load i8, i8* %0
- store i8 %L, i8* %0
+ %L70 = load i8, ptr %0
+ store i8 %L, ptr %0
%E71 = extractelement <4 x i64> %Shuff, i32 0
%Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
%I73 = insertelement <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B66, i32 1
%FC74 = uitofp i1 %Cmp32 to double
%Sl75 = select i1 %FC51, i16 9704, i16 0
%Cmp76 = icmp ugt <1 x i16> %I8, %I8
- store i8 %L39, i8* %0
- store i8 %5, i8* %0
- store i8 %Tr23, i8* %0
- store i8 %L, i8* %0
- store i8 %5, i8* %0
+ store i8 %L39, ptr %0
+ store i8 %5, ptr %0
+ store i8 %Tr23, ptr %0
+ store i8 %L, ptr %0
+ store i8 %5, ptr %0
ret void
}
; build_vector.
; It should at least successfully build.
-define void @autogen_SD742806235(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD742806235(ptr, ptr, ptr, i32, i64, i8) {
BB:
%A4 = alloca double
%A3 = alloca double
%A2 = alloca <8 x i8>
%A1 = alloca <4 x float>
%A = alloca i1
- store i8 %5, i8* %0
- store i8 %5, i8* %0
- store i8 %5, i8* %0
- store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, <8 x i8>* %A2
- store i8 %5, i8* %0
+ store i8 %5, ptr %0
+ store i8 %5, ptr %0
+ store i8 %5, ptr %0
+ store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, ptr %A2
+ store i8 %5, ptr %0
ret void
}
entry:
; CHECK-NOT: lwc1 $f{{[13579]+}}
; CHECK: lwc1 $f{{[02468]+}}
- %0 = load float, float * @f1
+ %0 = load float, ptr @f1
%1 = insertelement <4 x float> undef, float %0, i32 0
%2 = insertelement <4 x float> %1, float %0, i32 1
%3 = insertelement <4 x float> %2, float %0, i32 2
; CHECK-NOT: lwc1 $f{{[13579]+}}
; CHECK: lwc1 $f{{[02468]+}}
- %5 = load float, float * @f2
+ %5 = load float, ptr @f2
%6 = insertelement <4 x float> undef, float %5, i32 0
%7 = insertelement <4 x float> %6, float %5, i32 1
%8 = insertelement <4 x float> %7, float %5, i32 2
%9 = insertelement <4 x float> %8, float %5, i32 3
%10 = fadd <4 x float> %4, %9
- store <4 x float> %10, <4 x float> * @v3
+ store <4 x float> %10, ptr @v3
ret void
}
entry:
; CHECK-NOT: lwc1 $f{{[13579]+}}
; CHECK: lwc1 $f{{[02468]+}}
- %0 = load float, float * @f1
+ %0 = load float, ptr @f1
%1 = fpext float %0 to double
; CHECK-NOT: lwc1 $f{{[13579]+}}
; CHECK: lwc1 $f{{[02468]+}}
- %2 = load float, float * @f2
+ %2 = load float, ptr @f2
%3 = fpext float %2 to double
%4 = fadd double %1, %3
- store double%4, double * @d1
+ store double%4, ptr @d1
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
-define void @ashr_v4i32(<4 x i32>* %c) nounwind {
+define void @ashr_v4i32(ptr %c) nounwind {
; CHECK-LABEL: ashr_v4i32:
%1 = ashr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
; CHECK-NOT: sra
; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
; CHECK-NOT: sra
- store volatile <4 x i32> %1, <4 x i32>* %c
+ store volatile <4 x i32> %1, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
%2 = ashr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
; CHECK-NOT: sra
; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -2
; CHECK-NOT: sra
- store volatile <4 x i32> %2, <4 x i32>* %c
+ store volatile <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
ret void
; CHECK-LABEL: .size ashr_v4i32
}
-define void @lshr_v4i32(<4 x i32>* %c) nounwind {
+define void @lshr_v4i32(ptr %c) nounwind {
; CHECK-LABEL: lshr_v4i32:
%1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
; CHECK-NOT: srl
; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
; CHECK-NOT: srl
- store volatile <4 x i32> %1, <4 x i32>* %c
+ store volatile <4 x i32> %1, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
%2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
; CHECK-DAG: addiu [[CPOOL:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0([[CPOOL]])
; CHECK-NOT: srl
- store volatile <4 x i32> %2, <4 x i32>* %c
+ store volatile <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
ret void
; CHECK-LABEL: .size lshr_v4i32
}
-define void @shl_v4i32(<4 x i32>* %c) nounwind {
+define void @shl_v4i32(ptr %c) nounwind {
; CHECK-LABEL: shl_v4i32:
%1 = shl <4 x i32> <i32 8, i32 4, i32 2, i32 1>,
; CHECK-NOT: sll
; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 8
; CHECK-NOT: sll
- store volatile <4 x i32> %1, <4 x i32>* %c
+ store volatile <4 x i32> %1, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
%2 = shl <4 x i32> <i32 -8, i32 -4, i32 -2, i32 -1>,
; CHECK-NOT: sll
; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -8
; CHECK-NOT: sll
- store volatile <4 x i32> %2, <4 x i32>* %c
+ store volatile <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R1]], 0($4)
ret void
define void @llvm_mips_bclr_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_bclr_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_bclr_w_test_const_vec_res
ret void
}
define void @llvm_mips_bneg_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_bneg_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_bneg_w_test_const_vec_res
ret void
}
define void @llvm_mips_bset_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_bset_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_bset_w_test_const_vec_res
ret void
}
define void @llvm_mips_sll_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_sll_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_sll_w_test_const_vec_res
ret void
}
define void @llvm_mips_sra_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_sra_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_sra_w_test_const_vec_res
ret void
}
define void @llvm_mips_srl_w_test_const_vec() nounwind {
entry:
%0 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>)
- store <4 x i32> %0, <4 x i32>* @llvm_mips_srl_w_test_const_vec_res
+ store <4 x i32> %0, ptr @llvm_mips_srl_w_test_const_vec_res
ret void
}
define void @llvm_mips_bclr_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bclr_b_RES
ret void
}
define void @llvm_mips_bclr_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bclr_h_RES
ret void
}
define void @llvm_mips_bclr_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bclr_w_RES
ret void
}
define void @llvm_mips_bclr_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bclr_d_RES
ret void
}
define void @llvm_mips_bneg_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bneg_b_RES
ret void
}
define void @llvm_mips_bneg_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bneg_h_RES
ret void
}
define void @llvm_mips_bneg_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bneg_w_RES
ret void
}
define void @llvm_mips_bneg_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bneg_d_RES
ret void
}
define void @llvm_mips_bset_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_bset_b_RES
ret void
}
define void @llvm_mips_bset_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_bset_h_RES
ret void
}
define void @llvm_mips_bset_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_bset_w_RES
ret void
}
define void @llvm_mips_bset_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_bset_d_RES
ret void
}
define void @llvm_mips_sll_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
ret void
}
define void @llvm_mips_sll_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
ret void
}
; MSA shift-intrinsic lowering tests: each function loads two vector operands
; from module globals, applies one llvm.mips.* shift intrinsic, and stores the
; result to a global. The interleaved -/+ lines are the typed-pointer ->
; opaque-'ptr' rewrite of the same IR (only pointer spellings change).
; sll.w / sll.d: element-wise shift-left.
define void @llvm_mips_sll_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
ret void
}
define void @llvm_mips_sll_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
ret void
}
; sra.[bhwd]: element-wise arithmetic shift-right.
define void @llvm_mips_sra_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
ret void
}
define void @llvm_mips_sra_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
ret void
}
define void @llvm_mips_sra_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
ret void
}
define void @llvm_mips_sra_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
ret void
}
; srl.[bhwd]: element-wise logical shift-right.
define void @llvm_mips_srl_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
ret void
}
define void @llvm_mips_srl_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
ret void
}
define void @llvm_mips_srl_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
ret void
}
define void @llvm_mips_srl_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
ret void
}
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; <16 x i8> shuffle-lowering tests: each loads operand vectors, performs a
; shufflevector, and stores to %c; FileCheck lines pin the selected MSA
; instruction. The -/+ pairs are the typed-pointer -> opaque-'ptr' rewrite.
; Full reversal mask: lowered to vshf.b with a constant mask loaded from memory.
-define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; Uniform mask of element 1: lowered to splati.b rather than a general vshf.
-define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; Mask selects only from the second operand (indices >= 16): vshf.b on [[R2]].
-define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; Mask mixes elements from both operands: two-operand vshf.b.
-define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_3(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v16i8_3:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; The concatenation step of vshf is bitwise not vectorwise so we must reverse
; the operands to get the right answer.
; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; Both shuffle operands are the same vector, so indices 1 and 17 are the same
; element: lowered to splati.b.
-define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_4(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v16i8_4:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; <8 x i16> variants of the vshf shuffle tests; same structure as the v16i8
; tests above but checking the .h-format instructions.
; Full reversal mask: vshf.h with a constant mask loaded from memory.
-define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Uniform mask of element 1: lowered to splati.h.
-define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Mask selects only from the second operand (indices >= 8): vshf.h on [[R2]].
-define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Mask mixes elements from both operands: two-operand vshf.h.
-define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_3(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v8i16_3:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; The concatenation step of vshf is bitwise not vectorwise so we must reverse
; the operands to get the right answer.
; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Both shuffle operands are the same vector, so indices 1 and 9 are the same
; element: lowered to splati.h.
; Fix: this function was missing its closing '}' (the next line in the file
; is a top-level comment followed by the next define), leaving the IR
; unterminated; the brace is restored after 'ret void'.
-define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_4(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v8i16_4:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Note: v4i32 only has one 4-element set so it's impossible to get a vshf.w
; instruction when using a single vector.
; <4 x i32> variants. Per the note above, a single 4-element vector can be
; permuted with shf.w (immediate control byte) instead of vshf.w.
; Full reversal mask: shf.w with control 27 (0b00011011).
-define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
; Uniform mask of element 1: lowered to splati.w.
-define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][1]
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
; Mask selects only from the second operand: shf.w on [[R2]] with control 36.
-define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
; Mask mixes elements from both operands: two-operand vshf.w.
-define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_3(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v4i32_3:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; The concatenation step of vshf is bitwise not vectorwise so we must reverse
; the operands to get the right answer.
; CHECK-DAG: vshf.w [[R3]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_4(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v4i32_4:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
; The two operand vectors are the same so element 1 and 5 are equivalent.
; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][1]
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
; <2 x i64> variants of the vshf shuffle tests, checking the .d-format
; instructions.
; Element swap: vshf.d with a constant mask loaded from memory.
-define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Uniform mask of element 1: lowered to splati.d.
-define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Mask selects only from the second operand (indices >= 2): vshf.d on [[R2]].
-define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Mask mixes elements from both operands: two-operand vshf.d.
-define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_3(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v2i64_3:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
; The concatenation step of vshf is bitwise not vectorwise so we must reverse
; the operands to get the right answer.
; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R1]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Same vector on both sides, so indices 1 and 3 match: lowered to splati.d.
-define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_4(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: vshf_v2i64_4:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; shf.[bhw] tests: single-operand permutes expressible with an immediate
; control byte (45 = 0b00101101 here, 27 = 0b00011011 below).
-define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @shf_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: shf_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12>
; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
; Halfword permute repeating the same 4-element pattern: shf.h, control 27.
-define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @shf_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: shf_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
; Word reversal: shf.w with immediate control 27 (0b00011011).
; Fix: this function was missing its closing '}' (the next line in the file
; is the top-level comment "; shf.d does not exist" followed by the next
; define), leaving the IR unterminated; the brace is restored after 'ret void'.
-define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @shf_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: shf_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
; shf.d does not exist
; ilvev tests: interleaving the even-indexed elements of two vectors selects
; the ilvev.[bhwd] instruction with operands in reversed (bitwise) order.
-define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Interleaving one operand with itself.
; Even-interleave where the mask only uses the first operand (%2 is loaded
; but unused by the mask): ilvev on [[R1]] twice; the v2i64 case degenerates
; to splati.d.
-define void @ilvev_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvev_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvev_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvev_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
; ilvev.d with two identical operands is equivalent to splati.d
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Even-interleave where the mask only uses the second operand (indices are
; all >= the first vector's length): ilvev on [[R2]] twice; the v2i64 case
; degenerates to splati.d.
-define void @ilvev_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 16, i32 16, i32 18, i32 18, i32 20, i32 20, i32 22, i32 22, i32 24, i32 24, i32 26, i32 26, i32 28, i32 28, i32 30, i32 30>
; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvev_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvev_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 4, i32 6, i32 6>
; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvev_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvev_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
; ilvev.d with two identical operands is equivalent to splati.d
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvod tests: interleaving the odd-indexed elements of two vectors selects
; the ilvod.[bhwd] instruction.
-define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Odd-interleave where the mask only uses the first operand (%2 is loaded
; but unused by the mask): ilvod on [[R1]] twice; the v2i64 case degenerates
; to splati.d.
-define void @ilvod_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvod_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvod_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvod_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
; ilvod.d with two identical operands is equivalent to splati.d
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; Odd-interleave where the mask only uses the second operand (indices are
; all >= the first vector's length): ilvod on [[R2]] twice; the v2i64 case
; degenerates to splati.d.
-define void @ilvod_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 17, i32 17, i32 19, i32 19, i32 21, i32 21, i32 23, i32 23, i32 25, i32 25, i32 27, i32 27, i32 29, i32 29, i32 31, i32 31>
; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvod_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvod_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 5, i32 5, i32 7, i32 7>
; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvod_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvod_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
; ilvod.d with two identical operands is equivalent to splati.d
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvr "_0" variants: the mask interleaves the low (right) halves of %1 and %2
; (a[0], b[0], a[1], b[1], ...), matching ilvr.{b,h,w} [[R2]], [[R1]];
; for v2i64 (mask <0,2>) ilvr.d and ilvev.d coincide, so ilvev.d is checked.
-define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; ilvr.d and ilvev.d are equivalent for v2i64
; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvr "_1" variants: every mask index refers to %2 (each low-half element
; duplicated), so ilvr.{b,h,w} is emitted with [[R2]] for both operands;
; the v2i64 case (mask <2,2>) folds to splati.d [[R2]][0].
-define void @ilvr_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 16, i32 16, i32 17, i32 17, i32 18, i32 18, i32 19, i32 19, i32 20, i32 20, i32 21, i32 21, i32 22, i32 22, i32 23, i32 23>
; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvr_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvr_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 4, i32 5, i32 5>
; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvr_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
; ilvr.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvr "_2" variants: every mask index refers to %1 (each low-half element
; duplicated), so ilvr.{b,h,w} is emitted with [[R1]] for both operands;
; the v2i64 case (mask <0,0>) folds to splati.d [[R1]][0].
-define void @ilvr_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvr_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvr_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvr_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvr_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
; ilvr.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvl "_0" variants: the mask interleaves the high (left) halves of %1 and %2
; (a[n/2], b[n/2], ...), matching ilvl.{b,h,w} [[R2]], [[R1]];
; for v2i64 (mask <1,3>) ilvl.d and ilvod.d coincide, so ilvod.d is checked.
-define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; ilvl.d and ilvod.d are equivalent for v2i64
; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvl "_1" variants: every mask index refers to %2 (each high-half element
; duplicated), so ilvl.{b,h,w} is emitted with [[R2]] for both operands;
; the v2i64 case (mask <3,3>) folds to splati.d [[R2]][1].
-define void @ilvl_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 24, i32 24, i32 25, i32 25, i32 26, i32 26, i32 27, i32 27, i32 28, i32 28, i32 29, i32 29, i32 30, i32 30, i32 31, i32 31>
; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvl_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvl_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 6, i32 6, i32 7, i32 7>
; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvl_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
; ilvl.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; ilvl "_2" variants: every mask index refers to %1 (each high-half element
; duplicated), so ilvl.{b,h,w} is emitted with [[R1]] for both operands;
; the v2i64 case (mask <1,1>) folds to splati.d [[R1]][1].
-define void @ilvl_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @ilvl_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @ilvl_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @ilvl_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: ilvl_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
; ilvl.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckev "_0" variants: the mask packs the even-indexed elements of %1 then %2
; (<0,2,...> followed by <16,18,...>), matching pckev.{b,h,w} [[R2]], [[R1]];
; for v2i64 (mask <0,2>) pckev.d and ilvev.d coincide, so ilvev.d is checked.
-define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
; pckev.d and ilvev.d are equivalent for v2i64
; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckev "_1" variants: the mask repeats the even-indexed elements of %2 twice,
; so pckev.{b,h,w} is emitted with [[R2]] for both operands;
; the v2i64 case (mask <2,2>) folds to splati.d [[R2]][0].
-define void @pckev_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckev_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 10, i32 12, i32 14, i32 8, i32 10, i32 12, i32 14>
; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckev_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 6, i32 4, i32 6>
; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckev_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
; pckev.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckev "_2" variants: the mask repeats the even-indexed elements of %1 twice,
; so pckev.{b,h,w} is emitted with [[R1]] for both operands;
; the v2i64 case (mask <0,0>) folds to splati.d [[R1]][0].
-define void @pckev_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckev_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckev_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckev_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckev_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
; pckev.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckod "_0" variants: the mask packs the odd-indexed elements of %1 then %2
; (<1,3,...> followed by <17,19,...>), matching pckod.{b,h,w} [[R2]], [[R1]];
; for v2i64 (mask <1,3>) pckod.d and ilvod.d coincide, so ilvod.d is checked.
-define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
; pckod.d and ilvod.d are equivalent for v2i64
; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckod "_1" variants: the mask repeats the odd-indexed elements of %2 twice,
; so pckod.{b,h,w} is emitted with [[R2]] for both operands;
; the v2i64 case (mask <3,3>) folds to splati.d [[R2]][1].
-define void @pckod_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v16i8_1:
- %1 = load <16 x i8>, <16 x i8>* %a
- %2 = load <16 x i8>, <16 x i8>* %b
+ %1 = load <16 x i8>, ptr %a
+ %2 = load <16 x i8>, ptr %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckod_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v8i16_1:
- %1 = load <8 x i16>, <8 x i16>* %a
- %2 = load <8 x i16>, <8 x i16>* %b
+ %1 = load <8 x i16>, ptr %a
+ %2 = load <8 x i16>, ptr %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 9, i32 11, i32 13, i32 15>
; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckod_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v4i32_1:
- %1 = load <4 x i32>, <4 x i32>* %a
- %2 = load <4 x i32>, <4 x i32>* %b
+ %1 = load <4 x i32>, ptr %a
+ %2 = load <4 x i32>, ptr %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 5, i32 7, i32 5, i32 7>
; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckod_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v2i64_1:
- %1 = load <2 x i64>, <2 x i64>* %a
- %2 = load <2 x i64>, <2 x i64>* %b
+ %1 = load <2 x i64>, ptr %a
+ %2 = load <2 x i64>, ptr %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
; pckod.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
; pckod "_2" variants: the mask repeats the odd-indexed elements of %1 twice,
; so pckod.{b,h,w} is emitted with [[R1]] for both operands;
; the v2i64 case (mask <1,1>) folds to splati.d [[R1]][1].
-define void @pckod_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v16i8_2:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>, <16 x i8>* %b
+ %2 = load <16 x i8>, ptr %b
%3 = shufflevector <16 x i8> %1, <16 x i8> %2,
<16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <16 x i8> %3, <16 x i8>* %c
+ store <16 x i8> %3, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @pckod_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v8i16_2:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>, <8 x i16>* %b
+ %2 = load <8 x i16>, ptr %b
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <8 x i16> %3, <8 x i16>* %c
+ store <8 x i16> %3, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @pckod_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v4i32_2:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>, <4 x i32>* %b
+ %2 = load <4 x i32>, ptr %b
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 1, i32 3>
; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
- store <4 x i32> %3, <4 x i32>* %c
+ store <4 x i32> %3, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @pckod_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
; CHECK-LABEL: pckod_v2i64_2:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>, <2 x i64>* %b
+ %2 = load <2 x i64>, ptr %b
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
; pckod.d and splati.d are equivalent for v2i64
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %3, <2 x i64>* %c
+ store <2 x i64> %3, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
}
-define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @splati_v16i8_0(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: splati_v16i8_0:
- %1 = load <16 x i8>, <16 x i8>* %a
+ %1 = load <16 x i8>, ptr %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef,
<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][4]
- store <16 x i8> %2, <16 x i8>* %c
+ store <16 x i8> %2, ptr %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
}
-define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @splati_v8i16_0(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: splati_v8i16_0:
- %1 = load <8 x i16>, <8 x i16>* %a
+ %1 = load <8 x i16>, ptr %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4]
- store <8 x i16> %2, <8 x i16>* %c
+ store <8 x i16> %2, ptr %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
}
-define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @splati_v4i32_0(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: splati_v4i32_0:
- %1 = load <4 x i32>, <4 x i32>* %a
+ %1 = load <4 x i32>, ptr %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][3]
- store <4 x i32> %2, <4 x i32>* %c
+ store <4 x i32> %2, ptr %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
}
-define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @splati_v2i64_0(ptr %c, ptr %a) nounwind {
; CHECK-LABEL: splati_v2i64_0:
- %1 = load <2 x i64>, <2 x i64>* %a
+ %1 = load <2 x i64>, ptr %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
- store <2 x i64> %2, <2 x i64>* %c
+ store <2 x i64> %2, ptr %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
-define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
+define i32 @test_i8(ptr %p0, ptr %q1) nounwind {
entry:
- %p1 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 1
- %p2 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 2
- %p3 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 3
- %p4 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 4
- %p5 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 5
- %p6 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 6
- %p7 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 7
- %p8 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 8
- %p9 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 9
- %p10 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 10
- %p11 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 11
- %p12 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 12
- %p13 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 13
- %p14 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 14
- %p15 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 15
- %p16 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 16
- %p17 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 17
- %p18 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 18
- %p19 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 19
- %p20 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 20
- %p21 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 21
- %p22 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 22
- %p23 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 23
- %p24 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 24
- %p25 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 25
- %p26 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 26
- %p27 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 27
- %p28 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 28
- %p29 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 29
- %p30 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 30
- %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31
- %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32
- %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33
- %0 = load <16 x i8>, <16 x i8>* %p0, align 16
- %1 = load <16 x i8>, <16 x i8>* %p1, align 16
- %2 = load <16 x i8>, <16 x i8>* %p2, align 16
- %3 = load <16 x i8>, <16 x i8>* %p3, align 16
- %4 = load <16 x i8>, <16 x i8>* %p4, align 16
- %5 = load <16 x i8>, <16 x i8>* %p5, align 16
- %6 = load <16 x i8>, <16 x i8>* %p6, align 16
- %7 = load <16 x i8>, <16 x i8>* %p7, align 16
- %8 = load <16 x i8>, <16 x i8>* %p8, align 16
- %9 = load <16 x i8>, <16 x i8>* %p9, align 16
- %10 = load <16 x i8>, <16 x i8>* %p10, align 16
- %11 = load <16 x i8>, <16 x i8>* %p11, align 16
- %12 = load <16 x i8>, <16 x i8>* %p12, align 16
- %13 = load <16 x i8>, <16 x i8>* %p13, align 16
- %14 = load <16 x i8>, <16 x i8>* %p14, align 16
- %15 = load <16 x i8>, <16 x i8>* %p15, align 16
- %16 = load <16 x i8>, <16 x i8>* %p16, align 16
- %17 = load <16 x i8>, <16 x i8>* %p17, align 16
- %18 = load <16 x i8>, <16 x i8>* %p18, align 16
- %19 = load <16 x i8>, <16 x i8>* %p19, align 16
- %20 = load <16 x i8>, <16 x i8>* %p20, align 16
- %21 = load <16 x i8>, <16 x i8>* %p21, align 16
- %22 = load <16 x i8>, <16 x i8>* %p22, align 16
- %23 = load <16 x i8>, <16 x i8>* %p23, align 16
- %24 = load <16 x i8>, <16 x i8>* %p24, align 16
- %25 = load <16 x i8>, <16 x i8>* %p25, align 16
- %26 = load <16 x i8>, <16 x i8>* %p26, align 16
- %27 = load <16 x i8>, <16 x i8>* %p27, align 16
- %28 = load <16 x i8>, <16 x i8>* %p28, align 16
- %29 = load <16 x i8>, <16 x i8>* %p29, align 16
- %30 = load <16 x i8>, <16 x i8>* %p30, align 16
- %31 = load <16 x i8>, <16 x i8>* %p31, align 16
- %32 = load <16 x i8>, <16 x i8>* %p32, align 16
- %33 = load <16 x i8>, <16 x i8>* %p33, align 16
+ %p1 = getelementptr <16 x i8>, ptr %p0, i32 1
+ %p2 = getelementptr <16 x i8>, ptr %p0, i32 2
+ %p3 = getelementptr <16 x i8>, ptr %p0, i32 3
+ %p4 = getelementptr <16 x i8>, ptr %p0, i32 4
+ %p5 = getelementptr <16 x i8>, ptr %p0, i32 5
+ %p6 = getelementptr <16 x i8>, ptr %p0, i32 6
+ %p7 = getelementptr <16 x i8>, ptr %p0, i32 7
+ %p8 = getelementptr <16 x i8>, ptr %p0, i32 8
+ %p9 = getelementptr <16 x i8>, ptr %p0, i32 9
+ %p10 = getelementptr <16 x i8>, ptr %p0, i32 10
+ %p11 = getelementptr <16 x i8>, ptr %p0, i32 11
+ %p12 = getelementptr <16 x i8>, ptr %p0, i32 12
+ %p13 = getelementptr <16 x i8>, ptr %p0, i32 13
+ %p14 = getelementptr <16 x i8>, ptr %p0, i32 14
+ %p15 = getelementptr <16 x i8>, ptr %p0, i32 15
+ %p16 = getelementptr <16 x i8>, ptr %p0, i32 16
+ %p17 = getelementptr <16 x i8>, ptr %p0, i32 17
+ %p18 = getelementptr <16 x i8>, ptr %p0, i32 18
+ %p19 = getelementptr <16 x i8>, ptr %p0, i32 19
+ %p20 = getelementptr <16 x i8>, ptr %p0, i32 20
+ %p21 = getelementptr <16 x i8>, ptr %p0, i32 21
+ %p22 = getelementptr <16 x i8>, ptr %p0, i32 22
+ %p23 = getelementptr <16 x i8>, ptr %p0, i32 23
+ %p24 = getelementptr <16 x i8>, ptr %p0, i32 24
+ %p25 = getelementptr <16 x i8>, ptr %p0, i32 25
+ %p26 = getelementptr <16 x i8>, ptr %p0, i32 26
+ %p27 = getelementptr <16 x i8>, ptr %p0, i32 27
+ %p28 = getelementptr <16 x i8>, ptr %p0, i32 28
+ %p29 = getelementptr <16 x i8>, ptr %p0, i32 29
+ %p30 = getelementptr <16 x i8>, ptr %p0, i32 30
+ %p31 = getelementptr <16 x i8>, ptr %p0, i32 31
+ %p32 = getelementptr <16 x i8>, ptr %p0, i32 32
+ %p33 = getelementptr <16 x i8>, ptr %p0, i32 33
+ %0 = load <16 x i8>, ptr %p0, align 16
+ %1 = load <16 x i8>, ptr %p1, align 16
+ %2 = load <16 x i8>, ptr %p2, align 16
+ %3 = load <16 x i8>, ptr %p3, align 16
+ %4 = load <16 x i8>, ptr %p4, align 16
+ %5 = load <16 x i8>, ptr %p5, align 16
+ %6 = load <16 x i8>, ptr %p6, align 16
+ %7 = load <16 x i8>, ptr %p7, align 16
+ %8 = load <16 x i8>, ptr %p8, align 16
+ %9 = load <16 x i8>, ptr %p9, align 16
+ %10 = load <16 x i8>, ptr %p10, align 16
+ %11 = load <16 x i8>, ptr %p11, align 16
+ %12 = load <16 x i8>, ptr %p12, align 16
+ %13 = load <16 x i8>, ptr %p13, align 16
+ %14 = load <16 x i8>, ptr %p14, align 16
+ %15 = load <16 x i8>, ptr %p15, align 16
+ %16 = load <16 x i8>, ptr %p16, align 16
+ %17 = load <16 x i8>, ptr %p17, align 16
+ %18 = load <16 x i8>, ptr %p18, align 16
+ %19 = load <16 x i8>, ptr %p19, align 16
+ %20 = load <16 x i8>, ptr %p20, align 16
+ %21 = load <16 x i8>, ptr %p21, align 16
+ %22 = load <16 x i8>, ptr %p22, align 16
+ %23 = load <16 x i8>, ptr %p23, align 16
+ %24 = load <16 x i8>, ptr %p24, align 16
+ %25 = load <16 x i8>, ptr %p25, align 16
+ %26 = load <16 x i8>, ptr %p26, align 16
+ %27 = load <16 x i8>, ptr %p27, align 16
+ %28 = load <16 x i8>, ptr %p28, align 16
+ %29 = load <16 x i8>, ptr %p29, align 16
+ %30 = load <16 x i8>, ptr %p30, align 16
+ %31 = load <16 x i8>, ptr %p31, align 16
+ %32 = load <16 x i8>, ptr %p32, align 16
+ %33 = load <16 x i8>, ptr %p33, align 16
%r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
%r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x i8> %2)
%r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3)
; CHECK: ld.b {{.*}} Reload
; CHECK: .size
-define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
+define i32 @test_i16(ptr %p0, ptr %q1) nounwind {
entry:
- %p1 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 1
- %p2 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 2
- %p3 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 3
- %p4 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 4
- %p5 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 5
- %p6 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 6
- %p7 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 7
- %p8 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 8
- %p9 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 9
- %p10 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 10
- %p11 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 11
- %p12 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 12
- %p13 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 13
- %p14 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 14
- %p15 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 15
- %p16 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 16
- %p17 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 17
- %p18 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 18
- %p19 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 19
- %p20 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 20
- %p21 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 21
- %p22 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 22
- %p23 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 23
- %p24 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 24
- %p25 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 25
- %p26 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 26
- %p27 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 27
- %p28 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 28
- %p29 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 29
- %p30 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 30
- %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31
- %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32
- %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33
- %0 = load <8 x i16>, <8 x i16>* %p0, align 16
- %1 = load <8 x i16>, <8 x i16>* %p1, align 16
- %2 = load <8 x i16>, <8 x i16>* %p2, align 16
- %3 = load <8 x i16>, <8 x i16>* %p3, align 16
- %4 = load <8 x i16>, <8 x i16>* %p4, align 16
- %5 = load <8 x i16>, <8 x i16>* %p5, align 16
- %6 = load <8 x i16>, <8 x i16>* %p6, align 16
- %7 = load <8 x i16>, <8 x i16>* %p7, align 16
- %8 = load <8 x i16>, <8 x i16>* %p8, align 16
- %9 = load <8 x i16>, <8 x i16>* %p9, align 16
- %10 = load <8 x i16>, <8 x i16>* %p10, align 16
- %11 = load <8 x i16>, <8 x i16>* %p11, align 16
- %12 = load <8 x i16>, <8 x i16>* %p12, align 16
- %13 = load <8 x i16>, <8 x i16>* %p13, align 16
- %14 = load <8 x i16>, <8 x i16>* %p14, align 16
- %15 = load <8 x i16>, <8 x i16>* %p15, align 16
- %16 = load <8 x i16>, <8 x i16>* %p16, align 16
- %17 = load <8 x i16>, <8 x i16>* %p17, align 16
- %18 = load <8 x i16>, <8 x i16>* %p18, align 16
- %19 = load <8 x i16>, <8 x i16>* %p19, align 16
- %20 = load <8 x i16>, <8 x i16>* %p20, align 16
- %21 = load <8 x i16>, <8 x i16>* %p21, align 16
- %22 = load <8 x i16>, <8 x i16>* %p22, align 16
- %23 = load <8 x i16>, <8 x i16>* %p23, align 16
- %24 = load <8 x i16>, <8 x i16>* %p24, align 16
- %25 = load <8 x i16>, <8 x i16>* %p25, align 16
- %26 = load <8 x i16>, <8 x i16>* %p26, align 16
- %27 = load <8 x i16>, <8 x i16>* %p27, align 16
- %28 = load <8 x i16>, <8 x i16>* %p28, align 16
- %29 = load <8 x i16>, <8 x i16>* %p29, align 16
- %30 = load <8 x i16>, <8 x i16>* %p30, align 16
- %31 = load <8 x i16>, <8 x i16>* %p31, align 16
- %32 = load <8 x i16>, <8 x i16>* %p32, align 16
- %33 = load <8 x i16>, <8 x i16>* %p33, align 16
+ %p1 = getelementptr <8 x i16>, ptr %p0, i32 1
+ %p2 = getelementptr <8 x i16>, ptr %p0, i32 2
+ %p3 = getelementptr <8 x i16>, ptr %p0, i32 3
+ %p4 = getelementptr <8 x i16>, ptr %p0, i32 4
+ %p5 = getelementptr <8 x i16>, ptr %p0, i32 5
+ %p6 = getelementptr <8 x i16>, ptr %p0, i32 6
+ %p7 = getelementptr <8 x i16>, ptr %p0, i32 7
+ %p8 = getelementptr <8 x i16>, ptr %p0, i32 8
+ %p9 = getelementptr <8 x i16>, ptr %p0, i32 9
+ %p10 = getelementptr <8 x i16>, ptr %p0, i32 10
+ %p11 = getelementptr <8 x i16>, ptr %p0, i32 11
+ %p12 = getelementptr <8 x i16>, ptr %p0, i32 12
+ %p13 = getelementptr <8 x i16>, ptr %p0, i32 13
+ %p14 = getelementptr <8 x i16>, ptr %p0, i32 14
+ %p15 = getelementptr <8 x i16>, ptr %p0, i32 15
+ %p16 = getelementptr <8 x i16>, ptr %p0, i32 16
+ %p17 = getelementptr <8 x i16>, ptr %p0, i32 17
+ %p18 = getelementptr <8 x i16>, ptr %p0, i32 18
+ %p19 = getelementptr <8 x i16>, ptr %p0, i32 19
+ %p20 = getelementptr <8 x i16>, ptr %p0, i32 20
+ %p21 = getelementptr <8 x i16>, ptr %p0, i32 21
+ %p22 = getelementptr <8 x i16>, ptr %p0, i32 22
+ %p23 = getelementptr <8 x i16>, ptr %p0, i32 23
+ %p24 = getelementptr <8 x i16>, ptr %p0, i32 24
+ %p25 = getelementptr <8 x i16>, ptr %p0, i32 25
+ %p26 = getelementptr <8 x i16>, ptr %p0, i32 26
+ %p27 = getelementptr <8 x i16>, ptr %p0, i32 27
+ %p28 = getelementptr <8 x i16>, ptr %p0, i32 28
+ %p29 = getelementptr <8 x i16>, ptr %p0, i32 29
+ %p30 = getelementptr <8 x i16>, ptr %p0, i32 30
+ %p31 = getelementptr <8 x i16>, ptr %p0, i32 31
+ %p32 = getelementptr <8 x i16>, ptr %p0, i32 32
+ %p33 = getelementptr <8 x i16>, ptr %p0, i32 33
+ %0 = load <8 x i16>, ptr %p0, align 16
+ %1 = load <8 x i16>, ptr %p1, align 16
+ %2 = load <8 x i16>, ptr %p2, align 16
+ %3 = load <8 x i16>, ptr %p3, align 16
+ %4 = load <8 x i16>, ptr %p4, align 16
+ %5 = load <8 x i16>, ptr %p5, align 16
+ %6 = load <8 x i16>, ptr %p6, align 16
+ %7 = load <8 x i16>, ptr %p7, align 16
+ %8 = load <8 x i16>, ptr %p8, align 16
+ %9 = load <8 x i16>, ptr %p9, align 16
+ %10 = load <8 x i16>, ptr %p10, align 16
+ %11 = load <8 x i16>, ptr %p11, align 16
+ %12 = load <8 x i16>, ptr %p12, align 16
+ %13 = load <8 x i16>, ptr %p13, align 16
+ %14 = load <8 x i16>, ptr %p14, align 16
+ %15 = load <8 x i16>, ptr %p15, align 16
+ %16 = load <8 x i16>, ptr %p16, align 16
+ %17 = load <8 x i16>, ptr %p17, align 16
+ %18 = load <8 x i16>, ptr %p18, align 16
+ %19 = load <8 x i16>, ptr %p19, align 16
+ %20 = load <8 x i16>, ptr %p20, align 16
+ %21 = load <8 x i16>, ptr %p21, align 16
+ %22 = load <8 x i16>, ptr %p22, align 16
+ %23 = load <8 x i16>, ptr %p23, align 16
+ %24 = load <8 x i16>, ptr %p24, align 16
+ %25 = load <8 x i16>, ptr %p25, align 16
+ %26 = load <8 x i16>, ptr %p26, align 16
+ %27 = load <8 x i16>, ptr %p27, align 16
+ %28 = load <8 x i16>, ptr %p28, align 16
+ %29 = load <8 x i16>, ptr %p29, align 16
+ %30 = load <8 x i16>, ptr %p30, align 16
+ %31 = load <8 x i16>, ptr %p31, align 16
+ %32 = load <8 x i16>, ptr %p32, align 16
+ %33 = load <8 x i16>, ptr %p33, align 16
%r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
%r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2)
%r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3)
; CHECK: ld.h {{.*}} Reload
; CHECK: .size
-define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
+define i32 @test_i32(ptr %p0, ptr %q1) nounwind {
entry:
- %p1 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 1
- %p2 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 2
- %p3 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 3
- %p4 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 4
- %p5 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 5
- %p6 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 6
- %p7 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 7
- %p8 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 8
- %p9 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 9
- %p10 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 10
- %p11 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 11
- %p12 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 12
- %p13 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 13
- %p14 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 14
- %p15 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 15
- %p16 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 16
- %p17 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 17
- %p18 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 18
- %p19 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 19
- %p20 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 20
- %p21 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 21
- %p22 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 22
- %p23 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 23
- %p24 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 24
- %p25 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 25
- %p26 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 26
- %p27 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 27
- %p28 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 28
- %p29 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 29
- %p30 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 30
- %p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31
- %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32
- %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33
- %0 = load <4 x i32>, <4 x i32>* %p0, align 16
- %1 = load <4 x i32>, <4 x i32>* %p1, align 16
- %2 = load <4 x i32>, <4 x i32>* %p2, align 16
- %3 = load <4 x i32>, <4 x i32>* %p3, align 16
- %4 = load <4 x i32>, <4 x i32>* %p4, align 16
- %5 = load <4 x i32>, <4 x i32>* %p5, align 16
- %6 = load <4 x i32>, <4 x i32>* %p6, align 16
- %7 = load <4 x i32>, <4 x i32>* %p7, align 16
- %8 = load <4 x i32>, <4 x i32>* %p8, align 16
- %9 = load <4 x i32>, <4 x i32>* %p9, align 16
- %10 = load <4 x i32>, <4 x i32>* %p10, align 16
- %11 = load <4 x i32>, <4 x i32>* %p11, align 16
- %12 = load <4 x i32>, <4 x i32>* %p12, align 16
- %13 = load <4 x i32>, <4 x i32>* %p13, align 16
- %14 = load <4 x i32>, <4 x i32>* %p14, align 16
- %15 = load <4 x i32>, <4 x i32>* %p15, align 16
- %16 = load <4 x i32>, <4 x i32>* %p16, align 16
- %17 = load <4 x i32>, <4 x i32>* %p17, align 16
- %18 = load <4 x i32>, <4 x i32>* %p18, align 16
- %19 = load <4 x i32>, <4 x i32>* %p19, align 16
- %20 = load <4 x i32>, <4 x i32>* %p20, align 16
- %21 = load <4 x i32>, <4 x i32>* %p21, align 16
- %22 = load <4 x i32>, <4 x i32>* %p22, align 16
- %23 = load <4 x i32>, <4 x i32>* %p23, align 16
- %24 = load <4 x i32>, <4 x i32>* %p24, align 16
- %25 = load <4 x i32>, <4 x i32>* %p25, align 16
- %26 = load <4 x i32>, <4 x i32>* %p26, align 16
- %27 = load <4 x i32>, <4 x i32>* %p27, align 16
- %28 = load <4 x i32>, <4 x i32>* %p28, align 16
- %29 = load <4 x i32>, <4 x i32>* %p29, align 16
- %30 = load <4 x i32>, <4 x i32>* %p30, align 16
- %31 = load <4 x i32>, <4 x i32>* %p31, align 16
- %32 = load <4 x i32>, <4 x i32>* %p32, align 16
- %33 = load <4 x i32>, <4 x i32>* %p33, align 16
+ %p1 = getelementptr <4 x i32>, ptr %p0, i32 1
+ %p2 = getelementptr <4 x i32>, ptr %p0, i32 2
+ %p3 = getelementptr <4 x i32>, ptr %p0, i32 3
+ %p4 = getelementptr <4 x i32>, ptr %p0, i32 4
+ %p5 = getelementptr <4 x i32>, ptr %p0, i32 5
+ %p6 = getelementptr <4 x i32>, ptr %p0, i32 6
+ %p7 = getelementptr <4 x i32>, ptr %p0, i32 7
+ %p8 = getelementptr <4 x i32>, ptr %p0, i32 8
+ %p9 = getelementptr <4 x i32>, ptr %p0, i32 9
+ %p10 = getelementptr <4 x i32>, ptr %p0, i32 10
+ %p11 = getelementptr <4 x i32>, ptr %p0, i32 11
+ %p12 = getelementptr <4 x i32>, ptr %p0, i32 12
+ %p13 = getelementptr <4 x i32>, ptr %p0, i32 13
+ %p14 = getelementptr <4 x i32>, ptr %p0, i32 14
+ %p15 = getelementptr <4 x i32>, ptr %p0, i32 15
+ %p16 = getelementptr <4 x i32>, ptr %p0, i32 16
+ %p17 = getelementptr <4 x i32>, ptr %p0, i32 17
+ %p18 = getelementptr <4 x i32>, ptr %p0, i32 18
+ %p19 = getelementptr <4 x i32>, ptr %p0, i32 19
+ %p20 = getelementptr <4 x i32>, ptr %p0, i32 20
+ %p21 = getelementptr <4 x i32>, ptr %p0, i32 21
+ %p22 = getelementptr <4 x i32>, ptr %p0, i32 22
+ %p23 = getelementptr <4 x i32>, ptr %p0, i32 23
+ %p24 = getelementptr <4 x i32>, ptr %p0, i32 24
+ %p25 = getelementptr <4 x i32>, ptr %p0, i32 25
+ %p26 = getelementptr <4 x i32>, ptr %p0, i32 26
+ %p27 = getelementptr <4 x i32>, ptr %p0, i32 27
+ %p28 = getelementptr <4 x i32>, ptr %p0, i32 28
+ %p29 = getelementptr <4 x i32>, ptr %p0, i32 29
+ %p30 = getelementptr <4 x i32>, ptr %p0, i32 30
+ %p31 = getelementptr <4 x i32>, ptr %p0, i32 31
+ %p32 = getelementptr <4 x i32>, ptr %p0, i32 32
+ %p33 = getelementptr <4 x i32>, ptr %p0, i32 33
+ %0 = load <4 x i32>, ptr %p0, align 16
+ %1 = load <4 x i32>, ptr %p1, align 16
+ %2 = load <4 x i32>, ptr %p2, align 16
+ %3 = load <4 x i32>, ptr %p3, align 16
+ %4 = load <4 x i32>, ptr %p4, align 16
+ %5 = load <4 x i32>, ptr %p5, align 16
+ %6 = load <4 x i32>, ptr %p6, align 16
+ %7 = load <4 x i32>, ptr %p7, align 16
+ %8 = load <4 x i32>, ptr %p8, align 16
+ %9 = load <4 x i32>, ptr %p9, align 16
+ %10 = load <4 x i32>, ptr %p10, align 16
+ %11 = load <4 x i32>, ptr %p11, align 16
+ %12 = load <4 x i32>, ptr %p12, align 16
+ %13 = load <4 x i32>, ptr %p13, align 16
+ %14 = load <4 x i32>, ptr %p14, align 16
+ %15 = load <4 x i32>, ptr %p15, align 16
+ %16 = load <4 x i32>, ptr %p16, align 16
+ %17 = load <4 x i32>, ptr %p17, align 16
+ %18 = load <4 x i32>, ptr %p18, align 16
+ %19 = load <4 x i32>, ptr %p19, align 16
+ %20 = load <4 x i32>, ptr %p20, align 16
+ %21 = load <4 x i32>, ptr %p21, align 16
+ %22 = load <4 x i32>, ptr %p22, align 16
+ %23 = load <4 x i32>, ptr %p23, align 16
+ %24 = load <4 x i32>, ptr %p24, align 16
+ %25 = load <4 x i32>, ptr %p25, align 16
+ %26 = load <4 x i32>, ptr %p26, align 16
+ %27 = load <4 x i32>, ptr %p27, align 16
+ %28 = load <4 x i32>, ptr %p28, align 16
+ %29 = load <4 x i32>, ptr %p29, align 16
+ %30 = load <4 x i32>, ptr %p30, align 16
+ %31 = load <4 x i32>, ptr %p31, align 16
+ %32 = load <4 x i32>, ptr %p32, align 16
+ %33 = load <4 x i32>, ptr %p33, align 16
%r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
%r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
%r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
; CHECK: ld.w {{.*}} Reload
; CHECK: .size
-define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
+define i32 @test_i64(ptr %p0, ptr %q1) nounwind {
entry:
- %p1 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1
- %p2 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 2
- %p3 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 3
- %p4 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 4
- %p5 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 5
- %p6 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 6
- %p7 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 7
- %p8 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 8
- %p9 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 9
- %p10 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 10
- %p11 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 11
- %p12 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 12
- %p13 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 13
- %p14 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 14
- %p15 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 15
- %p16 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 16
- %p17 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 17
- %p18 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 18
- %p19 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 19
- %p20 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 20
- %p21 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 21
- %p22 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 22
- %p23 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 23
- %p24 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 24
- %p25 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 25
- %p26 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 26
- %p27 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 27
- %p28 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 28
- %p29 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 29
- %p30 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 30
- %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31
- %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32
- %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33
- %0 = load <2 x i64>, <2 x i64>* %p0, align 16
- %1 = load <2 x i64>, <2 x i64>* %p1, align 16
- %2 = load <2 x i64>, <2 x i64>* %p2, align 16
- %3 = load <2 x i64>, <2 x i64>* %p3, align 16
- %4 = load <2 x i64>, <2 x i64>* %p4, align 16
- %5 = load <2 x i64>, <2 x i64>* %p5, align 16
- %6 = load <2 x i64>, <2 x i64>* %p6, align 16
- %7 = load <2 x i64>, <2 x i64>* %p7, align 16
- %8 = load <2 x i64>, <2 x i64>* %p8, align 16
- %9 = load <2 x i64>, <2 x i64>* %p9, align 16
- %10 = load <2 x i64>, <2 x i64>* %p10, align 16
- %11 = load <2 x i64>, <2 x i64>* %p11, align 16
- %12 = load <2 x i64>, <2 x i64>* %p12, align 16
- %13 = load <2 x i64>, <2 x i64>* %p13, align 16
- %14 = load <2 x i64>, <2 x i64>* %p14, align 16
- %15 = load <2 x i64>, <2 x i64>* %p15, align 16
- %16 = load <2 x i64>, <2 x i64>* %p16, align 16
- %17 = load <2 x i64>, <2 x i64>* %p17, align 16
- %18 = load <2 x i64>, <2 x i64>* %p18, align 16
- %19 = load <2 x i64>, <2 x i64>* %p19, align 16
- %20 = load <2 x i64>, <2 x i64>* %p20, align 16
- %21 = load <2 x i64>, <2 x i64>* %p21, align 16
- %22 = load <2 x i64>, <2 x i64>* %p22, align 16
- %23 = load <2 x i64>, <2 x i64>* %p23, align 16
- %24 = load <2 x i64>, <2 x i64>* %p24, align 16
- %25 = load <2 x i64>, <2 x i64>* %p25, align 16
- %26 = load <2 x i64>, <2 x i64>* %p26, align 16
- %27 = load <2 x i64>, <2 x i64>* %p27, align 16
- %28 = load <2 x i64>, <2 x i64>* %p28, align 16
- %29 = load <2 x i64>, <2 x i64>* %p29, align 16
- %30 = load <2 x i64>, <2 x i64>* %p30, align 16
- %31 = load <2 x i64>, <2 x i64>* %p31, align 16
- %32 = load <2 x i64>, <2 x i64>* %p32, align 16
- %33 = load <2 x i64>, <2 x i64>* %p33, align 16
+ %p1 = getelementptr <2 x i64>, ptr %p0, i32 1
+ %p2 = getelementptr <2 x i64>, ptr %p0, i32 2
+ %p3 = getelementptr <2 x i64>, ptr %p0, i32 3
+ %p4 = getelementptr <2 x i64>, ptr %p0, i32 4
+ %p5 = getelementptr <2 x i64>, ptr %p0, i32 5
+ %p6 = getelementptr <2 x i64>, ptr %p0, i32 6
+ %p7 = getelementptr <2 x i64>, ptr %p0, i32 7
+ %p8 = getelementptr <2 x i64>, ptr %p0, i32 8
+ %p9 = getelementptr <2 x i64>, ptr %p0, i32 9
+ %p10 = getelementptr <2 x i64>, ptr %p0, i32 10
+ %p11 = getelementptr <2 x i64>, ptr %p0, i32 11
+ %p12 = getelementptr <2 x i64>, ptr %p0, i32 12
+ %p13 = getelementptr <2 x i64>, ptr %p0, i32 13
+ %p14 = getelementptr <2 x i64>, ptr %p0, i32 14
+ %p15 = getelementptr <2 x i64>, ptr %p0, i32 15
+ %p16 = getelementptr <2 x i64>, ptr %p0, i32 16
+ %p17 = getelementptr <2 x i64>, ptr %p0, i32 17
+ %p18 = getelementptr <2 x i64>, ptr %p0, i32 18
+ %p19 = getelementptr <2 x i64>, ptr %p0, i32 19
+ %p20 = getelementptr <2 x i64>, ptr %p0, i32 20
+ %p21 = getelementptr <2 x i64>, ptr %p0, i32 21
+ %p22 = getelementptr <2 x i64>, ptr %p0, i32 22
+ %p23 = getelementptr <2 x i64>, ptr %p0, i32 23
+ %p24 = getelementptr <2 x i64>, ptr %p0, i32 24
+ %p25 = getelementptr <2 x i64>, ptr %p0, i32 25
+ %p26 = getelementptr <2 x i64>, ptr %p0, i32 26
+ %p27 = getelementptr <2 x i64>, ptr %p0, i32 27
+ %p28 = getelementptr <2 x i64>, ptr %p0, i32 28
+ %p29 = getelementptr <2 x i64>, ptr %p0, i32 29
+ %p30 = getelementptr <2 x i64>, ptr %p0, i32 30
+ %p31 = getelementptr <2 x i64>, ptr %p0, i32 31
+ %p32 = getelementptr <2 x i64>, ptr %p0, i32 32
+ %p33 = getelementptr <2 x i64>, ptr %p0, i32 33
+ %0 = load <2 x i64>, ptr %p0, align 16
+ %1 = load <2 x i64>, ptr %p1, align 16
+ %2 = load <2 x i64>, ptr %p2, align 16
+ %3 = load <2 x i64>, ptr %p3, align 16
+ %4 = load <2 x i64>, ptr %p4, align 16
+ %5 = load <2 x i64>, ptr %p5, align 16
+ %6 = load <2 x i64>, ptr %p6, align 16
+ %7 = load <2 x i64>, ptr %p7, align 16
+ %8 = load <2 x i64>, ptr %p8, align 16
+ %9 = load <2 x i64>, ptr %p9, align 16
+ %10 = load <2 x i64>, ptr %p10, align 16
+ %11 = load <2 x i64>, ptr %p11, align 16
+ %12 = load <2 x i64>, ptr %p12, align 16
+ %13 = load <2 x i64>, ptr %p13, align 16
+ %14 = load <2 x i64>, ptr %p14, align 16
+ %15 = load <2 x i64>, ptr %p15, align 16
+ %16 = load <2 x i64>, ptr %p16, align 16
+ %17 = load <2 x i64>, ptr %p17, align 16
+ %18 = load <2 x i64>, ptr %p18, align 16
+ %19 = load <2 x i64>, ptr %p19, align 16
+ %20 = load <2 x i64>, ptr %p20, align 16
+ %21 = load <2 x i64>, ptr %p21, align 16
+ %22 = load <2 x i64>, ptr %p22, align 16
+ %23 = load <2 x i64>, ptr %p23, align 16
+ %24 = load <2 x i64>, ptr %p24, align 16
+ %25 = load <2 x i64>, ptr %p25, align 16
+ %26 = load <2 x i64>, ptr %p26, align 16
+ %27 = load <2 x i64>, ptr %p27, align 16
+ %28 = load <2 x i64>, ptr %p28, align 16
+ %29 = load <2 x i64>, ptr %p29, align 16
+ %30 = load <2 x i64>, ptr %p30, align 16
+ %31 = load <2 x i64>, ptr %p31, align 16
+ %32 = load <2 x i64>, ptr %p32, align 16
+ %33 = load <2 x i64>, ptr %p33, align 16
%r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
%r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2)
%r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3)
define void @llvm_mips_and_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <16 x i8>
- store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
+ store <16 x i8> %5, ptr @llvm_mips_and_v_b_RES
ret void
}
define void @llvm_mips_and_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <8 x i16>
- store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
+ store <8 x i16> %5, ptr @llvm_mips_and_v_h_RES
ret void
}
define void @llvm_mips_and_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <4 x i32>
- store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
+ store <4 x i32> %5, ptr @llvm_mips_and_v_w_RES
ret void
}
define void @llvm_mips_and_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <2 x i64>
- store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
+ store <2 x i64> %5, ptr @llvm_mips_and_v_d_RES
ret void
}
;
define void @and_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
%2 = and <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_and_v_b_RES
ret void
}
;
define void @and_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
%2 = and <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_and_v_h_RES
ret void
}
define void @and_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
%2 = and <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_and_v_w_RES
ret void
}
define void @and_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
%2 = and <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_and_v_d_RES
ret void
}
define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <16 x i8>
- store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
+ store <16 x i8> %7, ptr @llvm_mips_bmnz_v_b_RES
ret void
}
define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <8 x i16>
- store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
+ store <8 x i16> %7, ptr @llvm_mips_bmnz_v_h_RES
ret void
}
define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <4 x i32>
- store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
+ store <4 x i32> %7, ptr @llvm_mips_bmnz_v_w_RES
ret void
}
define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <2 x i64>
- store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
+ store <2 x i64> %7, ptr @llvm_mips_bmnz_v_d_RES
ret void
}
define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <16 x i8>
- store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
+ store <16 x i8> %7, ptr @llvm_mips_bmz_v_b_RES
ret void
}
define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <8 x i16>
- store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
+ store <8 x i16> %7, ptr @llvm_mips_bmz_v_h_RES
ret void
}
define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <4 x i32>
- store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
+ store <4 x i32> %7, ptr @llvm_mips_bmz_v_w_RES
ret void
}
define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <2 x i64>
- store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
+ store <2 x i64> %7, ptr @llvm_mips_bmz_v_d_RES
ret void
}
define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2
- %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+ %0 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG2
+ %2 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG3
%3 = bitcast <16 x i8> %0 to <16 x i8>
%4 = bitcast <16 x i8> %1 to <16 x i8>
%5 = bitcast <16 x i8> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <16 x i8>
- store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
+ store <16 x i8> %7, ptr @llvm_mips_bsel_v_b_RES
ret void
}
define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2
- %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+ %0 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG2
+ %2 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG3
%3 = bitcast <8 x i16> %0 to <16 x i8>
%4 = bitcast <8 x i16> %1 to <16 x i8>
%5 = bitcast <8 x i16> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <8 x i16>
- store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
+ store <8 x i16> %7, ptr @llvm_mips_bsel_v_h_RES
ret void
}
define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2
- %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+ %0 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG2
+ %2 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG3
%3 = bitcast <4 x i32> %0 to <16 x i8>
%4 = bitcast <4 x i32> %1 to <16 x i8>
%5 = bitcast <4 x i32> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <4 x i32>
- store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
+ store <4 x i32> %7, ptr @llvm_mips_bsel_v_w_RES
ret void
}
define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2
- %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+ %0 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG2
+ %2 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG3
%3 = bitcast <2 x i64> %0 to <16 x i8>
%4 = bitcast <2 x i64> %1 to <16 x i8>
%5 = bitcast <2 x i64> %2 to <16 x i8>
%6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
%7 = bitcast <16 x i8> %6 to <2 x i64>
- store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
+ store <2 x i64> %7, ptr @llvm_mips_bsel_v_d_RES
ret void
}
define void @llvm_mips_nor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <16 x i8>
- store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
+ store <16 x i8> %5, ptr @llvm_mips_nor_v_b_RES
ret void
}
define void @llvm_mips_nor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <8 x i16>
- store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
+ store <8 x i16> %5, ptr @llvm_mips_nor_v_h_RES
ret void
}
define void @llvm_mips_nor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <4 x i32>
- store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
+ store <4 x i32> %5, ptr @llvm_mips_nor_v_w_RES
ret void
}
define void @llvm_mips_nor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <2 x i64>
- store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
+ store <2 x i64> %5, ptr @llvm_mips_nor_v_d_RES
ret void
}
define void @llvm_mips_or_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <16 x i8>
- store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
+ store <16 x i8> %5, ptr @llvm_mips_or_v_b_RES
ret void
}
define void @llvm_mips_or_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <8 x i16>
- store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
+ store <8 x i16> %5, ptr @llvm_mips_or_v_h_RES
ret void
}
define void @llvm_mips_or_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <4 x i32>
- store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
+ store <4 x i32> %5, ptr @llvm_mips_or_v_w_RES
ret void
}
define void @llvm_mips_or_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <2 x i64>
- store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
+ store <2 x i64> %5, ptr @llvm_mips_or_v_d_RES
ret void
}
;
define void @or_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
%2 = or <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_or_v_b_RES
ret void
}
;
define void @or_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
%2 = or <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_or_v_h_RES
ret void
}
define void @or_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
%2 = or <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_or_v_w_RES
ret void
}
define void @or_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
%2 = or <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_or_v_d_RES
ret void
}
define void @llvm_mips_xor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
%2 = bitcast <16 x i8> %0 to <16 x i8>
%3 = bitcast <16 x i8> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <16 x i8>
- store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
+ store <16 x i8> %5, ptr @llvm_mips_xor_v_b_RES
ret void
}
define void @llvm_mips_xor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
%2 = bitcast <8 x i16> %0 to <16 x i8>
%3 = bitcast <8 x i16> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <8 x i16>
- store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
+ store <8 x i16> %5, ptr @llvm_mips_xor_v_h_RES
ret void
}
define void @llvm_mips_xor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
%2 = bitcast <4 x i32> %0 to <16 x i8>
%3 = bitcast <4 x i32> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <4 x i32>
- store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
+ store <4 x i32> %5, ptr @llvm_mips_xor_v_w_RES
ret void
}
define void @llvm_mips_xor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
%2 = bitcast <2 x i64> %0 to <16 x i8>
%3 = bitcast <2 x i64> %1 to <16 x i8>
%4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
%5 = bitcast <16 x i8> %4 to <2 x i64>
- store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
+ store <2 x i64> %5, ptr @llvm_mips_xor_v_d_RES
ret void
}
;
define void @xor_v_b_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
- %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
%2 = xor <16 x i8> %0, %1
- store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
+ store <16 x i8> %2, ptr @llvm_mips_xor_v_b_RES
ret void
}
;
define void @xor_v_h_test() nounwind {
entry:
- %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
- %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
%2 = xor <8 x i16> %0, %1
- store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
+ store <8 x i16> %2, ptr @llvm_mips_xor_v_h_RES
ret void
}
define void @xor_v_w_test() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
- %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
%2 = xor <4 x i32> %0, %1
- store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
+ store <4 x i32> %2, ptr @llvm_mips_xor_v_w_RES
ret void
}
define void @xor_v_d_test() nounwind {
entry:
- %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
- %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
%2 = xor <2 x i64> %0, %1
- store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
+ store <2 x i64> %2, ptr @llvm_mips_xor_v_d_RES
ret void
}
define i32 @llvm_mips_bnz_v_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_v_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bnz_v_ARG1
%1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define i32 @llvm_mips_bz_v_test() nounwind {
entry:
- %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bz_v_ARG1
+ %0 = load <16 x i8>, ptr @llvm_mips_bz_v_ARG1
%1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
%2 = icmp eq i32 %1, 0
br i1 %2, label %true, label %false
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%mul = mul nsw i32 %1, %0
; 16: mult ${{[0-9]+}}, ${{[0-9]+}}
; 16: mflo ${{[0-9]+}}
- store i32 %mul, i32* @kkkk, align 4
+ store i32 %mul, ptr @kkkk, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i64, i64* @iiii, align 8
- %1 = load i64, i64* @jjjj, align 8
+ %0 = load i64, ptr @iiii, align 8
+ %1 = load i64, ptr @jjjj, align 8
%mul = mul nsw i64 %1, %0
- store i64 %mul, i64* @kkkk, align 8
+ store i64 %mul, ptr @kkkk, align 8
; 16: multu ${{[0-9]+}}, ${{[0-9]+}}
; 16: mfhi ${{[0-9]+}}
; 16: mult ${{[0-9]+}}, ${{[0-9]+}}
; This test tests that a block whose address is taken is bundle-aligned in NaCl.
-@bb_array = constant [2 x i8*] [i8* blockaddress(@test2, %bb1),
- i8* blockaddress(@test2, %bb2)], align 4
+@bb_array = constant [2 x ptr] [ptr blockaddress(@test2, %bb1),
+ ptr blockaddress(@test2, %bb2)], align 4
define i32 @test2(i32 %i) {
entry:
- %elementptr = getelementptr inbounds [2 x i8*], [2 x i8*]* @bb_array, i32 0, i32 %i
- %0 = load i8*, i8** %elementptr, align 4
- indirectbr i8* %0, [label %bb1, label %bb2]
+ %elementptr = getelementptr inbounds [2 x ptr], ptr @bb_array, i32 0, i32 %i
+ %0 = load ptr, ptr %elementptr, align 4
+ indirectbr ptr %0, [label %bb1, label %bb2]
bb1:
ret i32 111
define void @test1() {
- %1 = load i32, i32* @x, align 4
+ %1 = load i32, ptr @x, align 4
call void @f1(i32 %1)
ret void
define void @test2() {
- store i32 1, i32* @x, align 4
+ store i32 1, ptr @x, align 4
call void @f2()
ret void
@var = external global i32
define void @f() {
- %val1 = load volatile i32, i32* @var
- %val2 = load volatile i32, i32* @var
- %val3 = load volatile i32, i32* @var
- %val4 = load volatile i32, i32* @var
- %val5 = load volatile i32, i32* @var
- %val6 = load volatile i32, i32* @var
- %val7 = load volatile i32, i32* @var
- %val8 = load volatile i32, i32* @var
- %val9 = load volatile i32, i32* @var
- %val10 = load volatile i32, i32* @var
- %val11 = load volatile i32, i32* @var
- %val12 = load volatile i32, i32* @var
- %val13 = load volatile i32, i32* @var
- %val14 = load volatile i32, i32* @var
- %val15 = load volatile i32, i32* @var
- %val16 = load volatile i32, i32* @var
- store volatile i32 %val1, i32* @var
- store volatile i32 %val2, i32* @var
- store volatile i32 %val3, i32* @var
- store volatile i32 %val4, i32* @var
- store volatile i32 %val5, i32* @var
- store volatile i32 %val6, i32* @var
- store volatile i32 %val7, i32* @var
- store volatile i32 %val8, i32* @var
- store volatile i32 %val9, i32* @var
- store volatile i32 %val10, i32* @var
- store volatile i32 %val11, i32* @var
- store volatile i32 %val12, i32* @var
- store volatile i32 %val13, i32* @var
- store volatile i32 %val14, i32* @var
- store volatile i32 %val15, i32* @var
- store volatile i32 %val16, i32* @var
+ %val1 = load volatile i32, ptr @var
+ %val2 = load volatile i32, ptr @var
+ %val3 = load volatile i32, ptr @var
+ %val4 = load volatile i32, ptr @var
+ %val5 = load volatile i32, ptr @var
+ %val6 = load volatile i32, ptr @var
+ %val7 = load volatile i32, ptr @var
+ %val8 = load volatile i32, ptr @var
+ %val9 = load volatile i32, ptr @var
+ %val10 = load volatile i32, ptr @var
+ %val11 = load volatile i32, ptr @var
+ %val12 = load volatile i32, ptr @var
+ %val13 = load volatile i32, ptr @var
+ %val14 = load volatile i32, ptr @var
+ %val15 = load volatile i32, ptr @var
+ %val16 = load volatile i32, ptr @var
+ store volatile i32 %val1, ptr @var
+ store volatile i32 %val2, ptr @var
+ store volatile i32 %val3, ptr @var
+ store volatile i32 %val4, ptr @var
+ store volatile i32 %val5, ptr @var
+ store volatile i32 %val6, ptr @var
+ store volatile i32 %val7, ptr @var
+ store volatile i32 %val8, ptr @var
+ store volatile i32 %val9, ptr @var
+ store volatile i32 %val10, ptr @var
+ store volatile i32 %val11, ptr @var
+ store volatile i32 %val12, ptr @var
+ store volatile i32 %val13, ptr @var
+ store volatile i32 %val14, ptr @var
+ store volatile i32 %val15, ptr @var
+ store volatile i32 %val16, ptr @var
ret void
; Check that t6, t7 and t8 are used in non-NaCl code.
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%sub = sub nsw i32 0, %0
; 16: neg ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %sub)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Force the float into an odd-numbered register using named registers and
; load the vector.
%b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
- %0 = load volatile <4 x float>, <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, ptr @v4f32
; Clobber all except $f12/$w12 and $f13
;
; vector.
call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%1 = insertelement <4 x float> %0, float %b, i32 0
- store <4 x float> %1, <4 x float>* @v4f32
+ store <4 x float> %1, ptr @v4f32
ret void
}
; Force the float into an odd-numbered register using named registers and
; load the vector.
%b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
- %0 = load volatile <4 x float>, <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, ptr @v4f32
; Clobber all except $f12/$w12 and $f13
;
; vector.
call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%1 = insertelement <4 x float> %0, float %b, i32 1
- store <4 x float> %1, <4 x float>* @v4f32
+ store <4 x float> %1, ptr @v4f32
ret void
}
define float @msa_extract_0() {
entry:
- %0 = load volatile <4 x float>, <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, ptr @v4f32
%1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
; Clobber all except $f12, and $f13
define float @msa_extract_1() {
entry:
- %0 = load volatile <4 x float>, <4 x float>* @v4f32
+ %0 = load volatile <4 x float>, ptr @v4f32
%1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
; Clobber all except $f13
; Function Attrs: nounwind
define void @foo() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%conv = fpext float %0 to double
%add = fadd double %conv, 1.500000e+00
%conv1 = fptrunc double %add to float
- store float %conv1, float* @x, align 4
+ store float %conv1, ptr @x, align 4
ret void
}
; CHECK: .ent foo
; Function Attrs: nounwind
define void @nofoo() #1 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%conv = fpext float %0 to double
%add = fadd double %conv, 3.900000e+00
%conv1 = fptrunc double %add to float
- store float %conv1, float* @x, align 4
+ store float %conv1, ptr @x, align 4
ret void
}
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
%neg = xor i32 %0, -1
; 16: not ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %neg)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %neg)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; CHECK-NEXT: addiu $sp, $sp, 64
entry:
%agg.tmp10 = alloca %struct.S3, align 4
- call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
- call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind
- %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
- store i8 11, i8* %tmp11, align 4
- call void @callee3(float 2.100000e+01, %struct.S3* byval(%struct.S3) %agg.tmp10, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
+ call void @callee1(float 2.000000e+01, ptr byval(%struct.S1) @f1.s1) nounwind
+ call void @callee2(ptr byval(%struct.S2) @f1.s2) nounwind
+ store i8 11, ptr %agg.tmp10, align 4
+ call void @callee3(float 2.100000e+01, ptr byval(%struct.S3) %agg.tmp10, ptr byval(%struct.S1) @f1.s1) nounwind
ret void
}
-declare void @callee1(float, %struct.S1* byval(%struct.S1))
+declare void @callee1(float, ptr byval(%struct.S1))
-declare void @callee2(%struct.S2* byval(%struct.S2))
+declare void @callee2(ptr byval(%struct.S2))
-declare void @callee3(float, %struct.S3* byval(%struct.S3), %struct.S1* byval(%struct.S1))
+declare void @callee3(float, ptr byval(%struct.S3), ptr byval(%struct.S1))
-define void @f2(float %f, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
+define void @f2(float %f, ptr nocapture byval(%struct.S1) %s1) nounwind {
; CHECK-LABEL: f2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 48
entry:
- %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
- %tmp = load i32, i32* %i2, align 4
- %d = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 4
- %tmp1 = load double, double* %d, align 8
- %ll = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 3
- %tmp2 = load i64, i64* %ll, align 8
- %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
- %tmp3 = load i32, i32* %i, align 4
- %s = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1
- %tmp4 = load i16, i16* %s, align 2
- %c = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 0
- %tmp5 = load i8, i8* %c, align 1
+ %i2 = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 5
+ %tmp = load i32, ptr %i2, align 4
+ %d = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 4
+ %tmp1 = load double, ptr %d, align 8
+ %ll = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 3
+ %tmp2 = load i64, ptr %ll, align 8
+ %i = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 2
+ %tmp3 = load i32, ptr %i, align 4
+ %s = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1
+ %tmp4 = load i16, ptr %s, align 2
+ %tmp5 = load i8, ptr %s1, align 1
tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
ret void
}
declare void @callee4(i32, double, i64, i32, i16 signext, i8 signext, float)
-define void @f3(%struct.S2* nocapture byval(%struct.S2) %s2) nounwind {
+define void @f3(ptr nocapture byval(%struct.S2) %s2) nounwind {
; CHECK-LABEL: f3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 48
entry:
- %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0
- %tmp = load i32, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 3
- %tmp3 = load i32, i32* %arrayidx2, align 4
+ %tmp = load i32, ptr %s2, align 4
+ %arrayidx2 = getelementptr inbounds %struct.S2, ptr %s2, i32 0, i32 0, i32 3
+ %tmp3 = load i32, ptr %arrayidx2, align 4
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
ret void
}
-define void @f4(float %f, %struct.S3* nocapture byval(%struct.S3) %s3, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
+define void @f4(float %f, ptr nocapture byval(%struct.S3) %s3, ptr nocapture byval(%struct.S1) %s1) nounwind {
; CHECK-LABEL: f4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 48
entry:
- %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
- %tmp = load i32, i32* %i, align 4
- %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
- %tmp1 = load i32, i32* %i2, align 4
- %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0
- %tmp2 = load i8, i8* %c, align 1
+ %i = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 2
+ %tmp = load i32, ptr %i, align 4
+ %i2 = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 5
+ %tmp1 = load i32, ptr %i2, align 4
+ %tmp2 = load i8, ptr %s3, align 1
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
ret void
}
%struct.S4 = type { [4 x i32] }
-define void @f5(i64 %a0, %struct.S4* nocapture byval(%struct.S4) %a1) nounwind {
+define void @f5(i64 %a0, ptr nocapture byval(%struct.S4) %a1) nounwind {
; CHECK-LABEL: f5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $2, %hi(_gp_disp)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $sp, $sp, 32
entry:
- tail call void @f6(%struct.S4* byval(%struct.S4) %a1, i64 %a0) nounwind
+ tail call void @f6(ptr byval(%struct.S4) %a1, i64 %a0) nounwind
ret void
}
-declare void @f6(%struct.S4* nocapture byval(%struct.S4), i64)
+declare void @f6(ptr nocapture byval(%struct.S4), i64)
; variable argument is returned from the correct stack location.
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr) nounwind
; return int
define i32 @va1(i32 %a, ...) nounwind {
entry:
%a.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%b = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- store i32 %0, i32* %b, align 4
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load i32, i32* %b, align 4
+ store i32 %a, ptr %a.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ store i32 %0, ptr %b, align 4
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load i32, ptr %b, align 4
ret i32 %tmp
; CHECK-LABEL: va1:
define double @va2(i32 %a, ...) nounwind {
entry:
%a.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%b = alloca double, align 8
- store i32 %a, i32* %a.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, double
- store double %0, double* %b, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load double, double* %b, align 8
+ store i32 %a, ptr %a.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, double
+ store double %0, ptr %b, align 8
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load double, ptr %b, align 8
ret double %tmp
; CHECK-LABEL: va2:
define i32 @va3(double %a, ...) nounwind {
entry:
%a.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%b = alloca i32, align 4
- store double %a, double* %a.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- store i32 %0, i32* %b, align 4
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load i32, i32* %b, align 4
+ store double %a, ptr %a.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ store i32 %0, ptr %b, align 4
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load i32, ptr %b, align 4
ret i32 %tmp
; CHECK-LABEL: va3:
define double @va4(double %a, ...) nounwind {
entry:
%a.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%b = alloca double, align 8
- store double %a, double* %a.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, double
- store double %0, double* %b, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load double, double* %b, align 8
+ store double %a, ptr %a.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, double
+ store double %0, ptr %b, align 8
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load double, ptr %b, align 8
ret double %tmp
; CHECK-LABEL: va4:
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
%c.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%d = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store i32 %b, i32* %b.addr, align 4
- store i32 %c, i32* %c.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- store i32 %0, i32* %d, align 4
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load i32, i32* %d, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ store i32 %0, ptr %d, align 4
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load i32, ptr %d, align 4
ret i32 %tmp
; CHECK-LABEL: va5:
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
%c.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%d = alloca double, align 8
- store i32 %a, i32* %a.addr, align 4
- store i32 %b, i32* %b.addr, align 4
- store i32 %c, i32* %c.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, double
- store double %0, double* %d, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load double, double* %d, align 8
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, double
+ store double %0, ptr %d, align 8
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load double, ptr %d, align 8
ret double %tmp
; CHECK-LABEL: va6:
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- store i32 %0, i32* %c, align 4
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load i32, i32* %c, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ store i32 %0, ptr %c, align 4
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load i32, ptr %c, align 4
ret i32 %tmp
; CHECK-LABEL: va7:
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca double, align 8
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%c = alloca double, align 8
- store i32 %a, i32* %a.addr, align 4
- store double %b, double* %b.addr, align 8
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, double
- store double %0, double* %c, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load double, double* %c, align 8
+ store i32 %a, ptr %a.addr, align 4
+ store double %b, ptr %b.addr, align 8
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, double
+ store double %0, ptr %c, align 8
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load double, ptr %c, align 8
ret double %tmp
; CHECK-LABEL: va8:
%a.addr = alloca double, align 8
%b.addr = alloca double, align 8
%c.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%d = alloca i32, align 4
- store double %a, double* %a.addr, align 8
- store double %b, double* %b.addr, align 8
- store i32 %c, i32* %c.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, i32
- store i32 %0, i32* %d, align 4
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load i32, i32* %d, align 4
+ store double %a, ptr %a.addr, align 8
+ store double %b, ptr %b.addr, align 8
+ store i32 %c, ptr %c.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, i32
+ store i32 %0, ptr %d, align 4
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load i32, ptr %d, align 4
ret i32 %tmp
; CHECK-LABEL: va9:
%a.addr = alloca double, align 8
%b.addr = alloca double, align 8
%c.addr = alloca i32, align 4
- %ap = alloca i8*, align 4
+ %ap = alloca ptr, align 4
%d = alloca double, align 8
- store double %a, double* %a.addr, align 8
- store double %b, double* %b.addr, align 8
- store i32 %c, i32* %c.addr, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- %0 = va_arg i8** %ap, double
- store double %0, double* %d, align 8
- %ap2 = bitcast i8** %ap to i8*
- call void @llvm.va_end(i8* %ap2)
- %tmp = load double, double* %d, align 8
+ store double %a, ptr %a.addr, align 8
+ store double %b, ptr %b.addr, align 8
+ store i32 %c, ptr %c.addr, align 4
+ call void @llvm.va_start(ptr %ap)
+ %0 = va_arg ptr %ap, double
+ store double %0, ptr %d, align 8
+ call void @llvm.va_end(ptr %ap)
+ %tmp = load double, ptr %d, align 8
ret double %tmp
; CHECK-LABEL: va10:
; OCTEON: bbit0 $1, 1, [[BB0:(\$|\.L)BB[0-9_]+]]
; OCTEON-PIC-NOT: b {{[[:space:]].*}}
; OCTEON-NOT: j {{[[:space:]].*}}
- %0 = load i64, i64* @var, align 8
+ %0 = load i64, ptr @var, align 8
%and = and i64 %0, 2
%tobool = icmp eq i64 %and, 0
br i1 %tobool, label %if.end, label %if.then
; OCTEON: bbit1 $1, 1, [[BB0:(\$|\.L)BB[0-9_]+]]
; OCTEON-PIC-NOT: b {{[[:space:]].*}}
; OCTEON-NOT: j {{[[:space:]].*}}
- %0 = load i64, i64* @var, align 8
+ %0 = load i64, ptr @var, align 8
%and = and i64 %0, 2
%tobool = icmp eq i64 %and, 0
br i1 %tobool, label %if.then, label %if.end
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
- store i32 0, i32* %retval
- store i32 0, i32* %i, align 4
+ store i32 0, ptr %retval
+ store i32 0, ptr %i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32, i32* %i, align 4
+ %0 = load i32, ptr %i, align 4
%cmp = icmp slt i32 %0, 10
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void bitcast (void (...)* @foo to void ()*)()
+ call void @foo()
; CHECK: jalr $25
br label %for.inc
for.inc: ; preds = %for.body
- %1 = load i32, i32* %i, align 4
+ %1 = load i32, ptr %i, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* %i, align 4
+ store i32 %inc, ptr %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %2 = load i32, i32* %retval
+ %2 = load i32, ptr %retval
ret i32 %2
}
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
- %1 = load i32, i32* @y, align 4
+ %0 = load i32, ptr @x, align 4
+ %1 = load i32, ptr @y, align 4
%or = or i32 %0, %1
; 16: or ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %or)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %or)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; RUN: llc %s -mtriple=mipsel -o - | FileCheck %s
-define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
+define i1 @no__mulodi4(i32 %a, i64 %b, ptr %c) {
; CHECK-LABEL: no__mulodi4
; CHECK-NOT: jal __mulodi4
; CHECK-NOT: jal __multi3
%5 = sext i32 %4 to i64
%6 = icmp ne i64 %3, %5
%7 = or i1 %2, %6
- store i32 %4, i32* %c, align 4
+ store i32 %4, ptr %c, align 4
ret i1 %7
}
; BE: lw $2, 4($4)
; LE: lw $2, 0($4)
-define i32 @a(<2 x i32> * %a) {
+define i32 @a(ptr %a) {
entry:
-%0 = load <2 x i32>, <2 x i32> * %a
+%0 = load <2 x i32>, ptr %a
%1 = bitcast <2 x i32> %0 to i64
%2 = trunc i64 %1 to i32
ret i32 %2
; BE: lw $2, 12($4)
; LE: lw $2, 0($4)
-define i32 @b(<4 x i32> * %a) {
+define i32 @b(ptr %a) {
entry:
-%0 = load <4 x i32>, <4 x i32> * %a
+%0 = load <4 x i32>, ptr %a
%1 = bitcast <4 x i32> %0 to i128
%2 = trunc i128 %1 to i32
ret i32 %2
; BE: lw $2, 0($4)
; LE: lw $2, 0($4)
-define i32 @c(i64 * %a) {
+define i32 @c(ptr %a) {
entry:
-%0 = load i64, i64 * %a
+%0 = load i64, ptr %a
%1 = bitcast i64 %0 to <2 x i32>
%2 = extractelement <2 x i32> %1, i32 0
ret i32 %2
; BE: lw $2, 4($4)
; LE: lw $2, 4($4)
-define i32 @d(i64 * %a) {
+define i32 @d(ptr %a) {
entry:
-%0 = load i64, i64 * %a
+%0 = load i64, ptr %a
%1 = bitcast i64 %0 to <2 x i32>
%2 = extractelement <2 x i32> %1, i32 1
ret i32 %2
; This could result in one of the pointers being considered dereferenceable
; and other not.
-define void @foo(i8*) {
+define void @foo(ptr) {
start:
%a = alloca [22 x i8]
%b = alloca [22 x i8]
- %c = bitcast [22 x i8]* %a to i8*
- %d = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 2
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %c, i8* %d, i32 20, i1 false)
- %e = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 6
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %e, i32 12, i1 false)
+ %d = getelementptr inbounds [22 x i8], ptr %b, i32 0, i32 2
+ call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %d, i32 20, i1 false)
+ %e = getelementptr inbounds [22 x i8], ptr %b, i32 0, i32 6
+ call void @llvm.memcpy.p0.p0.i32(ptr %0, ptr %e, i32 12, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
define void @e() !dbg !19 {
entry:
- %0 = load i32, i32* @c, align 4, !dbg !28, !tbaa !31
+ %0 = load i32, ptr @c, align 4, !dbg !28, !tbaa !31
%tobool8 = icmp eq i32 %0, 0, !dbg !35
br i1 %tobool8, label %for.end, label %for.body.preheader, !dbg !35
br label %for.body, !dbg !36
for.body: ; preds = %for.body.preheader
- %1 = load i8, i8* undef, align 1, !dbg !36, !tbaa !38
+ %1 = load i8, ptr undef, align 1, !dbg !36, !tbaa !38
%conv = zext i8 %1 to i32, !dbg !36
%cmp = icmp sgt i32 %0, %conv, !dbg !39
br i1 %cmp, label %if.end, label %if.then, !dbg !40
if.then: ; preds = %for.body
tail call void @llvm.dbg.value(metadata i32 %conv, metadata !41, metadata !DIExpression()), !dbg !43
%idxprom5 = zext i8 %1 to i64, !dbg !44
- %call = tail call i32 bitcast (i32 (...)* @g to i32 (i32)*)(i32 signext undef) #3, !dbg !45
+ %call = tail call i32 @g(i32 signext undef) #3, !dbg !45
br label %if.end, !dbg !46
if.end: ; preds = %if.then, %for.body
br i1 %tobool, label %if.end, label %cleanup7.critedge, !dbg !21
if.end: ; preds = %entry
- %call6 = call i32 bitcast (i32 (...)* @j to i32 (i32)*)(i32 signext %conv)
+ %call6 = call i32 @j(i32 signext %conv)
#4, !dbg !22
br label %cleanup7, !dbg !23
cleanup7.critedge: ; preds = %entry
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull undef) #4, !dbg !24
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull undef) #4, !dbg !24
br label %cleanup7
cleanup7: ; preds = %cleanup7.critedge,
ret void
}
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
declare i32 @j(...)
; STATIC-NEXT: sd $[[R0]]
%val = alloca i64, align 8
- store i64 and (i64 ptrtoint (void ()* @foo to i64), i64 268435455), i64* %val, align 8
- %0 = load i64, i64* %val, align 8
+ store i64 and (i64 ptrtoint (ptr @foo to i64), i64 268435455), ptr %val, align 8
+ %0 = load i64, ptr %val, align 8
ret void
}
; CHECK: lw $[[R1]], %got(assignSE2partition)($[[R2]])
-%struct.img_par = type { i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], i8**, i32*, i32***, i32**, i32, i32, i32, i32, %struct.Slice*, %struct.macroblock*, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32***, i32***, i32****, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.datapartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (%struct.img_par*, %struct.inp_par*)*, i32, i32, i32, i32 }
-%struct.datapartition = type { %struct.Bitstream*, %struct.DecodingEnvironment, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)* }
-%struct.Bitstream = type { i32, i32, i32, i32, i8*, i32 }
-%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32* }
-%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, void (i32, i32, i32*, i32*)*, void (%struct.syntaxelement*, %struct.img_par*, %struct.DecodingEnvironment*)* }
+%struct.img_par = type { i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], ptr, ptr, ptr, ptr, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, i32, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, i32, i32, i32, i32 }
+%struct.datapartition = type { ptr, %struct.DecodingEnvironment, ptr }
+%struct.Bitstream = type { i32, i32, i32, i32, ptr, i32 }
+%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, ptr, ptr }
+%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr }
%struct.MotionInfoContexts = type { [4 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
%struct.BiContextType = type { i16, i8 }
%struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
%struct.inp_par = type { [1000 x i8], [1000 x i8], [1000 x i8], i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.macroblock = type { i32, [2 x i32], i32, i32, %struct.macroblock*, %struct.macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s* }
+%struct.macroblock = type { i32, [2 x i32], i32, i32, ptr, ptr, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, ptr }
%struct.timeb = type { i32, i16, i16, i16 }
@assignSE2partition = external global [0 x [20 x i32]]
@FIELD_SCAN8x8 = external constant [64 x [2 x i8]]
-define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) {
+define void @readLumaCoeff8x8_CABAC(ptr %img, i32 %b8) {
- %1 = load i32, i32* undef, align 4
+ %1 = load i32, ptr undef, align 4
br i1 false, label %2, label %3
; <label>:2 ; preds = %0
br label %6
; <label>:6 ; preds = %5, %4
- %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]], [64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
+ %7 = phi ptr [ @FIELD_SCAN8x8, %4 ], [ null, %5 ]
br i1 undef, label %switch.lookup6, label %8
switch.lookup6: ; preds = %6
; <label>:9 ; preds = %8
%10 = and i32 %b8, 1
%11 = shl nuw nsw i32 %10, 3
- %12 = getelementptr inbounds %struct.Slice, %struct.Slice* null, i32 0, i32 9
+ %12 = getelementptr inbounds %struct.Slice, ptr null, i32 0, i32 9
br i1 undef, label %.preheader, label %.preheader11
.preheader11: ; preds = %21, %9
br label %15
; <label>:15 ; preds = %14, %13
- %16 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
- %17 = load i32, i32* %16, align 4
- %18 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %17, i32 2
- %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)*, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
- %20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
+ %16 = getelementptr inbounds [0 x [20 x i32]], ptr @assignSE2partition, i32 0, i32 %1, i32 undef
+ %17 = load i32, ptr %16, align 4
+ %18 = getelementptr inbounds %struct.datapartition, ptr null, i32 %17, i32 2
+ %19 = load ptr, ptr %18, align 4
+ %20 = call i32 %19(ptr undef, ptr %img, ptr undef)
br i1 false, label %.loopexit, label %21
; <label>:21 ; preds = %15
%22 = add i32 %coef_ctr.013, 1
%23 = add i32 %22, 0
- %24 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %23, i32 0
+ %24 = getelementptr inbounds [2 x i8], ptr %7, i32 %23, i32 0
%25 = add nsw i32 0, %11
- %26 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
- store i32 0, i32* %26, align 4
+ %26 = getelementptr inbounds %struct.img_par, ptr %img, i32 0, i32 27, i32 undef, i32 %25
+ store i32 0, ptr %26, align 4
%27 = add nsw i32 %k.014, 1
%28 = icmp slt i32 %27, 65
br i1 %28, label %.preheader11, label %.loopexit
br label %31
; <label>:31 ; preds = %30, %29
- %32 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
- %33 = load i32, i32* %32, align 4
- %34 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %33
- %35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
+ %32 = getelementptr inbounds [0 x [20 x i32]], ptr @assignSE2partition, i32 0, i32 %1, i32 undef
+ %33 = load i32, ptr %32, align 4
+ %34 = getelementptr inbounds %struct.datapartition, ptr null, i32 %33
+ %35 = call i32 undef(ptr undef, ptr %img, ptr %34)
br i1 false, label %.loopexit, label %36
; <label>:36 ; preds = %31
- %37 = load i32, i32* undef, align 4
+ %37 = load i32, ptr undef, align 4
%38 = add i32 %coef_ctr.29, 1
%39 = add i32 %38, %37
- %40 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %39, i32 0
- %41 = load i8, i8* %40, align 1
+ %40 = getelementptr inbounds [2 x i8], ptr %7, i32 %39, i32 0
+ %41 = load i8, ptr %40, align 1
%42 = zext i8 %41 to i32
%43 = add nsw i32 %42, %11
- %44 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
- store i32 0, i32* %44, align 4
+ %44 = getelementptr inbounds %struct.img_par, ptr %img, i32 0, i32 27, i32 undef, i32 %43
+ store i32 0, ptr %44, align 4
%45 = add nsw i32 %k.110, 1
%46 = icmp slt i32 %45, 65
br i1 %46, label %.preheader, label %.loopexit
ret void
}
-define void()* @foo() {
+define ptr @foo() {
; CHECK: foo:
; CHECK: lw $[[REG:.*]], %got($bar)($1)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: addiu $2, $[[REG]], %lo($bar)
- ret void()* @bar
+ ret ptr @bar
}
; CHECK: lw $[[R0:[0-9]+]], %got($baz)($
; CHECK: lw ${{[0-9]+}}, %lo($baz)($[[R0]])
call void @foo()
- %1 = load i32, i32* @baz, align 4
+ %1 = load i32, ptr @baz, align 4
ret i32 %1
}
; RUN: llc < %s -march=mipsel | FileCheck %s
@a0 = external global i32
-@b0 = external global i32*
+@b0 = external global ptr
@a1 = external global i32
-@b1 = external global i32*
+@b1 = external global ptr
@a2 = external global i32
-@b2 = external global i32*
+@b2 = external global ptr
@a3 = external global i32
-@b3 = external global i32*
+@b3 = external global ptr
@a4 = external global i32
-@b4 = external global i32*
+@b4 = external global ptr
@a5 = external global i32
-@b5 = external global i32*
+@b5 = external global ptr
@a6 = external global i32
-@b6 = external global i32*
+@b6 = external global ptr
@a7 = external global i32
-@b7 = external global i32*
+@b7 = external global ptr
@a8 = external global i32
-@b8 = external global i32*
+@b8 = external global ptr
@a9 = external global i32
-@b9 = external global i32*
+@b9 = external global ptr
@a10 = external global i32
-@b10 = external global i32*
+@b10 = external global ptr
@a11 = external global i32
-@b11 = external global i32*
+@b11 = external global ptr
@a12 = external global i32
-@b12 = external global i32*
+@b12 = external global ptr
@a13 = external global i32
-@b13 = external global i32*
+@b13 = external global ptr
@a14 = external global i32
-@b14 = external global i32*
+@b14 = external global ptr
@a15 = external global i32
-@b15 = external global i32*
+@b15 = external global ptr
@a16 = external global i32
-@b16 = external global i32*
+@b16 = external global ptr
@a17 = external global i32
-@b17 = external global i32*
+@b17 = external global ptr
@a18 = external global i32
-@b18 = external global i32*
+@b18 = external global ptr
@a19 = external global i32
-@b19 = external global i32*
+@b19 = external global ptr
@a20 = external global i32
-@b20 = external global i32*
+@b20 = external global ptr
@a21 = external global i32
-@b21 = external global i32*
+@b21 = external global ptr
@a22 = external global i32
-@b22 = external global i32*
+@b22 = external global ptr
@a23 = external global i32
-@b23 = external global i32*
+@b23 = external global ptr
@a24 = external global i32
-@b24 = external global i32*
+@b24 = external global ptr
@a25 = external global i32
-@b25 = external global i32*
+@b25 = external global ptr
@a26 = external global i32
-@b26 = external global i32*
+@b26 = external global ptr
@a27 = external global i32
-@b27 = external global i32*
+@b27 = external global ptr
@a28 = external global i32
-@b28 = external global i32*
+@b28 = external global ptr
@a29 = external global i32
-@b29 = external global i32*
-@c0 = external global i32*
-@c1 = external global i32*
-@c2 = external global i32*
-@c3 = external global i32*
-@c4 = external global i32*
-@c5 = external global i32*
-@c6 = external global i32*
-@c7 = external global i32*
-@c8 = external global i32*
-@c9 = external global i32*
-@c10 = external global i32*
-@c11 = external global i32*
-@c12 = external global i32*
-@c13 = external global i32*
-@c14 = external global i32*
-@c15 = external global i32*
-@c16 = external global i32*
-@c17 = external global i32*
-@c18 = external global i32*
-@c19 = external global i32*
-@c20 = external global i32*
-@c21 = external global i32*
-@c22 = external global i32*
-@c23 = external global i32*
-@c24 = external global i32*
-@c25 = external global i32*
-@c26 = external global i32*
-@c27 = external global i32*
-@c28 = external global i32*
-@c29 = external global i32*
+@b29 = external global ptr
+@c0 = external global ptr
+@c1 = external global ptr
+@c2 = external global ptr
+@c3 = external global ptr
+@c4 = external global ptr
+@c5 = external global ptr
+@c6 = external global ptr
+@c7 = external global ptr
+@c8 = external global ptr
+@c9 = external global ptr
+@c10 = external global ptr
+@c11 = external global ptr
+@c12 = external global ptr
+@c13 = external global ptr
+@c14 = external global ptr
+@c15 = external global ptr
+@c16 = external global ptr
+@c17 = external global ptr
+@c18 = external global ptr
+@c19 = external global ptr
+@c20 = external global ptr
+@c21 = external global ptr
+@c22 = external global ptr
+@c23 = external global ptr
+@c24 = external global ptr
+@c25 = external global ptr
+@c26 = external global ptr
+@c27 = external global ptr
+@c28 = external global ptr
+@c29 = external global ptr
define i32 @f1() nounwind {
entry:
; CHECK: lw $ra, {{[0-9]+}}($sp) # 4-byte Folded Reload
; CHECK: jr $ra
- %0 = load i32, i32* @a0, align 4
- %1 = load i32*, i32** @b0, align 4
- store i32 %0, i32* %1, align 4
- %2 = load i32, i32* @a1, align 4
- %3 = load i32*, i32** @b1, align 4
- store i32 %2, i32* %3, align 4
- %4 = load i32, i32* @a2, align 4
- %5 = load i32*, i32** @b2, align 4
- store i32 %4, i32* %5, align 4
- %6 = load i32, i32* @a3, align 4
- %7 = load i32*, i32** @b3, align 4
- store i32 %6, i32* %7, align 4
- %8 = load i32, i32* @a4, align 4
- %9 = load i32*, i32** @b4, align 4
- store i32 %8, i32* %9, align 4
- %10 = load i32, i32* @a5, align 4
- %11 = load i32*, i32** @b5, align 4
- store i32 %10, i32* %11, align 4
- %12 = load i32, i32* @a6, align 4
- %13 = load i32*, i32** @b6, align 4
- store i32 %12, i32* %13, align 4
- %14 = load i32, i32* @a7, align 4
- %15 = load i32*, i32** @b7, align 4
- store i32 %14, i32* %15, align 4
- %16 = load i32, i32* @a8, align 4
- %17 = load i32*, i32** @b8, align 4
- store i32 %16, i32* %17, align 4
- %18 = load i32, i32* @a9, align 4
- %19 = load i32*, i32** @b9, align 4
- store i32 %18, i32* %19, align 4
- %20 = load i32, i32* @a10, align 4
- %21 = load i32*, i32** @b10, align 4
- store i32 %20, i32* %21, align 4
- %22 = load i32, i32* @a11, align 4
- %23 = load i32*, i32** @b11, align 4
- store i32 %22, i32* %23, align 4
- %24 = load i32, i32* @a12, align 4
- %25 = load i32*, i32** @b12, align 4
- store i32 %24, i32* %25, align 4
- %26 = load i32, i32* @a13, align 4
- %27 = load i32*, i32** @b13, align 4
- store i32 %26, i32* %27, align 4
- %28 = load i32, i32* @a14, align 4
- %29 = load i32*, i32** @b14, align 4
- store i32 %28, i32* %29, align 4
- %30 = load i32, i32* @a15, align 4
- %31 = load i32*, i32** @b15, align 4
- store i32 %30, i32* %31, align 4
- %32 = load i32, i32* @a16, align 4
- %33 = load i32*, i32** @b16, align 4
- store i32 %32, i32* %33, align 4
- %34 = load i32, i32* @a17, align 4
- %35 = load i32*, i32** @b17, align 4
- store i32 %34, i32* %35, align 4
- %36 = load i32, i32* @a18, align 4
- %37 = load i32*, i32** @b18, align 4
- store i32 %36, i32* %37, align 4
- %38 = load i32, i32* @a19, align 4
- %39 = load i32*, i32** @b19, align 4
- store i32 %38, i32* %39, align 4
- %40 = load i32, i32* @a20, align 4
- %41 = load i32*, i32** @b20, align 4
- store i32 %40, i32* %41, align 4
- %42 = load i32, i32* @a21, align 4
- %43 = load i32*, i32** @b21, align 4
- store i32 %42, i32* %43, align 4
- %44 = load i32, i32* @a22, align 4
- %45 = load i32*, i32** @b22, align 4
- store i32 %44, i32* %45, align 4
- %46 = load i32, i32* @a23, align 4
- %47 = load i32*, i32** @b23, align 4
- store i32 %46, i32* %47, align 4
- %48 = load i32, i32* @a24, align 4
- %49 = load i32*, i32** @b24, align 4
- store i32 %48, i32* %49, align 4
- %50 = load i32, i32* @a25, align 4
- %51 = load i32*, i32** @b25, align 4
- store i32 %50, i32* %51, align 4
- %52 = load i32, i32* @a26, align 4
- %53 = load i32*, i32** @b26, align 4
- store i32 %52, i32* %53, align 4
- %54 = load i32, i32* @a27, align 4
- %55 = load i32*, i32** @b27, align 4
- store i32 %54, i32* %55, align 4
- %56 = load i32, i32* @a28, align 4
- %57 = load i32*, i32** @b28, align 4
- store i32 %56, i32* %57, align 4
- %58 = load i32, i32* @a29, align 4
- %59 = load i32*, i32** @b29, align 4
- store i32 %58, i32* %59, align 4
- %60 = load i32, i32* @a0, align 4
- %61 = load i32*, i32** @c0, align 4
- store i32 %60, i32* %61, align 4
- %62 = load i32, i32* @a1, align 4
- %63 = load i32*, i32** @c1, align 4
- store i32 %62, i32* %63, align 4
- %64 = load i32, i32* @a2, align 4
- %65 = load i32*, i32** @c2, align 4
- store i32 %64, i32* %65, align 4
- %66 = load i32, i32* @a3, align 4
- %67 = load i32*, i32** @c3, align 4
- store i32 %66, i32* %67, align 4
- %68 = load i32, i32* @a4, align 4
- %69 = load i32*, i32** @c4, align 4
- store i32 %68, i32* %69, align 4
- %70 = load i32, i32* @a5, align 4
- %71 = load i32*, i32** @c5, align 4
- store i32 %70, i32* %71, align 4
- %72 = load i32, i32* @a6, align 4
- %73 = load i32*, i32** @c6, align 4
- store i32 %72, i32* %73, align 4
- %74 = load i32, i32* @a7, align 4
- %75 = load i32*, i32** @c7, align 4
- store i32 %74, i32* %75, align 4
- %76 = load i32, i32* @a8, align 4
- %77 = load i32*, i32** @c8, align 4
- store i32 %76, i32* %77, align 4
- %78 = load i32, i32* @a9, align 4
- %79 = load i32*, i32** @c9, align 4
- store i32 %78, i32* %79, align 4
- %80 = load i32, i32* @a10, align 4
- %81 = load i32*, i32** @c10, align 4
- store i32 %80, i32* %81, align 4
- %82 = load i32, i32* @a11, align 4
- %83 = load i32*, i32** @c11, align 4
- store i32 %82, i32* %83, align 4
- %84 = load i32, i32* @a12, align 4
- %85 = load i32*, i32** @c12, align 4
- store i32 %84, i32* %85, align 4
- %86 = load i32, i32* @a13, align 4
- %87 = load i32*, i32** @c13, align 4
- store i32 %86, i32* %87, align 4
- %88 = load i32, i32* @a14, align 4
- %89 = load i32*, i32** @c14, align 4
- store i32 %88, i32* %89, align 4
- %90 = load i32, i32* @a15, align 4
- %91 = load i32*, i32** @c15, align 4
- store i32 %90, i32* %91, align 4
- %92 = load i32, i32* @a16, align 4
- %93 = load i32*, i32** @c16, align 4
- store i32 %92, i32* %93, align 4
- %94 = load i32, i32* @a17, align 4
- %95 = load i32*, i32** @c17, align 4
- store i32 %94, i32* %95, align 4
- %96 = load i32, i32* @a18, align 4
- %97 = load i32*, i32** @c18, align 4
- store i32 %96, i32* %97, align 4
- %98 = load i32, i32* @a19, align 4
- %99 = load i32*, i32** @c19, align 4
- store i32 %98, i32* %99, align 4
- %100 = load i32, i32* @a20, align 4
- %101 = load i32*, i32** @c20, align 4
- store i32 %100, i32* %101, align 4
- %102 = load i32, i32* @a21, align 4
- %103 = load i32*, i32** @c21, align 4
- store i32 %102, i32* %103, align 4
- %104 = load i32, i32* @a22, align 4
- %105 = load i32*, i32** @c22, align 4
- store i32 %104, i32* %105, align 4
- %106 = load i32, i32* @a23, align 4
- %107 = load i32*, i32** @c23, align 4
- store i32 %106, i32* %107, align 4
- %108 = load i32, i32* @a24, align 4
- %109 = load i32*, i32** @c24, align 4
- store i32 %108, i32* %109, align 4
- %110 = load i32, i32* @a25, align 4
- %111 = load i32*, i32** @c25, align 4
- store i32 %110, i32* %111, align 4
- %112 = load i32, i32* @a26, align 4
- %113 = load i32*, i32** @c26, align 4
- store i32 %112, i32* %113, align 4
- %114 = load i32, i32* @a27, align 4
- %115 = load i32*, i32** @c27, align 4
- store i32 %114, i32* %115, align 4
- %116 = load i32, i32* @a28, align 4
- %117 = load i32*, i32** @c28, align 4
- store i32 %116, i32* %117, align 4
- %118 = load i32, i32* @a29, align 4
- %119 = load i32*, i32** @c29, align 4
- store i32 %118, i32* %119, align 4
- %120 = load i32, i32* @a0, align 4
+ %0 = load i32, ptr @a0, align 4
+ %1 = load ptr, ptr @b0, align 4
+ store i32 %0, ptr %1, align 4
+ %2 = load i32, ptr @a1, align 4
+ %3 = load ptr, ptr @b1, align 4
+ store i32 %2, ptr %3, align 4
+ %4 = load i32, ptr @a2, align 4
+ %5 = load ptr, ptr @b2, align 4
+ store i32 %4, ptr %5, align 4
+ %6 = load i32, ptr @a3, align 4
+ %7 = load ptr, ptr @b3, align 4
+ store i32 %6, ptr %7, align 4
+ %8 = load i32, ptr @a4, align 4
+ %9 = load ptr, ptr @b4, align 4
+ store i32 %8, ptr %9, align 4
+ %10 = load i32, ptr @a5, align 4
+ %11 = load ptr, ptr @b5, align 4
+ store i32 %10, ptr %11, align 4
+ %12 = load i32, ptr @a6, align 4
+ %13 = load ptr, ptr @b6, align 4
+ store i32 %12, ptr %13, align 4
+ %14 = load i32, ptr @a7, align 4
+ %15 = load ptr, ptr @b7, align 4
+ store i32 %14, ptr %15, align 4
+ %16 = load i32, ptr @a8, align 4
+ %17 = load ptr, ptr @b8, align 4
+ store i32 %16, ptr %17, align 4
+ %18 = load i32, ptr @a9, align 4
+ %19 = load ptr, ptr @b9, align 4
+ store i32 %18, ptr %19, align 4
+ %20 = load i32, ptr @a10, align 4
+ %21 = load ptr, ptr @b10, align 4
+ store i32 %20, ptr %21, align 4
+ %22 = load i32, ptr @a11, align 4
+ %23 = load ptr, ptr @b11, align 4
+ store i32 %22, ptr %23, align 4
+ %24 = load i32, ptr @a12, align 4
+ %25 = load ptr, ptr @b12, align 4
+ store i32 %24, ptr %25, align 4
+ %26 = load i32, ptr @a13, align 4
+ %27 = load ptr, ptr @b13, align 4
+ store i32 %26, ptr %27, align 4
+ %28 = load i32, ptr @a14, align 4
+ %29 = load ptr, ptr @b14, align 4
+ store i32 %28, ptr %29, align 4
+ %30 = load i32, ptr @a15, align 4
+ %31 = load ptr, ptr @b15, align 4
+ store i32 %30, ptr %31, align 4
+ %32 = load i32, ptr @a16, align 4
+ %33 = load ptr, ptr @b16, align 4
+ store i32 %32, ptr %33, align 4
+ %34 = load i32, ptr @a17, align 4
+ %35 = load ptr, ptr @b17, align 4
+ store i32 %34, ptr %35, align 4
+ %36 = load i32, ptr @a18, align 4
+ %37 = load ptr, ptr @b18, align 4
+ store i32 %36, ptr %37, align 4
+ %38 = load i32, ptr @a19, align 4
+ %39 = load ptr, ptr @b19, align 4
+ store i32 %38, ptr %39, align 4
+ %40 = load i32, ptr @a20, align 4
+ %41 = load ptr, ptr @b20, align 4
+ store i32 %40, ptr %41, align 4
+ %42 = load i32, ptr @a21, align 4
+ %43 = load ptr, ptr @b21, align 4
+ store i32 %42, ptr %43, align 4
+ %44 = load i32, ptr @a22, align 4
+ %45 = load ptr, ptr @b22, align 4
+ store i32 %44, ptr %45, align 4
+ %46 = load i32, ptr @a23, align 4
+ %47 = load ptr, ptr @b23, align 4
+ store i32 %46, ptr %47, align 4
+ %48 = load i32, ptr @a24, align 4
+ %49 = load ptr, ptr @b24, align 4
+ store i32 %48, ptr %49, align 4
+ %50 = load i32, ptr @a25, align 4
+ %51 = load ptr, ptr @b25, align 4
+ store i32 %50, ptr %51, align 4
+ %52 = load i32, ptr @a26, align 4
+ %53 = load ptr, ptr @b26, align 4
+ store i32 %52, ptr %53, align 4
+ %54 = load i32, ptr @a27, align 4
+ %55 = load ptr, ptr @b27, align 4
+ store i32 %54, ptr %55, align 4
+ %56 = load i32, ptr @a28, align 4
+ %57 = load ptr, ptr @b28, align 4
+ store i32 %56, ptr %57, align 4
+ %58 = load i32, ptr @a29, align 4
+ %59 = load ptr, ptr @b29, align 4
+ store i32 %58, ptr %59, align 4
+ %60 = load i32, ptr @a0, align 4
+ %61 = load ptr, ptr @c0, align 4
+ store i32 %60, ptr %61, align 4
+ %62 = load i32, ptr @a1, align 4
+ %63 = load ptr, ptr @c1, align 4
+ store i32 %62, ptr %63, align 4
+ %64 = load i32, ptr @a2, align 4
+ %65 = load ptr, ptr @c2, align 4
+ store i32 %64, ptr %65, align 4
+ %66 = load i32, ptr @a3, align 4
+ %67 = load ptr, ptr @c3, align 4
+ store i32 %66, ptr %67, align 4
+ %68 = load i32, ptr @a4, align 4
+ %69 = load ptr, ptr @c4, align 4
+ store i32 %68, ptr %69, align 4
+ %70 = load i32, ptr @a5, align 4
+ %71 = load ptr, ptr @c5, align 4
+ store i32 %70, ptr %71, align 4
+ %72 = load i32, ptr @a6, align 4
+ %73 = load ptr, ptr @c6, align 4
+ store i32 %72, ptr %73, align 4
+ %74 = load i32, ptr @a7, align 4
+ %75 = load ptr, ptr @c7, align 4
+ store i32 %74, ptr %75, align 4
+ %76 = load i32, ptr @a8, align 4
+ %77 = load ptr, ptr @c8, align 4
+ store i32 %76, ptr %77, align 4
+ %78 = load i32, ptr @a9, align 4
+ %79 = load ptr, ptr @c9, align 4
+ store i32 %78, ptr %79, align 4
+ %80 = load i32, ptr @a10, align 4
+ %81 = load ptr, ptr @c10, align 4
+ store i32 %80, ptr %81, align 4
+ %82 = load i32, ptr @a11, align 4
+ %83 = load ptr, ptr @c11, align 4
+ store i32 %82, ptr %83, align 4
+ %84 = load i32, ptr @a12, align 4
+ %85 = load ptr, ptr @c12, align 4
+ store i32 %84, ptr %85, align 4
+ %86 = load i32, ptr @a13, align 4
+ %87 = load ptr, ptr @c13, align 4
+ store i32 %86, ptr %87, align 4
+ %88 = load i32, ptr @a14, align 4
+ %89 = load ptr, ptr @c14, align 4
+ store i32 %88, ptr %89, align 4
+ %90 = load i32, ptr @a15, align 4
+ %91 = load ptr, ptr @c15, align 4
+ store i32 %90, ptr %91, align 4
+ %92 = load i32, ptr @a16, align 4
+ %93 = load ptr, ptr @c16, align 4
+ store i32 %92, ptr %93, align 4
+ %94 = load i32, ptr @a17, align 4
+ %95 = load ptr, ptr @c17, align 4
+ store i32 %94, ptr %95, align 4
+ %96 = load i32, ptr @a18, align 4
+ %97 = load ptr, ptr @c18, align 4
+ store i32 %96, ptr %97, align 4
+ %98 = load i32, ptr @a19, align 4
+ %99 = load ptr, ptr @c19, align 4
+ store i32 %98, ptr %99, align 4
+ %100 = load i32, ptr @a20, align 4
+ %101 = load ptr, ptr @c20, align 4
+ store i32 %100, ptr %101, align 4
+ %102 = load i32, ptr @a21, align 4
+ %103 = load ptr, ptr @c21, align 4
+ store i32 %102, ptr %103, align 4
+ %104 = load i32, ptr @a22, align 4
+ %105 = load ptr, ptr @c22, align 4
+ store i32 %104, ptr %105, align 4
+ %106 = load i32, ptr @a23, align 4
+ %107 = load ptr, ptr @c23, align 4
+ store i32 %106, ptr %107, align 4
+ %108 = load i32, ptr @a24, align 4
+ %109 = load ptr, ptr @c24, align 4
+ store i32 %108, ptr %109, align 4
+ %110 = load i32, ptr @a25, align 4
+ %111 = load ptr, ptr @c25, align 4
+ store i32 %110, ptr %111, align 4
+ %112 = load i32, ptr @a26, align 4
+ %113 = load ptr, ptr @c26, align 4
+ store i32 %112, ptr %113, align 4
+ %114 = load i32, ptr @a27, align 4
+ %115 = load ptr, ptr @c27, align 4
+ store i32 %114, ptr %115, align 4
+ %116 = load i32, ptr @a28, align 4
+ %117 = load ptr, ptr @c28, align 4
+ store i32 %116, ptr %117, align 4
+ %118 = load i32, ptr @a29, align 4
+ %119 = load ptr, ptr @c29, align 4
+ store i32 %118, ptr %119, align 4
+ %120 = load i32, ptr @a0, align 4
ret i32 %120
}
; CHECK: rdhwr
; CHECK: .set pop
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
ret i32 %0
}
; Previously we were adding them for local dynamic TLS function pointers and
; function pointers with internal linkage.
-@fnptr_internal = internal global void()* @checkFunctionPointerCall
-@fnptr_internal_const = internal constant void()* @checkFunctionPointerCall
-@fnptr_const = constant void()* @checkFunctionPointerCall
-@fnptr_global = global void()* @checkFunctionPointerCall
+@fnptr_internal = internal global ptr @checkFunctionPointerCall
+@fnptr_internal_const = internal constant ptr @checkFunctionPointerCall
+@fnptr_const = constant ptr @checkFunctionPointerCall
+@fnptr_global = global ptr @checkFunctionPointerCall
define void @checkFunctionPointerCall() {
entry:
; ALL-LABEL: checkFunctionPointerCall:
; ALL-NOT: MIPS_JALR
- %func_internal = load void()*, void()** @fnptr_internal
+ %func_internal = load ptr, ptr @fnptr_internal
call void %func_internal()
- %func_internal_const = load void()*, void()** @fnptr_internal_const
+ %func_internal_const = load ptr, ptr @fnptr_internal_const
call void %func_internal_const()
- %func_const = load void()*, void()** @fnptr_const
+ %func_const = load ptr, ptr @fnptr_const
call void %func_const()
- %func_global = load void()*, void()** @fnptr_global
+ %func_global = load ptr, ptr @fnptr_global
call void %func_global()
ret void
}
-@tls_fnptr_gd = thread_local global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_ld = thread_local(localdynamic) global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_ie = thread_local(initialexec) global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_le = thread_local(localexec) global void()* @checkTlsFunctionPointerCall
+@tls_fnptr_gd = thread_local global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_ld = thread_local(localdynamic) global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_ie = thread_local(initialexec) global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_le = thread_local(localexec) global ptr @checkTlsFunctionPointerCall
define void @checkTlsFunctionPointerCall() {
entry:
; JALR-ALL: .reloc {{.+}}MIPS_JALR, __tls_get_addr
; NORELOC-NOT: .reloc
; ALL-NOT: _MIPS_JALR
- %func_gd = load void()*, void()** @tls_fnptr_gd
+ %func_gd = load ptr, ptr @tls_fnptr_gd
call void %func_gd()
- %func_ld = load void()*, void()** @tls_fnptr_ld
+ %func_ld = load ptr, ptr @tls_fnptr_ld
call void %func_ld()
- %func_ie = load void()*, void()** @tls_fnptr_ie
+ %func_ie = load ptr, ptr @tls_fnptr_ie
call void %func_ie()
- %func_le = load void()*, void()** @tls_fnptr_le
+ %func_le = load ptr, ptr @tls_fnptr_le
call void %func_le()
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%rem = srem i32 %0, %1
; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mfhi ${{[0-9]+}}
- store i32 %rem, i32* @kkkk, align 4
+ store i32 %rem, ptr @kkkk, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @iiii, align 4
- %1 = load i32, i32* @jjjj, align 4
+ %0 = load i32, ptr @iiii, align 4
+ %1 = load i32, ptr @jjjj, align 4
%rem = urem i32 %0, %1
; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}}
; 16: mfhi ${{[0-9]+}}
- store i32 %rem, i32* @kkkk, align 4
+ store i32 %rem, ptr @kkkk, align 4
ret void
}
; RUN: llc -march=mipsel -verify-machineinstrs < %s | FileCheck %s
-define i8* @f1() nounwind {
+define ptr @f1() nounwind {
entry:
- %0 = call i8* @llvm.returnaddress(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.returnaddress(i32 0)
+ ret ptr %0
; CHECK: move $2, $ra
}
-define i8* @f2() nounwind {
+define ptr @f2() nounwind {
entry:
call void @g()
- %0 = call i8* @llvm.returnaddress(i32 0)
- ret i8* %0
+ %0 = call ptr @llvm.returnaddress(i32 0)
+ ret ptr %0
; CHECK: move $[[R0:[0-9]+]], $ra
; CHECK: jal
; CHECK: move $2, $[[R0]]
}
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
declare void @g()
; RUN: not llc -march=mips < %s 2>&1 | FileCheck %s
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
-define i8* @f() nounwind {
+define ptr @f() nounwind {
entry:
- %0 = call i8* @llvm.returnaddress(i32 1)
- ret i8* %0
+ %0 = call ptr @llvm.returnaddress(i32 1)
+ ret ptr %0
; CHECK: error: return address can be determined only for current frame
}
define void @it() #0 {
entry:
%call = call i32 @i(i32 1)
- store i32 %call, i32* @xi, align 4
+ store i32 %call, ptr @xi, align 4
ret void
; PIC: .ent it
; STATIC: .ent it
define void @ft() #0 {
entry:
%call = call float @f()
- store float %call, float* @x, align 4
+ store float %call, ptr @x, align 4
ret void
; PIC: .ent ft
; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
define void @dt() #0 {
entry:
%call = call double @d()
- store double %call, double* @xd, align 8
+ store double %call, ptr @xd, align 8
ret void
; PIC: .ent dt
; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
; Function Attrs: nounwind
define void @fft() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
%call = call float @ff(float %0)
- store float %call, float* @x, align 4
+ store float %call, ptr @x, align 4
ret void
; PIC: .ent fft
; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
; Function Attrs: nounwind
define void @vft() #0 {
entry:
- %0 = load float, float* @x, align 4
+ %0 = load float, ptr @x, align 4
call void @vf(float %0)
ret void
; PIC: .ent vft
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = trunc i32 %0 to i8
- store i8 %conv, i8* @c, align 1
- %1 = load i32, i32* @i, align 4
- %2 = load i8, i8* @c, align 1
+ store i8 %conv, ptr @c, align 1
+ %1 = load i32, ptr @i, align 4
+ %2 = load i8, ptr @c, align 1
%conv1 = sext i8 %2 to i32
; 16: sb ${{[0-9]+}}, 0(${{[0-9]+}})
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1, i32 %conv1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Function Attrs: nounwind optsize
define void @t() #0 {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp eq i32 %0, %1
%cond = select i1 %cmp, i32 1, i32 3
- store i32 %cond, i32* @k, align 4
+ store i32 %cond, ptr @k, align 4
ret void
; cond-b-short: bteqz $BB0_{{[0-9]+}} # 16 bit inst
}
; Function Attrs: nounwind optsize
define void @t() #0 {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp ne i32 %0, %1
%cond = select i1 %cmp, i32 1, i32 3
- store i32 %cond, i32* @k, align 4
+ store i32 %cond, ptr @k, align 4
; cond-b-short: btnez $BB0_{{[0-9]+}} # 16 bit inst
ret void
}
define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp eq i32 %0, 10
- %1 = load i32, i32* @i, align 4
- %2 = load i32, i32* @j, align 4
+ %1 = load i32, ptr @i, align 4
+ %2 = load i32, ptr @j, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @i, align 4
+ store i32 %cond, ptr @i, align 4
ret void
}
define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp ne i32 %0, 10
- %1 = load i32, i32* @i, align 4
- %2 = load i32, i32* @j, align 4
+ %1 = load i32, ptr @i, align 4
+ %2 = load i32, ptr @j, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @i, align 4
+ store i32 %cond, ptr @i, align 4
ret void
}
define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp slt i32 %0, 10
- %1 = load i32, i32* @j, align 4
- %2 = load i32, i32* @i, align 4
+ %1 = load i32, ptr @j, align 4
+ %2 = load i32, ptr @i, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @i, align 4
+ store i32 %cond, ptr @i, align 4
ret void
}
; 64R6-NEXT: jr $ra
; 64R6-NEXT: or $2, $1, $2
entry:
- %tmp = load double, double* @d2, align 8
- %tmp1 = load double, double* @d3, align 8
+ %tmp = load double, ptr @d2, align 8
+ %tmp1 = load double, ptr @d3, align 8
%cmp = fcmp oeq double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
; 64R6-NEXT: jr $ra
; 64R6-NEXT: or $2, $1, $2
entry:
- %tmp = load double, double* @d2, align 8
- %tmp1 = load double, double* @d3, align 8
+ %tmp = load double, ptr @d2, align 8
+ %tmp1 = load double, ptr @d3, align 8
%cmp = fcmp olt double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
; 64R6-NEXT: jr $ra
; 64R6-NEXT: or $2, $1, $2
entry:
- %tmp = load double, double* @d2, align 8
- %tmp1 = load double, double* @d3, align 8
+ %tmp = load double, ptr @d2, align 8
+ %tmp1 = load double, ptr @d3, align 8
%cmp = fcmp ogt double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
; SOURCE-SCHED: mtc1
; SOURCE-SCHED: c.olt.s
; SOURCE-SCHED: jr
- store float 0.000000e+00, float* @gf0, align 4
- store float 1.000000e+00, float* @gf1, align 4
+ store float 0.000000e+00, ptr @gf0, align 4
+ store float 1.000000e+00, ptr @gf1, align 4
%cmp = fcmp olt float %a, %b
%conv = zext i1 %cmp to i32
%conv1 = sitofp i32 %conv to float
define double @select_cc_f64(double %a, double %b) nounwind {
entry:
- store double 0.000000e+00, double* @gd0, align 8
- store double 1.000000e+00, double* @gd1, align 8
+ store double 0.000000e+00, ptr @gd0, align 8
+ store double 1.000000e+00, ptr @gd1, align 8
%cmp = fcmp olt double %a, %b
%conv = zext i1 %cmp to i32
%conv1 = sitofp i32 %conv to double
define void @foo() nounwind {
entry:
%0 = alloca [2 x i8], align 32
- %1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
- store i8 1, i8* %1
- %2 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 1
- store i8 1, i8* %2
+ store i8 1, ptr %0
+ %1 = getelementptr inbounds [2 x i8], ptr %0, i32 0, i32 1
+ store i8 1, ptr %1
ret void
}
define void @calc_seleq() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %2 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @f, align 4
br label %cond.end
cond.false: ; preds = %entry
- %3 = load i32, i32* @t, align 4
+ %3 = load i32, ptr @t, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %4 = load i32, i32* @b, align 4
- %5 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %4 = load i32, ptr @b, align 4
+ %5 = load i32, ptr @a, align 4
%cmp1 = icmp eq i32 %4, %5
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %6 = load i32, i32* @f, align 4
+ %6 = load i32, ptr @f, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %7 = load i32, i32* @t, align 4
+ %7 = load i32, ptr @t, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %8 = load i32, i32* @c, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %8 = load i32, ptr @c, align 4
+ %9 = load i32, ptr @a, align 4
%cmp6 = icmp eq i32 %8, %9
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %10 = load i32, i32* @t, align 4
+ %10 = load i32, ptr @t, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %11 = load i32, i32* @f, align 4
+ %11 = load i32, ptr @f, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %12 = load i32, i32* @a, align 4
- %13 = load i32, i32* @c, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %12 = load i32, ptr @a, align 4
+ %13 = load i32, ptr @c, align 4
%cmp11 = icmp eq i32 %12, %13
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %14 = load i32, i32* @t, align 4
+ %14 = load i32, ptr @t, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %15 = load i32, i32* @f, align 4
+ %15 = load i32, ptr @f, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_seleqk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp eq i32 %0, 1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %1 = load i32, i32* @t, align 4
+ %1 = load i32, ptr @t, align 4
br label %cond.end
cond.false: ; preds = %entry
- %2 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @f, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %3 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %3 = load i32, ptr @a, align 4
%cmp1 = icmp eq i32 %3, 1000
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %4 = load i32, i32* @f, align 4
+ %4 = load i32, ptr @f, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %5 = load i32, i32* @t, align 4
+ %5 = load i32, ptr @t, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %6 = load i32, i32* @b, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %6 = load i32, ptr @b, align 4
%cmp6 = icmp eq i32 %6, 3
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %7 = load i32, i32* @f, align 4
+ %7 = load i32, ptr @f, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %8 = load i32, i32* @t, align 4
+ %8 = load i32, ptr @t, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %9 = load i32, i32* @b, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %9 = load i32, ptr @b, align 4
%cmp11 = icmp eq i32 %9, 1000
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %10 = load i32, i32* @t, align 4
+ %10 = load i32, ptr @t, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %11 = load i32, i32* @f, align 4
+ %11 = load i32, ptr @f, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp sge i32 %0, 1000
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %1 = load i32, i32* @f, align 4
+ %1 = load i32, ptr @f, align 4
br label %cond.end
cond.false: ; preds = %entry
- %2 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @t, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %3 = load i32, i32* @b, align 4
+ store i32 %cond, ptr @z1, align 4
+ %3 = load i32, ptr @b, align 4
%cmp1 = icmp sge i32 %3, 1
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %4 = load i32, i32* @t, align 4
+ %4 = load i32, ptr @t, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %5 = load i32, i32* @f, align 4
+ %5 = load i32, ptr @f, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %6 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %6 = load i32, ptr @c, align 4
%cmp6 = icmp sge i32 %6, 2
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %7 = load i32, i32* @t, align 4
+ %7 = load i32, ptr @t, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %8 = load i32, i32* @f, align 4
+ %8 = load i32, ptr @f, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %9 = load i32, ptr @a, align 4
%cmp11 = icmp sge i32 %9, 2
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %10 = load i32, i32* @t, align 4
+ %10 = load i32, ptr @t, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %11 = load i32, i32* @f, align 4
+ %11 = load i32, ptr @f, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define i32 @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
%retval = alloca i32, align 4
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp sgt i32 %0, %1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %2 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @f, align 4
br label %cond.end
cond.false: ; preds = %entry
- %3 = load i32, i32* @t, align 4
+ %3 = load i32, ptr @t, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %4 = load i32, i32* @b, align 4
- %5 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %4 = load i32, ptr @b, align 4
+ %5 = load i32, ptr @a, align 4
%cmp1 = icmp sgt i32 %4, %5
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %6 = load i32, i32* @t, align 4
+ %6 = load i32, ptr @t, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %7 = load i32, i32* @f, align 4
+ %7 = load i32, ptr @f, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %8 = load i32, i32* @c, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %8 = load i32, ptr @c, align 4
+ %9 = load i32, ptr @a, align 4
%cmp6 = icmp sgt i32 %8, %9
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %10 = load i32, i32* @f, align 4
+ %10 = load i32, ptr @f, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %11 = load i32, i32* @t, align 4
+ %11 = load i32, ptr @t, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %12 = load i32, i32* @a, align 4
- %13 = load i32, i32* @c, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %12 = load i32, ptr @a, align 4
+ %13 = load i32, ptr @c, align 4
%cmp11 = icmp sgt i32 %12, %13
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %14 = load i32, i32* @f, align 4
+ %14 = load i32, ptr @f, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %15 = load i32, i32* @t, align 4
+ %15 = load i32, ptr @t, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
- %16 = load i32, i32* %retval
+ store i32 %cond15, ptr @z4, align 4
+ %16 = load i32, ptr %retval
ret i32 %16
}
define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp sle i32 %0, %1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %2 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @t, align 4
br label %cond.end
cond.false: ; preds = %entry
- %3 = load i32, i32* @f, align 4
+ %3 = load i32, ptr @f, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %4 = load i32, i32* @b, align 4
- %5 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %4 = load i32, ptr @b, align 4
+ %5 = load i32, ptr @a, align 4
%cmp1 = icmp sle i32 %4, %5
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %6 = load i32, i32* @f, align 4
+ %6 = load i32, ptr @f, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %7 = load i32, i32* @t, align 4
+ %7 = load i32, ptr @t, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %8 = load i32, i32* @c, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %8 = load i32, ptr @c, align 4
+ %9 = load i32, ptr @a, align 4
%cmp6 = icmp sle i32 %8, %9
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %10 = load i32, i32* @t, align 4
+ %10 = load i32, ptr @t, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %11 = load i32, i32* @f, align 4
+ %11 = load i32, ptr @f, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %12 = load i32, i32* @a, align 4
- %13 = load i32, i32* @c, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %12 = load i32, ptr @a, align 4
+ %13 = load i32, ptr @c, align 4
%cmp11 = icmp sle i32 %12, %13
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %14 = load i32, i32* @t, align 4
+ %14 = load i32, ptr @t, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %15 = load i32, i32* @f, align 4
+ %15 = load i32, ptr @f, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selltk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp slt i32 %0, 1000
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %1 = load i32, i32* @t, align 4
+ %1 = load i32, ptr @t, align 4
br label %cond.end
cond.false: ; preds = %entry
- %2 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @f, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %3 = load i32, i32* @b, align 4
+ store i32 %cond, ptr @z1, align 4
+ %3 = load i32, ptr @b, align 4
%cmp1 = icmp slt i32 %3, 2
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %4 = load i32, i32* @f, align 4
+ %4 = load i32, ptr @f, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %5 = load i32, i32* @t, align 4
+ %5 = load i32, ptr @t, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %6 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %6 = load i32, ptr @c, align 4
%cmp6 = icmp sgt i32 %6, 2
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %7 = load i32, i32* @f, align 4
+ %7 = load i32, ptr @f, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %8 = load i32, i32* @t, align 4
+ %8 = load i32, ptr @t, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %9 = load i32, ptr @a, align 4
%cmp11 = icmp sgt i32 %9, 2
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %10 = load i32, i32* @f, align 4
+ %10 = load i32, ptr @f, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %11 = load i32, i32* @t, align 4
+ %11 = load i32, ptr @t, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_seleq() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp ne i32 %0, %1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %2 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @f, align 4
br label %cond.end
cond.false: ; preds = %entry
- %3 = load i32, i32* @t, align 4
+ %3 = load i32, ptr @t, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %4 = load i32, i32* @b, align 4
- %5 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %4 = load i32, ptr @b, align 4
+ %5 = load i32, ptr @a, align 4
%cmp1 = icmp ne i32 %4, %5
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %6 = load i32, i32* @f, align 4
+ %6 = load i32, ptr @f, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %7 = load i32, i32* @t, align 4
+ %7 = load i32, ptr @t, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %8 = load i32, i32* @c, align 4
- %9 = load i32, i32* @a, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %8 = load i32, ptr @c, align 4
+ %9 = load i32, ptr @a, align 4
%cmp6 = icmp ne i32 %8, %9
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %10 = load i32, i32* @t, align 4
+ %10 = load i32, ptr @t, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %11 = load i32, i32* @f, align 4
+ %11 = load i32, ptr @f, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %12 = load i32, i32* @a, align 4
- %13 = load i32, i32* @c, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %12 = load i32, ptr @a, align 4
+ %13 = load i32, ptr @c, align 4
%cmp11 = icmp ne i32 %12, %13
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %14 = load i32, i32* @t, align 4
+ %14 = load i32, ptr @t, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %15 = load i32, i32* @f, align 4
+ %15 = load i32, ptr @f, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp ne i32 %0, 1
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %1 = load i32, i32* @f, align 4
+ %1 = load i32, ptr @f, align 4
br label %cond.end
cond.false: ; preds = %entry
- %2 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @t, align 4
br label %cond.end
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
- store i32 %cond, i32* @z1, align 4
- %3 = load i32, i32* @a, align 4
+ store i32 %cond, ptr @z1, align 4
+ %3 = load i32, ptr @a, align 4
%cmp1 = icmp ne i32 %3, 1000
br i1 %cmp1, label %cond.true2, label %cond.false3
cond.true2: ; preds = %cond.end
- %4 = load i32, i32* @t, align 4
+ %4 = load i32, ptr @t, align 4
br label %cond.end4
cond.false3: ; preds = %cond.end
- %5 = load i32, i32* @f, align 4
+ %5 = load i32, ptr @f, align 4
br label %cond.end4
cond.end4: ; preds = %cond.false3, %cond.true2
%cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
- store i32 %cond5, i32* @z2, align 4
- %6 = load i32, i32* @b, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %6 = load i32, ptr @b, align 4
%cmp6 = icmp ne i32 %6, 3
br i1 %cmp6, label %cond.true7, label %cond.false8
cond.true7: ; preds = %cond.end4
- %7 = load i32, i32* @t, align 4
+ %7 = load i32, ptr @t, align 4
br label %cond.end9
cond.false8: ; preds = %cond.end4
- %8 = load i32, i32* @f, align 4
+ %8 = load i32, ptr @f, align 4
br label %cond.end9
cond.end9: ; preds = %cond.false8, %cond.true7
%cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
- store i32 %cond10, i32* @z3, align 4
- %9 = load i32, i32* @b, align 4
+ store i32 %cond10, ptr @z3, align 4
+ %9 = load i32, ptr @b, align 4
%cmp11 = icmp ne i32 %9, 1000
br i1 %cmp11, label %cond.true12, label %cond.false13
cond.true12: ; preds = %cond.end9
- %10 = load i32, i32* @f, align 4
+ %10 = load i32, ptr @f, align 4
br label %cond.end14
cond.false13: ; preds = %cond.end9
- %11 = load i32, i32* @t, align 4
+ %11 = load i32, ptr @t, align 4
br label %cond.end14
cond.end14: ; preds = %cond.false13, %cond.true12
%cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define i32 @main() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
entry:
call void @calc_z() "target-cpu"="mips16" "target-features"="+mips16,+o32"
- %0 = load i32, i32* @z1, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32"
- %1 = load i32, i32* @z2, align 4
- %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32"
- %2 = load i32, i32* @z3, align 4
- %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32"
- %3 = load i32, i32* @z4, align 4
- %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+ %0 = load i32, ptr @z1, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+ %1 = load i32, ptr @z2, align 4
+ %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+ %2 = load i32, ptr @z3, align 4
+ %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+ %3 = load i32, ptr @z4, align 4
+ %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32"
ret i32 0
}
-declare i32 @printf(i8*, ...) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+declare i32 @printf(ptr, ...) "target-cpu"="mips16" "target-features"="+mips16,+o32"
attributes #0 = { nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" }
attributes #1 = { "target-cpu"="mips16" "target-features"="+mips16,+o32" }
define void @calc_seleq() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp eq i32 %0, %1
- %2 = load i32, i32* @f, align 4
- %3 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @f, align 4
+ %3 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- store i32 %cond, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp eq i32 %4, %0
%cond10 = select i1 %cmp6, i32 %3, i32 %2
- store i32 %cond10, i32* @z3, align 4
- store i32 %cond10, i32* @z4, align 4
+ store i32 %cond10, ptr @z3, align 4
+ store i32 %cond10, ptr @z4, align 4
ret void
}
define void @calc_seleqk() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp eq i32 %0, 1
- %1 = load i32, i32* @t, align 4
- %2 = load i32, i32* @f, align 4
+ %1 = load i32, ptr @t, align 4
+ %2 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: cmpi ${{[0-9]+}}, 1
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp eq i32 %0, 10
%cond5 = select i1 %cmp1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %3 = load i32, i32* @b, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %3 = load i32, ptr @b, align 4
%cmp6 = icmp eq i32 %3, 3
%cond10 = select i1 %cmp6, i32 %2, i32 %1
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
; 16: cmpi ${{[0-9]+}}, 10
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp11 = icmp eq i32 %3, 10
%cond15 = select i1 %cmp11, i32 %1, i32 %2
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_seleqz() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp eq i32 %0, 0
- %1 = load i32, i32* @t, align 4
- %2 = load i32, i32* @f, align 4
+ %1 = load i32, ptr @t, align 4
+ %2 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- %3 = load i32, i32* @b, align 4
+ %3 = load i32, ptr @b, align 4
%cmp1 = icmp eq i32 %3, 0
%cond5 = select i1 %cmp1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp eq i32 %4, 0
%cond10 = select i1 %cmp6, i32 %1, i32 %2
- store i32 %cond10, i32* @z3, align 4
- store i32 %cond, i32* @z4, align 4
+ store i32 %cond10, ptr @z3, align 4
+ store i32 %cond, ptr @z4, align 4
ret void
}
define void @calc_selge() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp sge i32 %0, %1
- %2 = load i32, i32* @f, align 4
- %3 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @f, align 4
+ %3 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp sge i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp sge i32 %4, %0
%cond10 = select i1 %cmp6, i32 %3, i32 %2
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp sge i32 %0, %4
%cond15 = select i1 %cmp11, i32 %3, i32 %2
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define i32 @calc_selgt() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp sgt i32 %0, %1
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- %2 = load i32, i32* @f, align 4
- %3 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @f, align 4
+ %3 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
%cmp1 = icmp sgt i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp sgt i32 %4, %0
%cond10 = select i1 %cmp6, i32 %2, i32 %3
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp sgt i32 %0, %4
%cond15 = select i1 %cmp11, i32 %2, i32 %3
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret i32 undef
}
define void @calc_selle() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp sle i32 %0, %1
- %2 = load i32, i32* @t, align 4
- %3 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @t, align 4
+ %3 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp sle i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp sle i32 %4, %0
%cond10 = select i1 %cmp6, i32 %2, i32 %3
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp sle i32 %0, %4
%cond15 = select i1 %cmp11, i32 %2, i32 %3
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selltk() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp slt i32 %0, 10
- %1 = load i32, i32* @t, align 4
- %2 = load i32, i32* @f, align 4
+ %1 = load i32, ptr @t, align 4
+ %2 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: slti ${{[0-9]+}}, {{[0-9]+}}
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- %3 = load i32, i32* @b, align 4
+ %3 = load i32, ptr @b, align 4
%cmp1 = icmp slt i32 %3, 2
%cond5 = select i1 %cmp1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp sgt i32 %4, 2
%cond10 = select i1 %cmp6, i32 %2, i32 %1
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp sgt i32 %0, 2
%cond15 = select i1 %cmp11, i32 %2, i32 %1
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selne() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp ne i32 %0, %1
- %2 = load i32, i32* @t, align 4
- %3 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @t, align 4
+ %3 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- store i32 %cond, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp ne i32 %4, %0
%cond10 = select i1 %cmp6, i32 %3, i32 %2
- store i32 %cond10, i32* @z3, align 4
- store i32 %cond10, i32* @z4, align 4
+ store i32 %cond10, ptr @z3, align 4
+ store i32 %cond10, ptr @z4, align 4
ret void
}
define void @calc_selnek() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp ne i32 %0, 1
- %1 = load i32, i32* @f, align 4
- %2 = load i32, i32* @t, align 4
+ %1 = load i32, ptr @f, align 4
+ %2 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: cmpi ${{[0-9]+}}, 1
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp ne i32 %0, 10
%cond5 = select i1 %cmp1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %3 = load i32, i32* @b, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %3 = load i32, ptr @b, align 4
%cmp6 = icmp ne i32 %3, 3
%cond10 = select i1 %cmp6, i32 %2, i32 %1
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
; 16: cmpi ${{[0-9]+}}, 10
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp11 = icmp ne i32 %3, 10
%cond15 = select i1 %cmp11, i32 %1, i32 %2
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selnez() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%cmp = icmp ne i32 %0, 0
- %1 = load i32, i32* @f, align 4
- %2 = load i32, i32* @t, align 4
+ %1 = load i32, ptr @f, align 4
+ %2 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- %3 = load i32, i32* @b, align 4
+ %3 = load i32, ptr @b, align 4
%cmp1 = icmp ne i32 %3, 0
%cond5 = select i1 %cmp1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp ne i32 %4, 0
%cond10 = select i1 %cmp6, i32 %1, i32 %2
- store i32 %cond10, i32* @z3, align 4
- store i32 %cond, i32* @z4, align 4
+ store i32 %cond10, ptr @z3, align 4
+ store i32 %cond, ptr @z4, align 4
ret void
}
define void @calc_selnez2() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%tobool = icmp ne i32 %0, 0
- %1 = load i32, i32* @f, align 4
- %2 = load i32, i32* @t, align 4
+ %1 = load i32, ptr @f, align 4
+ %2 = load i32, ptr @t, align 4
%cond = select i1 %tobool, i32 %1, i32 %2
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
- %3 = load i32, i32* @b, align 4
+ %3 = load i32, ptr @b, align 4
%tobool1 = icmp ne i32 %3, 0
%cond5 = select i1 %tobool1, i32 %2, i32 %1
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%tobool6 = icmp ne i32 %4, 0
%cond10 = select i1 %tobool6, i32 %1, i32 %2
- store i32 %cond10, i32* @z3, align 4
- store i32 %cond, i32* @z4, align 4
+ store i32 %cond10, ptr @z3, align 4
+ store i32 %cond, ptr @z4, align 4
ret void
}
define void @calc_seluge() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp uge i32 %0, %1
- %2 = load i32, i32* @f, align 4
- %3 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @f, align 4
+ %3 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp uge i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp uge i32 %4, %0
%cond10 = select i1 %cmp6, i32 %3, i32 %2
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp uge i32 %0, %4
%cond15 = select i1 %cmp11, i32 %3, i32 %2
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selugt() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp ugt i32 %0, %1
- %2 = load i32, i32* @f, align 4
- %3 = load i32, i32* @t, align 4
+ %2 = load i32, ptr @f, align 4
+ %3 = load i32, ptr @t, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 16: btnez $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp ugt i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp ugt i32 %4, %0
%cond10 = select i1 %cmp6, i32 %2, i32 %3
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp ugt i32 %0, %4
%cond15 = select i1 %cmp11, i32 %2, i32 %3
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
define void @calc_selule() nounwind {
entry:
- %0 = load i32, i32* @a, align 4
- %1 = load i32, i32* @b, align 4
+ %0 = load i32, ptr @a, align 4
+ %1 = load i32, ptr @b, align 4
%cmp = icmp ule i32 %0, %1
- %2 = load i32, i32* @t, align 4
- %3 = load i32, i32* @f, align 4
+ %2 = load i32, ptr @t, align 4
+ %3 = load i32, ptr @f, align 4
%cond = select i1 %cmp, i32 %2, i32 %3
- store i32 %cond, i32* @z1, align 4
+ store i32 %cond, ptr @z1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: move ${{[0-9]+}}, ${{[0-9]+}}
%cmp1 = icmp ule i32 %1, %0
%cond5 = select i1 %cmp1, i32 %3, i32 %2
- store i32 %cond5, i32* @z2, align 4
- %4 = load i32, i32* @c, align 4
+ store i32 %cond5, ptr @z2, align 4
+ %4 = load i32, ptr @c, align 4
%cmp6 = icmp ule i32 %4, %0
%cond10 = select i1 %cmp6, i32 %2, i32 %3
- store i32 %cond10, i32* @z3, align 4
+ store i32 %cond10, ptr @z3, align 4
%cmp11 = icmp ule i32 %0, %4
%cond15 = select i1 %cmp11, i32 %2, i32 %3
- store i32 %cond15, i32* @z4, align 4
+ store i32 %cond15, ptr @z4, align 4
ret void
}
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
br i1 %cmp, label %if.then, label %if.end
if.then:
- store i32 %a, i32* @g1, align 4
+ store i32 %a, ptr @g1, align 4
br label %if.end
if.end:
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp eq i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: xor $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}}
; 16: sltiu $[[REGISTER:[0-9A-Ba-b_]+]], 1
; MMR6: sltiu ${{[0-9]+}}, ${{[0-9]+}}, 1
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%cmp = icmp eq i32 %0, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltiu ${{[0-9]+}}, 1
; MMR6: sltiu ${{[0-9]+}}, ${{[0-9]+}}, 1
; 16: move ${{[0-9]+}}, $24
- %1 = load i32, i32* @j, align 4
+ %1 = load i32, ptr @j, align 4
%cmp1 = icmp eq i32 %1, 99
%conv2 = zext i1 %cmp1 to i32
- store i32 %conv2, i32* @r2, align 4
+ store i32 %conv2, ptr @r2, align 4
; 16: xor $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}}
; 16: sltiu $[[REGISTER:[0-9A-Ba-b_]+]], 1
; MMR6: sltiu ${{[0-9]+}}, ${{[0-9]+}}, 1
define void @test() nounwind {
entry:
- %0 = load i32, i32* @k, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @k, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp sge i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: slt ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move $[[REGISTER:[0-9]+]], $24
; 16: xor $[[REGISTER]], ${{[0-9]+}}
- %2 = load i32, i32* @m, align 4
+ %2 = load i32, ptr @m, align 4
%cmp1 = icmp sge i32 %0, %2
%conv2 = zext i1 %cmp1 to i32
- store i32 %conv2, i32* @r2, align 4
+ store i32 %conv2, ptr @r2, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @k, align 4
%cmp = icmp sgt i32 %0, -32769
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: slti ${{[0-9]+}}, -32768
; MMR6: slt ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp sle i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: slt ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move $[[REGISTER:[0-9]+]], $24
; 16: xor $[[REGISTER]], ${{[0-9]+}}
- %2 = load i32, i32* @m, align 4
+ %2 = load i32, ptr @m, align 4
%cmp1 = icmp sle i32 %2, %1
%conv2 = zext i1 %cmp1 to i32
- store i32 %conv2, i32* @r2, align 4
+ store i32 %conv2, ptr @r2, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp slt i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: slt ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @j, align 4
%cmp = icmp slt i32 %0, 10
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: slti $[[REGISTER:[0-9]+]], 10
; MMR6: slti $[[REGISTER:[0-9]+]], $[[REGISTER:[0-9]+]], 10
; 16: move $[[REGISTER]], $24
define void @test() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp ne i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: xor $[[REGISTER:[0-9]+]], ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, $[[REGISTER]]
; MMR6: sltu ${{[0-9]+}}, $zero, ${{[0-9]+}}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @k, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @k, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp uge i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: sltu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move $[[REGISTER:[0-9]+]], $24
; 16: xor $[[REGISTER]], ${{[0-9]+}}
- %2 = load i32, i32* @m, align 4
+ %2 = load i32, ptr @m, align 4
%cmp1 = icmp uge i32 %0, %2
%conv2 = zext i1 %cmp1 to i32
- store i32 %conv2, i32* @r2, align 4
+ store i32 %conv2, ptr @r2, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @k, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @k, align 4
+ %1 = load i32, ptr @j, align 4
%cmp = icmp ugt i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: sltu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp ule i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: sltu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move $[[REGISTER:[0-9]+]], $24
; 16: xor $[[REGISTER]], ${{[0-9]+}}
- %2 = load i32, i32* @m, align 4
+ %2 = load i32, ptr @m, align 4
%cmp1 = icmp ule i32 %2, %1
%conv2 = zext i1 %cmp1 to i32
- store i32 %conv2, i32* @r2, align 4
+ store i32 %conv2, ptr @r2, align 4
ret void
}
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @k, align 4
%cmp = icmp ult i32 %0, %1
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
; MMR6: sltu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: move ${{[0-9]+}}, $24
define void @test() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @j, align 4
%cmp = icmp ult i32 %0, 10
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @r1, align 4
+ store i32 %conv, ptr @r1, align 4
; 16: sltiu ${{[0-9]+}}, 10 # 16 bit inst
; MMR6: sltiu ${{[0-9]+}}, ${{[0-9]+}}, 1
; 16: move ${{[0-9]+}}, $24
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%conv = trunc i32 %0 to i16
- store i16 %conv, i16* @s, align 2
- %1 = load i32, i32* @i, align 4
- %2 = load i16, i16* @s, align 2
+ store i16 %conv, ptr @s, align 2
+ %1 = load i32, ptr @i, align 4
+ %2 = load i16, ptr @s, align 2
%conv1 = sext i16 %2 to i32
; 16: sh ${{[0-9]+}}, 0(${{[0-9]+}})
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1, i32 %conv1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; Function Attrs: nounwind
define void @foo() #0 {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.else
if.then: ; preds = %entry
- call void bitcast (void (...)* @goo to void ()*)()
+ call void @goo()
br label %if.end
if.else: ; preds = %entry
- call void bitcast (void (...)* @hoo to void ()*)()
+ call void @hoo()
br label %if.end
if.end: ; preds = %if.else, %if.then
define void @store_int_float_(float %a) {
entry:
%conv = fptosi float %a to i32
- store i32 %conv, i32* @gint_, align 4
+ store i32 %conv, ptr @gint_, align 4
ret void
}
define void @store_int_double_(double %a) {
entry:
%conv = fptosi double %a to i32
- store i32 %conv, i32* @gint_, align 4
+ store i32 %conv, ptr @gint_, align 4
ret void
}
define void @store_LL_float_(float %a) {
entry:
%conv = fptosi float %a to i64
- store i64 %conv, i64* @gLL_, align 8
+ store i64 %conv, ptr @gLL_, align 8
ret void
}
define void @store_LL_double_(double %a) {
entry:
%conv = fptosi double %a to i64
- store i64 %conv, i64* @gLL_, align 8
+ store i64 %conv, ptr @gLL_, align 8
ret void
}
%tobool1. = or i1 %tobool1, %not.tobool
%lor.ext = zext i1 %tobool1. to i32
%conv = sitofp i32 %lor.ext to double
- %1 = load double, double* @foo12.d4, align 8
+ %1 = load double, ptr @foo12.d4, align 8
%add = fadd double %conv, %1
- store double %add, double* @foo12.d4, align 8
+ store double %add, ptr @foo12.d4, align 8
ret double %add
}
define i32 @main() nounwind {
entry:
; 16: sll ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%shl = shl i32 %0, 4
; 16: sll ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
- store i32 %shl, i32* @j, align 4
- %1 = load i32, i32* @j, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ store i32 %shl, ptr @j, align 4
+ %1 = load i32, ptr @j, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%shl = shl i32 %0, %1
; 16: sllv ${{[0-9]+}}, ${{[0-9]+}}
- store i32 %shl, i32* @i, align 4
- %2 = load i32, i32* @j, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2)
+ store i32 %shl, ptr @i, align 4
+ %2 = load i32, ptr @j, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %2)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @geti() nounwind readonly {
entry:
; CHECK: lw ${{[0-9]+}}, %gp_rel(i)($gp)
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
ret i32 %0
}
define i64 @test_acreg_copy(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
entry:
- %0 = load i64, i64* @g1, align 8
+ %0 = load i64, ptr @g1, align 8
%1 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a0, i32 %a1)
%2 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a2, i32 %a3)
- store i64 %1, i64* @g1, align 8
- store i64 %2, i64* @g2, align 8
+ store i64 %1, ptr @g1, align 8
+ store i64 %2, ptr @g2, align 8
tail call void @foo1()
- store i64 %2, i64* @g3, align 8
+ store i64 %2, ptr @g3, align 8
ret i64 %1
}
%1 = bitcast i32 %b.coerce to <2 x i16>
%cmp3 = icmp slt <2 x i16> %0, %1
%sext = sext <2 x i1> %cmp3 to <2 x i16>
- store <2 x i16> %sext, <2 x i16>* @g4, align 4
+ store <2 x i16> %sext, ptr @g4, align 4
tail call void @foo1()
- %2 = load <2 x i16>, <2 x i16>* @g5, align 4
- %3 = load <2 x i16>, <2 x i16>* @g6, align 4
+ %2 = load <2 x i16>, ptr @g5, align 4
+ %3 = load <2 x i16>, ptr @g6, align 4
%or = select <2 x i1> %cmp3, <2 x i16> %2, <2 x i16> %3
%4 = bitcast <2 x i16> %or to i32
%.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
define void @foo1() #0 {
entry:
%c = alloca [10 x i8], align 1
- %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
- call void @x(i8* %arraydecay)
- %arraydecay1 = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
- call void @x(i8* %arraydecay1)
+ call void @x(ptr %c)
+ call void @x(ptr %c)
ret void
; CHECK: .ent foo1
; CHECK: save $16, $17, $ra, [[FS:[0-9]+]] # 16 bit inst
; CHECK: .end foo1
}
-declare void @x(i8*) #1
+declare void @x(ptr) #1
; Function Attrs: nounwind
define void @foo2() #0 {
entry:
%c = alloca [150 x i8], align 1
- %arraydecay = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
- call void @x(i8* %arraydecay)
- %arraydecay1 = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
- call void @x(i8* %arraydecay1)
+ call void @x(ptr %c)
+ call void @x(ptr %c)
ret void
; CHECK: .ent foo2
; CHECK: save $16, $17, $ra, [[FS:[0-9]+]]
define void @foo3() #0 {
entry:
%call = call float @xf()
- store float %call, float* @f, align 4
+ store float %call, ptr @f, align 4
ret void
; CHECK: .ent foo3
; CHECK: save $16, $17, $ra, $18, [[FS:[0-9]+]]
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%shr = ashr i32 %0, 3
; 16: sra ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %shr)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @j, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @j, align 4
%shr = ashr i32 %0, %1
; 16: srav ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %shr)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%shr = lshr i32 %0, 4
; 16: srl ${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
- store i32 %shr, i32* @j, align 4
- %1 = load i32, i32* @j, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+ store i32 %shr, ptr @j, align 4
+ %1 = load i32, ptr @j, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
- %1 = load i32, i32* @k, align 4
+ %0 = load i32, ptr @i, align 4
+ %1 = load i32, ptr @k, align 4
%shr = lshr i32 %0, %1
; 16: srlv ${{[0-9]+}}, ${{[0-9]+}}
- store i32 %shr, i32* @j, align 4
- %2 = load i32, i32* @j, align 4
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2)
+ store i32 %shr, ptr @j, align 4
+ %2 = load i32, ptr @j, align 4
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %2)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s
-@g1 = external global i32*
+@g1 = external global ptr
; CHECK-LABEL: foo1:
; CHECK: lw ${{[0-9]+}}, %got(g1)
define i32 @foo1() {
entry:
%b = alloca [16 x i32], align 4
- %0 = bitcast [16 x i32]* %b to i8*
- call void @llvm.lifetime.start.p0i8(i64 64, i8* %0)
- %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %b, i32 0, i32 0
+ call void @llvm.lifetime.start.p0(i64 64, ptr %b)
br label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%v.04 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %1 = load i32*, i32** @g1, align 4
- %arrayidx = getelementptr inbounds i32, i32* %1, i32 %i.05
- %2 = load i32, i32* %arrayidx, align 4
- %call = call i32 @foo2(i32 %2, i32* %arraydecay)
+ %0 = load ptr, ptr @g1, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %0, i32 %i.05
+ %1 = load i32, ptr %arrayidx, align 4
+ %call = call i32 @foo2(i32 %1, ptr %b)
%add = add nsw i32 %call, %v.04
%inc = add nsw i32 %i.05, 1
%exitcond = icmp eq i32 %inc, 10000
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- call void @llvm.lifetime.end.p0i8(i64 64, i8* %0)
+ call void @llvm.lifetime.end.p0(i64 64, ptr %b)
ret i32 %add
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare i32 @foo2(i32, i32*)
+declare i32 @foo2(i32, ptr)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16_b
@.str = private unnamed_addr constant [9 x i8] c"%hd %c \0A\00", align 1
-@sp = common global i16* null, align 4
-@cp = common global i8* null, align 4
+@sp = common global ptr null, align 4
+@cp = common global ptr null, align 4
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
define void @test() nounwind {
entry:
%s = alloca i16, align 4
%c = alloca i8, align 4
- store i16 16, i16* %s, align 4
- store i8 99, i8* %c, align 4
- store i16* %s, i16** @sp, align 4
- store i8* %c, i8** @cp, align 4
- %call.i.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
- %0 = load i16*, i16** @sp, align 4
- store i16 32, i16* %0, align 2
- %1 = load i8*, i8** @cp, align 4
- store i8 97, i8* %1, align 1
- %2 = load i16, i16* %s, align 4
- %3 = load i8, i8* %c, align 4
+ store i16 16, ptr %s, align 4
+ store i8 99, ptr %c, align 4
+ store ptr %s, ptr @sp, align 4
+ store ptr %c, ptr @cp, align 4
+ %call.i.i = call i32 (ptr, ...) @printf(ptr @.str, i32 16, i32 99) nounwind
+ %0 = load ptr, ptr @sp, align 4
+ store i16 32, ptr %0, align 2
+ %1 = load ptr, ptr @cp, align 4
+ store i8 97, ptr %1, align 1
+ %2 = load i16, ptr %s, align 4
+ %3 = load i8, ptr %c, align 4
%conv.i = sext i16 %2 to i32
%conv1.i = sext i8 %3 to i32
- %call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
+ %call.i = call i32 (ptr, ...) @printf(ptr @.str, i32 %conv.i, i32 %conv1.i) nounwind
ret void
; 16_b-LABEL: test:
; 16_h-LABEL: test:
; 16_h: lh ${{[0-9]+}}, [[offset2]](${{[0-9]+}})
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @kkkk, align 4
- %1 = load i32, i32* @llll, align 4
+ %0 = load i32, ptr @kkkk, align 4
+ %1 = load i32, ptr @llll, align 4
%add = add nsw i32 %0, 10
%add1 = add nsw i32 %1, 10
- %2 = load i32, i32* @mmmm, align 4
+ %2 = load i32, ptr @mmmm, align 4
%sub = add nsw i32 %2, -3
- %3 = load i32, i32* @nnnn, align 4
+ %3 = load i32, ptr @nnnn, align 4
%add2 = add nsw i32 %3, 10
- %4 = load i32, i32* @oooo, align 4
+ %4 = load i32, ptr @oooo, align 4
%add3 = add nsw i32 %4, 4
- %5 = load i32, i32* @pppp, align 4
+ %5 = load i32, ptr @pppp, align 4
%sub4 = add nsw i32 %5, -5
- %6 = load i32, i32* @qqqq, align 4
+ %6 = load i32, ptr @qqqq, align 4
%sub5 = add nsw i32 %6, -10
- %7 = load i32, i32* @rrrr, align 4
+ %7 = load i32, ptr @rrrr, align 4
%add6 = add nsw i32 %7, 6
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
- %call7 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
+ %call7 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
ret i32 0
}
; 16: sw ${{[0-9]+}}, {{[0-9]+}}($sp) # 4-byte Folded Spill
; 16: sw ${{[0-9]+}}, {{[0-9]+}}($sp) # 4-byte Folded Spill
; 16: lw ${{[0-9]+}}, {{[0-9]+}}($sp) # 4-byte Folded Reload
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @i, align 4
%sub = sub nsw i32 %0, 5
; 16: addiu ${{[0-9]+}}, -{{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @j, align 4
- %1 = load i32, i32* @i, align 4
+ %0 = load i32, ptr @j, align 4
+ %1 = load i32, ptr @i, align 4
%sub = sub nsw i32 %0, %1
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
%struct.unaligned = type <{ i32 }>
-define void @zero_u(%struct.unaligned* nocapture %p) nounwind {
+define void @zero_u(ptr nocapture %p) nounwind {
entry:
; CHECK: swl $zero
; CHECK: swr $zero
- %x = getelementptr inbounds %struct.unaligned, %struct.unaligned* %p, i32 0, i32 0
- store i32 0, i32* %x, align 1
+ store i32 0, ptr %p, align 1
ret void
}
-define void @zero_a(i32* nocapture %p) nounwind {
+define void @zero_a(ptr nocapture %p) nounwind {
entry:
; CHECK: sw $zero
- store i32 0, i32* %p, align 4
+ store i32 0, ptr %p, align 4
ret void
}
; Function Attrs: nounwind optsize
define float @h() {
entry:
- %call = tail call float bitcast (float (...)* @g to float ()*)()
+ %call = tail call float @g()
ret float %call
; CHECK: .ent h
; CHECK: save $16, $ra, $18, 32
; STATIC64: j
; PIC16: jalrc
- %0 = load i32, i32* @g0, align 4
- %1 = load i32, i32* @g1, align 4
- %2 = load i32, i32* @g2, align 4
- %3 = load i32, i32* @g3, align 4
- %4 = load i32, i32* @g4, align 4
- %5 = load i32, i32* @g5, align 4
- %6 = load i32, i32* @g6, align 4
- %7 = load i32, i32* @g7, align 4
- %8 = load i32, i32* @g8, align 4
- %9 = load i32, i32* @g9, align 4
+ %0 = load i32, ptr @g0, align 4
+ %1 = load i32, ptr @g1, align 4
+ %2 = load i32, ptr @g2, align 4
+ %3 = load i32, ptr @g3, align 4
+ %4 = load i32, ptr @g4, align 4
+ %5 = load i32, ptr @g5, align 4
+ %6 = load i32, ptr @g6, align 4
+ %7 = load i32, ptr @g7, align 4
+ %8 = load i32, ptr @g8, align 4
+ %9 = load i32, ptr @g9, align 4
%call = tail call fastcc i32 @callee5(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9)
ret i32 %call
}
@gs1 = external global %struct.S
-declare i32 @callee9(%struct.S* byval(%struct.S))
+declare i32 @callee9(ptr byval(%struct.S))
define i32 @caller9_0() nounwind {
entry:
; PIC64R6: jalrc $25
; PIC16: jalrc
- %call = tail call i32 @callee9(%struct.S* byval(%struct.S) @gs1) nounwind
+ %call = tail call i32 @callee9(ptr byval(%struct.S) @gs1) nounwind
ret i32 %call
}
ret i32 %call
}
-declare i32 @callee11(%struct.S* byval(%struct.S))
+declare i32 @callee11(ptr byval(%struct.S))
define i32 @caller11() nounwind noinline {
entry:
; PIC64R6: jalrc $25
; PIC16: jalrc
- %call = tail call i32 @callee11(%struct.S* byval(%struct.S) @gs1) nounwind
+ %call = tail call i32 @callee11(ptr byval(%struct.S) @gs1) nounwind
ret i32 %call
}
declare i32 @callee12()
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
-define i32 @caller12(%struct.S* nocapture byval(%struct.S) %a0) nounwind {
+define i32 @caller12(ptr nocapture byval(%struct.S) %a0) nounwind {
entry:
; ALL-LABEL: caller12:
; PIC32: jalr $25
; PIC64R6: jalrc $25
; PIC16: jalrc
- %0 = bitcast %struct.S* %a0 to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast (%struct.S* @gs1 to i8*), i8* align 4 %0, i32 8, i1 false)
+ tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 @gs1, ptr align 4 %a0, i32 8, i1 false)
%call = tail call i32 @callee12() nounwind
ret i32 %call
}
; 64-NEXT: jr $ra
; 64-NEXT: sw $1, 0($2)
entry:
- %0 = load i32, i32* @x, align 4
+ %0 = load i32, ptr @x, align 4
%cmp2 = icmp eq i32 %0, 0
- %1 = load i32, i32* @a, align 4
- %2 = load i32, i32* @b, align 4
+ %1 = load i32, ptr @a, align 4
+ %2 = load i32, ptr @b, align 4
%cond = select i1 %cmp2, i32 %1, i32 %2
- store i32 %cond, i32* @x, align 4
+ store i32 %cond, ptr @x, align 4
ret void
}
; RUN: llc -march=mipsel < %s | FileCheck %s
; RUN: llc -march=mips64el < %s | FileCheck %s
-declare i8* @llvm.thread.pointer() nounwind readnone
+declare ptr @llvm.thread.pointer() nounwind readnone
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
; CHECK: rdhwr $3, $29
- %1 = tail call i8* @llvm.thread.pointer()
- ret i8* %1
+ %1 = tail call ptr @llvm.thread.pointer()
+ ret ptr %1
}
; RUN: llc -march=mipsel -relocation-model=pic -disable-mips-delay-filler < %s | FileCheck %s
@foo = thread_local global i32 42
-@bar = hidden thread_local alias i32, i32* @foo
+@bar = hidden thread_local alias i32, ptr @foo
-define i32* @zed() {
+define ptr @zed() {
; CHECK-DAG: __tls_get_addr
; CHECK-DAG: %tlsldm(bar)
- ret i32* @bar
+ ret ptr @bar
}
; ----- no model specified -----
-define i32* @f1() {
+define ptr @f1() {
entry:
- ret i32* @external_gd
+ ret ptr @external_gd
; Non-PIC code can use initial-exec, PIC code has to use general dynamic.
; CHECK-NONPIC-LABEL: f1:
; CHECK-PIC: %tlsgd
}
-define i32* @f2() {
+define ptr @f2() {
entry:
- ret i32* @internal_gd
+ ret ptr @internal_gd
; Non-PIC code can use local exec, PIC code can use local dynamic.
; CHECK-NONPIC-LABEL: f2:
; ----- localdynamic specified -----
-define i32* @f3() {
+define ptr @f3() {
entry:
- ret i32* @external_ld
+ ret ptr @external_ld
; Non-PIC code can use initial exec, PIC should use local dynamic.
; CHECK-NONPIC-LABEL: f3:
; CHECK-PIC: %tlsldm
}
-define i32* @f4() {
+define ptr @f4() {
entry:
- ret i32* @internal_ld
+ ret ptr @internal_ld
; Non-PIC code can use local exec, PIC code can use local dynamic.
; CHECK-NONPIC-LABEL: f4:
; ----- initialexec specified -----
-define i32* @f5() {
+define ptr @f5() {
entry:
- ret i32* @external_ie
+ ret ptr @external_ie
; Non-PIC and PIC code will use initial exec as specified.
; CHECK-NONPIC-LABEL: f5:
; CHECK-PIC: %gottprel
}
-define i32* @f6() {
+define ptr @f6() {
entry:
- ret i32* @internal_ie
+ ret ptr @internal_ie
; Non-PIC code can use local exec, PIC code use initial exec as specified.
; CHECK-NONPIC-LABEL: f6:
; ----- localexec specified -----
-define i32* @f7() {
+define ptr @f7() {
entry:
- ret i32* @external_le
+ ret ptr @external_le
; Non-PIC and PIC code will use local exec as specified.
; CHECK-NONPIC-LABEL: f7:
; CHECK-PIC: %tprel_hi
}
-define i32* @f8() {
+define ptr @f8() {
entry:
- ret i32* @internal_le
+ ret ptr @internal_le
; Non-PIC and PIC code will use local exec as specified.
; CHECK-NONPIC-LABEL: f8:
define dso_local i32 @f1() nounwind {
entry:
- %tmp = load i32, i32* @t1, align 4
+ %tmp = load i32, ptr @t1, align 4
ret i32 %tmp
; STATIC32-LABEL: f1:
define dso_local i32 @f2() nounwind {
entry:
- %tmp = load i32, i32* @t2, align 4
+ %tmp = load i32, ptr @t2, align 4
ret i32 %tmp
; STATICGP32-LABEL: f2:
; MM: addu16 $[[R1:[0-9]+]], $[[R0]], $2
; MM: lw ${{[0-9]+}}, %dtprel_lo(f3.i)($[[R1]])
- %0 = load i32, i32* @f3.i, align 4
+ %0 = load i32, ptr @f3.i, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @f3.i, align 4
+ store i32 %inc, ptr @f3.i, align 4
ret i32 %inc
}
define dso_preemptable i32 @f1() nounwind {
entry:
- %tmp = load i32, i32* @t1, align 4
+ %tmp = load i32, ptr @t1, align 4
ret i32 %tmp
; PIC32-LABEL: f1:
define dso_preemptable i32 @f2() nounwind {
entry:
- %tmp = load i32, i32* @t2, align 4
+ %tmp = load i32, ptr @t2, align 4
ret i32 %tmp
; PIC32-LABEL: f2:
; MM: addu16 $[[R1:[0-9]+]], $[[R0]], $2
; MM: lw ${{[0-9]+}}, %dtprel_lo(f3.i)($[[R1]])
- %0 = load i32, i32* @f3.i, align 4
+ %0 = load i32, ptr @f3.i, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @f3.i, align 4
+ store i32 %inc, ptr @f3.i, align 4
ret i32 %inc
}
define i32 @foo() nounwind readonly {
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
; PIC16: lw ${{[0-9]+}}, %call16(__tls_get_addr)(${{[0-9]+}})
; PIC16: addiu ${{[0-9]+}}, %tlsgd(a)
ret i32 %0
@f.i = internal thread_local unnamed_addr global i32 1, align 4
-define i8* @f(i8* nocapture %a) nounwind {
+define ptr @f(ptr nocapture %a) nounwind {
entry:
- %0 = load i32, i32* @f.i, align 4
+ %0 = load i32, ptr @f.i, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* @f.i, align 4
- %1 = inttoptr i32 %inc to i8*
+ store i32 %inc, ptr @f.i, align 4
+ %1 = inttoptr i32 %inc to ptr
; PIC16: addiu ${{[0-9]+}}, %tlsldm(f.i)
- ret i8* %1
+ ret ptr %1
}
entry:
%b = alloca i32, align 4
%a = alloca float, align 4
- store volatile i32 1, i32* %b, align 4
- %0 = load volatile i32, i32* %b, align 4
+ store volatile i32 1, ptr %b, align 4
+ %0 = load volatile i32, ptr %b, align 4
%conv = uitofp i32 %0 to float
- store float %conv, float* %a, align 4
+ store float %conv, ptr %a, align 4
ret void
}
define i32 @main() nounwind {
entry:
- store i32 10, i32* getelementptr inbounds (%struct.ua, %struct.ua* @foo, i32 0, i32 1), align 1
+ store i32 10, ptr getelementptr inbounds (%struct.ua, ptr @foo, i32 0, i32 1), align 1
; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
; 16: sb ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
; Test that the correct ISA version of the unaligned memory operations is
; selected up front.
-define void @g2(i32* %a, i32* %b) {
+define void @g2(ptr %a, ptr %b) {
; MIPS-LABEL: name: g2
; MIPS: bb.0.entry:
; MIPS: liveins: $a0, $a1
; MICROMIPS: SWR_MM [[LWR_MM]], [[COPY]], 3 :: (store (s32) into %ir.b, align 1)
; MICROMIPS: RetRA
entry:
- %0 = load i32, i32* %a, align 1
- store i32 %0, i32* %b, align 1
+ %0 = load i32, ptr %a, align 1
+ store i32 %0, ptr %b, align 1
ret void
}
; MIPS32R6-EB-NEXT: jr $ra
; MIPS32R6-EB-NEXT: addiu $sp, $sp, 24
entry:
- tail call void @foo2(%struct.S1* byval(%struct.S1) getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind
+ tail call void @foo2(ptr byval(%struct.S1) getelementptr inbounds (%struct.S2, ptr @s2, i32 0, i32 1)) nounwind
ret void
}
; MIPS32R6-EB-NEXT: jr $ra
; MIPS32R6-EB-NEXT: addiu $sp, $sp, 24
entry:
- tail call void @foo4(%struct.S4* byval(%struct.S4) @s4) nounwind
+ tail call void @foo4(ptr byval(%struct.S4) @s4) nounwind
ret void
}
-declare void @foo2(%struct.S1* byval(%struct.S1))
-declare void @foo4(%struct.S4* byval(%struct.S4))
+declare void @foo2(ptr byval(%struct.S1))
+declare void @foo4(ptr byval(%struct.S4))
@b = external global %struct.a, align 1
; Function Attrs: norecurse nounwind readnone
-define %struct.a* @d() {
+define ptr @d() {
; CHECK-LABEL: d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui $1, %highest(b)
; CHECK-NEXT: jr $ra
; CHECK-NEXT: daddiu $2, $1, %lo(b)
entry:
- ret %struct.a* @b
+ ret ptr @b
}
; Function below generates a v2i16 to f32 bitcast.
; Test that we are able to match it.
-define float @f(<8 x i16>* %a) {
+define float @f(ptr %a) {
; CHECK-LABEL: f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addiu $sp, $sp, -32
; CHECK-NEXT: addiu $sp, $sp, 32
entry:
%m = alloca <8 x i16>
- %0 = load <8 x i16>, <8 x i16>* %a
- store <8 x i16> %0, <8 x i16>* %m
+ %0 = load <8 x i16>, ptr %a
+ store <8 x i16> %0, ptr %m
%1 = bitcast <8 x i16> %0 to <4 x float>
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 3, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%3 = shufflevector <8 x float> zeroinitializer, <8 x float> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
; CHECK: lw
; CHECK: sw
- %0 = load <2 x i16>, <2 x i16>* @g1, align 4
- store <2 x i16> %0, <2 x i16>* @g0, align 4
+ %0 = load <2 x i16>, ptr @g1, align 4
+ store <2 x i16> %0, ptr @g0, align 4
ret void
}
; CHECK: lw
; CHECK: sw
- %0 = load <4 x i8>, <4 x i8>* @g3, align 4
- store <4 x i8> %0, <4 x i8>* @g2, align 4
+ %0 = load <4 x i8>, ptr @g3, align 4
+ store <4 x i8> %0, ptr @g2, align 4
ret void
}
define void @foo0() nounwind {
entry:
- %0 = load <4 x i32>, <4 x i32>* @a, align 16
- %1 = load <4 x i32>, <4 x i32>* @b, align 16
+ %0 = load <4 x i32>, ptr @a, align 16
+ %1 = load <4 x i32>, ptr @b, align 16
%cmp = icmp slt <4 x i32> %0, %1
%sext = sext <4 x i1> %cmp to <4 x i32>
- store <4 x i32> %sext, <4 x i32>* @g0, align 16
+ store <4 x i32> %sext, ptr @g0, align 16
ret void
}
; RUN: llc -march=mips < %s | FileCheck %s
-@t = common global i32 (...)* null, align 4
+@t = common global ptr null, align 4
define void @f() nounwind {
entry:
- store i32 (...)* @test_weak, i32 (...)** @t, align 4
+ store ptr @test_weak, ptr @t, align 4
ret void
}
; RUN: llc -march=mipsel -mattr=mips16 -relocation-model=pic < %s | FileCheck -strict-whitespace %s -check-prefix=16
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -strict-whitespace -check-prefix=32R2
-@main.L = internal unnamed_addr constant [5 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* blockaddress(@main, %L3), i8* blockaddress(@main, %L4), i8* null], align 4
+@main.L = internal unnamed_addr constant [5 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr blockaddress(@main, %L3), ptr blockaddress(@main, %L4), ptr null], align 4
@str = private unnamed_addr constant [2 x i8] c"A\00"
@str5 = private unnamed_addr constant [2 x i8] c"B\00"
@str6 = private unnamed_addr constant [2 x i8] c"C\00"
; 16: jalrc ${{[0-9]+}}
; 16: jrc ${{[0-9]+}}
; 16: jrc $ra
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+ %puts = tail call i32 @puts(ptr @str)
br label %L1
L1: ; preds = %entry, %L3
%i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ]
- %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str5, i32 0, i32 0))
+ %puts5 = tail call i32 @puts(ptr @str5)
br label %L2
L2: ; preds = %L1, %L3
%i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ]
- %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str6, i32 0, i32 0))
+ %puts6 = tail call i32 @puts(ptr @str6)
br label %L3
L3: ; preds = %L2, %L3
%i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
- %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str7, i32 0, i32 0))
+ %puts7 = tail call i32 @puts(ptr @str7)
%inc = add i32 %i.2, 1
- %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
- %0 = load i8*, i8** %arrayidx, align 4
- indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
+ %arrayidx = getelementptr inbounds [5 x ptr], ptr @main.L, i32 0, i32 %i.2
+ %0 = load ptr, ptr %arrayidx, align 4
+ indirectbr ptr %0, [label %L1, label %L2, label %L3, label %L4]
L4: ; preds = %L3
- %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str8, i32 0, i32 0))
+ %puts8 = tail call i32 @puts(ptr @str8)
ret i32 0
}
; puts declaration for the indirectbr test above; i8* becomes ptr.
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
; ext: NOTE(review): the body of this function appears elided in this chunk —
; %and has no visible definition here. Presumably a shift/and bit-extract
; sequence tested against the MIPS32r2 "ext" instruction; confirm against the
; full test file before editing.
define i32 @ext(i32 %s, i32 %pos, i32 %sz) nounwind readnone {
entry:
ret i32 %and
}
; ins: bit-field insert pattern. Shifts %s left by 5 and masks with 16352
; (0x3FE0, i.e. a 9-bit field at bit position 5), clears those bits in the
; word at %d (-16353 == ~16352), and merges — exactly what the MIPS32r2
; "ins" instruction implements, as the 32R2 directive below expects.
-define void @ins(i32 %s, i32* nocapture %d) nounwind {
+define void @ins(i32 %s, ptr nocapture %d) nounwind {
entry:
; 32R2: ins ${{[0-9]+}}, $4, 5, 9
%and = shl i32 %s, 5
%shl = and i32 %and, 16352
- %tmp3 = load i32, i32* %d, align 4
+ %tmp3 = load i32, ptr %d, align 4
%and5 = and i32 %tmp3, -16353
%or = or i32 %and5, %shl
- store i32 %or, i32* %d, align 4
+ store i32 %or, ptr %d, align 4
ret void
}
; main: XORs the values of globals @x and @y and prints the result. The
; migration also folds the getelementptr-of-@.str constant into a direct
; ptr reference to the global (equivalent: GEP with all-zero indices).
define i32 @main() nounwind {
entry:
- %0 = load i32, i32* @x, align 4
- %1 = load i32, i32* @y, align 4
+ %0 = load i32, ptr @x, align 4
+ %1 = load i32, ptr @y, align 4
%xor = xor i32 %0, %1
; 16: xor ${{[0-9]+}}, ${{[0-9]+}}
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %xor)
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %xor)
ret i32 0
}
; Variadic printf declaration; i8* first parameter becomes opaque ptr.
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
; 64R6: seleqz $2, $[[R0]], $4
%tobool = icmp ne i32 %s, 0
- %0 = load i32, i32* @g1, align 4
+ %0 = load i32, ptr @g1, align 4
%cond = select i1 %tobool, i32 0, i32 %0
ret i32 %cond
}
; 64R6: selnez $2, $[[R0]], $4
%tobool = icmp ne i32 %s, 0
- %0 = load i32, i32* @g1, align 4
+ %0 = load i32, ptr @g1, align 4
%cond = select i1 %tobool, i32 %0, i32 0
ret i32 %cond
}
; 64R6: seleqz $2, $[[R0]], $4
%tobool = icmp ne i64 %s, 0
- %0 = load i64, i64* @g2, align 4
+ %0 = load i64, ptr @g2, align 4
%cond = select i1 %tobool, i64 0, i64 %0
ret i64 %cond
}
; 64R6: selnez $2, $[[R0]], $4
%tobool = icmp ne i64 %s, 0
- %0 = load i64, i64* @g2, align 4
+ %0 = load i64, ptr @g2, align 4
%cond = select i1 %tobool, i64 %0, i64 0
ret i64 %cond
}