From f7f59b15aa1b7648c82220c8e4bf8a8e04b92926 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Sun, 27 May 2012 19:35:41 +0000
Subject: [PATCH] These tests used intrinsics with the wrong prototype. They
 weren't caught because the old verifier just checked that something "was a
 pointer", but not that the pointee was correct.

llvm-svn: 157544
---
 llvm/test/CodeGen/ARM/vlddup.ll                  | 32 +++++++++++++-----------
 llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll |  6 ++---
 llvm/test/CodeGen/X86/sse4a.ll                   | 12 ++++-----
 llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll |  4 +--
 4 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/llvm/test/CodeGen/ARM/vlddup.ll b/llvm/test/CodeGen/ARM/vlddup.ll
index 61d73c1..c69473f 100644
--- a/llvm/test/CodeGen/ARM/vlddup.ll
+++ b/llvm/test/CodeGen/ARM/vlddup.ll
@@ -75,12 +75,12 @@ define <8 x i8> @vld2dupi8(i8* %A) nounwind {
 	ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vld2dupi16(i16* %A) nounwind {
+define <4 x i16> @vld2dupi16(i8* %A) nounwind {
 ;CHECK: vld2dupi16:
 ;Check that a power-of-two alignment smaller than the total size of the memory
 ;being loaded is ignored.
 ;CHECK: vld2.16 {d16[], d17[]}, [r0]
-	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
 	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
@@ -94,7 +94,8 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
 ;CHECK: vld2dupi16_update:
 ;CHECK: vld2.16 {d16[], d17[]}, [r1]!
 	%A = load i16** %ptr
-	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+	%A2 = bitcast i16* %A to i8*
+	%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
 	%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
@@ -105,11 +106,11 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
 	ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @vld2dupi32(i32* %A) nounwind {
+define <2 x i32> @vld2dupi32(i8* %A) nounwind {
 ;CHECK: vld2dupi32:
 ;Check the alignment value. Max for this instruction is 64 bits:
 ;CHECK: vld2.32 {d16[], d17[]}, [r0, :64]
-	%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
+	%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
 	%tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1
@@ -119,8 +120,8 @@ define <2 x i32> @vld2dupi32(i32* %A) nounwind {
 }
 
 declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
-declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
-declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
 
 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
@@ -144,11 +145,11 @@ define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
 	ret <8 x i8> %tmp8
 }
 
-define <4 x i16> @vld3dupi16(i16* %A) nounwind {
+define <4 x i16> @vld3dupi16(i8* %A) nounwind {
 ;CHECK: vld3dupi16:
 ;Check the (default) alignment value. VLD3 does not support alignment.
 ;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
-	%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
+	%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
 	%tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1
@@ -161,7 +162,7 @@ define <4 x i16> @vld3dupi16(i16* %A) nounwind {
 }
 
 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
 
 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
@@ -171,7 +172,8 @@ define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
 ;CHECK: vld4dupi16_update:
 ;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
 	%A = load i16** %ptr
-	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
+	%A2 = bitcast i16* %A to i8*
+	%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
 	%tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1
@@ -188,12 +190,12 @@ define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
 	ret <4 x i16> %tmp11
 }
 
-define <2 x i32> @vld4dupi32(i32* %A) nounwind {
+define <2 x i32> @vld4dupi32(i8* %A) nounwind {
 ;CHECK: vld4dupi32:
 ;Check the alignment value. An 8-byte alignment is allowed here even though
 ;it is smaller than the total size of the memory being loaded.
 ;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :64]
-	%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
+	%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
 	%tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
 	%tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1
@@ -208,5 +210,5 @@ define <2 x i32> @vld4dupi32(i32* %A) nounwind {
 	ret <2 x i32> %tmp11
 }
 
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
diff --git a/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll b/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
index b92477b..c3d69c7 100644
--- a/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
+++ b/llvm/test/CodeGen/MSP430/2009-12-21-FrameAddr.ll
@@ -5,9 +5,9 @@ target triple = "msp430-unknown-linux-gnu"
 
 define msp430_intrcc void @foo() nounwind {
 entry:
-	%fa = call i16* @llvm.frameaddress(i32 0)
-	store i16 0, i16* %fa
+	%fa = call i8* @llvm.frameaddress(i32 0)
+	store i8 0, i8* %fa
 	ret void
 }
 
-declare i16* @llvm.frameaddress(i32)
+declare i8* @llvm.frameaddress(i32)
diff --git a/llvm/test/CodeGen/X86/sse4a.ll b/llvm/test/CodeGen/X86/sse4a.ll
index 0732353..14c0fb3 100644
--- a/llvm/test/CodeGen/X86/sse4a.ll
+++ b/llvm/test/CodeGen/X86/sse4a.ll
@@ -1,19 +1,19 @@
 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4a | FileCheck %s
 
-define void @test1(float* %p, <4 x float> %a) nounwind optsize ssp {
+define void @test1(i8* %p, <4 x float> %a) nounwind optsize ssp {
 ; CHECK: movntss
 entry:
-  tail call void @llvm.x86.sse4a.movnt.ss(float* %p, <4 x float> %a) nounwind
+  tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
   ret void
 }
 
-declare void @llvm.x86.sse4a.movnt.ss(float*, <4 x float>)
+declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
 
-define void @test2(double* %p, <2 x double> %a) nounwind optsize ssp {
+define void @test2(i8* %p, <2 x double> %a) nounwind optsize ssp {
 ; CHECK: movntsd
 entry:
-  tail call void @llvm.x86.sse4a.movnt.sd(double* %p, <2 x double> %a) nounwind
+  tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
   ret void
 }
 
-declare void @llvm.x86.sse4a.movnt.sd(double*, <2 x double>)
+declare void @llvm.x86.sse4a.movnt.sd(i8*, <2 x double>)
diff --git a/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll b/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll
index aaf69ae..125325c 100644
--- a/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll
+++ b/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll
@@ -3,9 +3,9 @@
 
 define void @x(i8* %a, i8* %src, i64 %len, i32 %align) nounwind {
 entry:
-        tail call void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32 %align) nounwind
+        tail call void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %src, i64 %len, i32 %align, i1 false) nounwind
         ret void
 }
 
-declare void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32)
+declare void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %src, i64 %len, i32, i1)
-- 
2.7.4
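
For context, a minimal sketch of the kind of IR this verifier change catches
(illustrative only, not part of the commit). @llvm.frameaddress is prototyped
to return i8*; the old verifier accepted a declaration returning i16* merely
because the result "was a pointer", while the stricter verifier also checks
the pointee type and rejects the call. The MSP430 hunk above is the matching
test fix, switching the test from i16* to i8*:

    ; Wrong prototype: llvm.frameaddress returns i8*, not i16*.
    ; Previously accepted because i16* "is a pointer"; now the verifier
    ; reports an intrinsic prototype mismatch at the call.
    declare i16* @llvm.frameaddress(i32)

    define void @bad() nounwind {
    entry:
      %fa = call i16* @llvm.frameaddress(i32 0)   ; verifier error
      ret void
    }

Replacing both i16* occurrences with i8*, as the patched test does, makes the
module verify cleanly.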