From 4cb7cf276df36a15e9b416f3d0b55d5604615112 Mon Sep 17 00:00:00 2001
From: Aaron Watry
Date: Tue, 16 Jul 2013 14:28:58 +0000
Subject: [PATCH] libclc: vload/vstore disable assembly and fix offset calculation

This commit gets us back to pure CLC and fixes offset calculations. The
next commit will re-enable the assembly implementation for R600, fix
bugs related to 64-bit address spaces, and also fix the incorrect
assumption that address space identifiers are the same in all
architectures.

llvm-svn: 186415
---
 libclc/generic/lib/SOURCES             |  2 -
 libclc/generic/lib/shared/vload.cl     | 64 +++----------------------
 libclc/generic/lib/shared/vload_if.ll  | 60 -------------------------
 libclc/generic/lib/shared/vstore.cl    | 80 ++++++----------------------------
 libclc/generic/lib/shared/vstore_if.ll | 59 -------------------------
 5 files changed, 20 insertions(+), 245 deletions(-)
 delete mode 100644 libclc/generic/lib/shared/vload_if.ll
 delete mode 100644 libclc/generic/lib/shared/vstore_if.ll

diff --git a/libclc/generic/lib/SOURCES b/libclc/generic/lib/SOURCES
index c2da3d7..21a7eaa 100644
--- a/libclc/generic/lib/SOURCES
+++ b/libclc/generic/lib/SOURCES
@@ -26,10 +26,8 @@ shared/clamp.cl
 shared/max.cl
 shared/min.cl
 shared/vload.cl
-shared/vload_if.ll
 shared/vload_impl.ll
 shared/vstore.cl
-shared/vstore_if.ll
 shared/vstore_impl.ll
 workitem/get_global_id.cl
 workitem/get_global_size.cl
diff --git a/libclc/generic/lib/shared/vload.cl b/libclc/generic/lib/shared/vload.cl
index 4dd7918..6793072 100644
--- a/libclc/generic/lib/shared/vload.cl
+++ b/libclc/generic/lib/shared/vload.cl
@@ -2,23 +2,23 @@
 
 #define VLOAD_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \
   _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##2 vload2(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
-    return (PRIM_TYPE##2)(x[offset] , x[offset+1]); \
+    return (PRIM_TYPE##2)(x[2*offset] , x[2*offset+1]); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##3 vload3(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
-    return (PRIM_TYPE##3)(x[offset] , x[offset+1], x[offset+2]); \
+    return (PRIM_TYPE##3)(x[3*offset] , x[3*offset+1], x[3*offset+2]); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##4 vload4(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
-    return (PRIM_TYPE##4)(x[offset], x[offset+1], x[offset+2], x[offset+3]); \
+    return (PRIM_TYPE##4)(x[4*offset], x[4*offset+1], x[4*offset+2], x[4*offset+3]); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##8 vload8(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
-    return (PRIM_TYPE##8)(vload4(offset, x), vload4(offset+4, x)); \
+    return (PRIM_TYPE##8)(vload4(0, &x[8*offset]), vload4(1, &x[8*offset])); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##16 vload16(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
-    return (PRIM_TYPE##16)(vload8(offset, x), vload8(offset+8, x)); \
+    return (PRIM_TYPE##16)(vload8(0, &x[16*offset]), vload8(1, &x[16*offset])); \
   } \
 
 #define VLOAD_ADDR_SPACES(__CLC_SCALAR_GENTYPE) \
@@ -27,12 +27,13 @@
   VLOAD_VECTORIZE(__CLC_SCALAR_GENTYPE, __constant) \
   VLOAD_VECTORIZE(__CLC_SCALAR_GENTYPE, __global) \
 
-//int/uint are special... see below
 #define VLOAD_TYPES() \
   VLOAD_ADDR_SPACES(char) \
   VLOAD_ADDR_SPACES(uchar) \
   VLOAD_ADDR_SPACES(short) \
   VLOAD_ADDR_SPACES(ushort) \
+  VLOAD_ADDR_SPACES(int) \
+  VLOAD_ADDR_SPACES(uint) \
   VLOAD_ADDR_SPACES(long) \
   VLOAD_ADDR_SPACES(ulong) \
   VLOAD_ADDR_SPACES(float) \
@@ -43,54 +44,3 @@ VLOAD_TYPES()
 #pragma OPENCL EXTENSION cl_khr_fp64 : enable
     VLOAD_ADDR_SPACES(double)
 #endif
-
-VLOAD_VECTORIZE(int, __private)
-VLOAD_VECTORIZE(int, __local)
-VLOAD_VECTORIZE(int, __constant)
-VLOAD_VECTORIZE(uint, __private)
-VLOAD_VECTORIZE(uint, __local)
-VLOAD_VECTORIZE(uint, __constant)
-
-_CLC_OVERLOAD _CLC_DEF int2 vload2(size_t offset, const global int *x) {
-  return (int2)(x[offset] , x[offset+1]);
-}
-_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
-  return (int3)(vload2(offset, x), x[offset+2]);
-}
-_CLC_OVERLOAD _CLC_DEF uint2 vload2(size_t offset, const global uint *x) {
-  return (uint2)(x[offset] , x[offset+1]);
-}
-_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const global uint *x) {
-  return (uint3)(vload2(offset, x), x[offset+2]);
-}
-
-/*Note: It is known that R600 doesn't support load <2 x ?> and <3 x ?>... so
- * they aren't actually overridden here
- */
-_CLC_DECL int4 __clc_vload4_int__global(size_t offset, const __global int *);
-_CLC_DECL int8 __clc_vload8_int__global(size_t offset, const __global int *);
-_CLC_DECL int16 __clc_vload16_int__global(size_t offset, const __global int *);
-
-_CLC_OVERLOAD _CLC_DEF int4 vload4(size_t offset, const global int *x) {
-  return __clc_vload4_int__global(offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF int8 vload8(size_t offset, const global int *x) {
-  return __clc_vload8_int__global(offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF int16 vload16(size_t offset, const global int *x) {
-  return __clc_vload16_int__global(offset, x);
-}
-
-_CLC_DECL uint4 __clc_vload4_uint__global(size_t offset, const __global uint *);
-_CLC_DECL uint8 __clc_vload8_uint__global(size_t offset, const __global uint *);
-_CLC_DECL uint16 __clc_vload16_uint__global(size_t offset, const __global uint *);
-
-_CLC_OVERLOAD _CLC_DEF uint4 vload4(size_t offset, const global uint *x) {
-  return __clc_vload4_uint__global(offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF uint8 vload8(size_t offset, const global uint *x) {
-  return __clc_vload8_uint__global(offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF uint16 vload16(size_t offset, const global uint *x) {
-  return __clc_vload16_uint__global(offset, x);
-}
diff --git a/libclc/generic/lib/shared/vload_if.ll b/libclc/generic/lib/shared/vload_if.ll
deleted file mode 100644
index 2634d37..0000000
--- a/libclc/generic/lib/shared/vload_if.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-;Start int global vload
-
-declare <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
-declare <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
-declare <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
-declare <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
-declare <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
-
-define <2 x i32> @__clc_vload2_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
-  ret <2 x i32> %call
-}
-
-define <3 x i32> @__clc_vload3_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
-  ret <3 x i32> %call
-}
-
-define <4 x i32> @__clc_vload4_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
-  ret <4 x i32> %call
-}
-
-define <8 x i32> @__clc_vload8_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
-  ret <8 x i32> %call
-}
-
-define <16 x i32> @__clc_vload16_int__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
-  ret <16 x i32> %call
-}
-
-
-;Start uint global vload
-
-define <2 x i32> @__clc_vload2_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <2 x i32> @__clc_vload2_impl_i32__global(i32 %x, i32 %y)
-  ret <2 x i32> %call
-}
-
-define <3 x i32> @__clc_vload3_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <3 x i32> @__clc_vload3_impl_i32__global(i32 %x, i32 %y)
-  ret <3 x i32> %call
-}
-
-define <4 x i32> @__clc_vload4_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <4 x i32> @__clc_vload4_impl_i32__global(i32 %x, i32 %y)
-  ret <4 x i32> %call
-}
-
-define <8 x i32> @__clc_vload8_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <8 x i32> @__clc_vload8_impl_i32__global(i32 %x, i32 %y)
-  ret <8 x i32> %call
-}
-
-define <16 x i32> @__clc_vload16_uint__global(i32 %x, i32 %y) nounwind readonly alwaysinline {
-  %call = call <16 x i32> @__clc_vload16_impl_i32__global(i32 %x, i32 %y)
-  ret <16 x i32> %call
-}
diff --git a/libclc/generic/lib/shared/vstore.cl b/libclc/generic/lib/shared/vstore.cl
index 17c2c4c..f6d360e 100644
--- a/libclc/generic/lib/shared/vstore.cl
+++ b/libclc/generic/lib/shared/vstore.cl
@@ -4,29 +4,29 @@
 
 #define VSTORE_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \
   _CLC_OVERLOAD _CLC_DEF void vstore2(PRIM_TYPE##2 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
-    mem[offset] = vec.s0; \
-    mem[offset+1] = vec.s1; \
+    mem[2*offset] = vec.s0; \
+    mem[2*offset+1] = vec.s1; \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF void vstore3(PRIM_TYPE##3 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
-    mem[offset] = vec.s0; \
-    mem[offset+1] = vec.s1; \
-    mem[offset+2] = vec.s2; \
+    mem[3*offset] = vec.s0; \
+    mem[3*offset+1] = vec.s1; \
+    mem[3*offset+2] = vec.s2; \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF void vstore4(PRIM_TYPE##4 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
-    vstore2(vec.lo, offset, mem); \
-    vstore2(vec.hi, offset+2, mem); \
+    vstore2(vec.lo, 0, &mem[offset*4]); \
+    vstore2(vec.hi, 1, &mem[offset*4]); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF void vstore8(PRIM_TYPE##8 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
-    vstore4(vec.lo, offset, mem); \
-    vstore4(vec.hi, offset+4, mem); \
+    vstore4(vec.lo, 0, &mem[offset*8]); \
+    vstore4(vec.hi, 1, &mem[offset*8]); \
   } \
 \
   _CLC_OVERLOAD _CLC_DEF void vstore16(PRIM_TYPE##16 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
-    vstore8(vec.lo, offset, mem); \
-    vstore8(vec.hi, offset+8, mem); \
+    vstore8(vec.lo, 0, &mem[offset*16]); \
+    vstore8(vec.hi, 1, &mem[offset*16]); \
   } \
 
 #define VSTORE_ADDR_SPACES(__CLC_SCALAR___CLC_GENTYPE) \
@@ -34,12 +34,13 @@
   VSTORE_VECTORIZE(__CLC_SCALAR___CLC_GENTYPE, __local) \
   VSTORE_VECTORIZE(__CLC_SCALAR___CLC_GENTYPE, __global) \
 
-//int/uint are special... see below
 #define VSTORE_TYPES() \
   VSTORE_ADDR_SPACES(char) \
   VSTORE_ADDR_SPACES(uchar) \
   VSTORE_ADDR_SPACES(short) \
   VSTORE_ADDR_SPACES(ushort) \
+  VSTORE_ADDR_SPACES(int) \
+  VSTORE_ADDR_SPACES(uint) \
   VSTORE_ADDR_SPACES(long) \
   VSTORE_ADDR_SPACES(ulong) \
   VSTORE_ADDR_SPACES(float) \
@@ -50,58 +51,3 @@ VSTORE_TYPES()
 #pragma OPENCL EXTENSION cl_khr_fp64 : enable
     VSTORE_ADDR_SPACES(double)
 #endif
-
-VSTORE_VECTORIZE(int, __private)
-VSTORE_VECTORIZE(int, __local)
-VSTORE_VECTORIZE(uint, __private)
-VSTORE_VECTORIZE(uint, __local)
-
-_CLC_OVERLOAD _CLC_DEF void vstore2(int2 vec, size_t offset, global int *mem) {
-  mem[offset] = vec.s0;
-  mem[offset+1] = vec.s1;
-}
-_CLC_OVERLOAD _CLC_DEF void vstore3(int3 vec, size_t offset, global int *mem) {
-  mem[offset] = vec.s0;
-  mem[offset+1] = vec.s1;
-  mem[offset+2] = vec.s2;
-}
-_CLC_OVERLOAD _CLC_DEF void vstore2(uint2 vec, size_t offset, global uint *mem) {
-  mem[offset] = vec.s0;
-  mem[offset+1] = vec.s1;
-}
-_CLC_OVERLOAD _CLC_DEF void vstore3(uint3 vec, size_t offset, global uint *mem) {
-  mem[offset] = vec.s0;
-  mem[offset+1] = vec.s1;
-  mem[offset+2] = vec.s2;
-}
-
-/*Note: R600 probably doesn't support store <2 x ?> and <3 x ?>... so
- * they aren't actually overridden here... lowest-common-denominator
- */
-_CLC_DECL void __clc_vstore4_int__global(int4 vec, size_t offset, __global int *);
-_CLC_DECL void __clc_vstore8_int__global(int8 vec, size_t offset, __global int *);
-_CLC_DECL void __clc_vstore16_int__global(int16 vec, size_t offset, __global int *);
-
-_CLC_OVERLOAD _CLC_DEF void vstore4(int4 vec, size_t offset, global int *x) {
-  __clc_vstore4_int__global(vec, offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF void vstore8(int8 vec, size_t offset, global int *x) {
-  __clc_vstore8_int__global(vec, offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF void vstore16(int16 vec, size_t offset, global int *x) {
-  __clc_vstore16_int__global(vec, offset, x);
-}
-
-_CLC_DECL void __clc_vstore4_uint__global(uint4 vec, size_t offset, __global uint *);
-_CLC_DECL void __clc_vstore8_uint__global(uint8 vec, size_t offset, __global uint *);
-_CLC_DECL void __clc_vstore16_uint__global(uint16 vec, size_t offset, __global uint *);
-
-_CLC_OVERLOAD _CLC_DEF void vstore4(uint4 vec, size_t offset, global uint *x) {
-  __clc_vstore4_uint__global(vec, offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF void vstore8(uint8 vec, size_t offset, global uint *x) {
-  __clc_vstore8_uint__global(vec, offset, x);
-}
-_CLC_OVERLOAD _CLC_DEF void vstore16(uint16 vec, size_t offset, global uint *x) {
-  __clc_vstore16_uint__global(vec, offset, x);
-}
diff --git a/libclc/generic/lib/shared/vstore_if.ll b/libclc/generic/lib/shared/vstore_if.ll
deleted file mode 100644
index 30eb552..0000000
--- a/libclc/generic/lib/shared/vstore_if.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-;Start int global vstore
-
-declare void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
-declare void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
-
-define void @__clc_vstore2_int__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore3_int__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore4_int__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore8_int__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore16_int__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-
-;Start uint global vstore
-define void @__clc_vstore2_uint__global(<2 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore3_uint__global(<3 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore4_uint__global(<4 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore8_uint__global(<8 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
-
-define void @__clc_vstore16_uint__global(<16 x i32> %vec, i32 %x, i32 %y) nounwind alwaysinline {
-  call void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %x, i32 %y)
-  ret void
-}
\ No newline at end of file
-- 
2.7.4
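
The offset rule the new CLC implementations follow is the one from the OpenCL C
specification: vloadn(offset, p) reads elements p[n*offset] through
p[n*offset+n-1], and vstoren writes the same range. A minimal usage sketch of
the corrected addressing (not part of the patch; the kernel name and arguments
are hypothetical):

/* Hypothetical example: each work-item handles one float4. With the fixed
 * offset math, vload4(gid, in) reads in[4*gid]..in[4*gid+3] and
 * vstore4(..., gid, out) writes out[4*gid]..out[4*gid+3]. */
__kernel void scale4(__global const float *in, __global float *out, float k) {
  size_t gid = get_global_id(0);
  float4 v = vload4(gid, in);   /* loads in[4*gid] .. in[4*gid+3] */
  vstore4(v * k, gid, out);     /* stores out[4*gid] .. out[4*gid+3] */
}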
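
The wider overloads are built from the narrower ones by rebasing the pointer by
the full vector width and then handling the two halves at offsets 0 and 1,
which keeps the element math consistent across widths. As an illustration only
(this is what VLOAD_VECTORIZE/VSTORE_VECTORIZE expand to for one instantiation,
not additional code in the patch):

/* Expansion sketch for PRIM_TYPE = float, ADDR_SPACE = __global. */
_CLC_OVERLOAD _CLC_DEF float8 vload8(size_t offset, const __global float *x) {
  /* &x[8*offset] is the first of the 8 elements; the two vload4 calls then
   * cover elements 8*offset..8*offset+3 and 8*offset+4..8*offset+7. */
  return (float8)(vload4(0, &x[8*offset]), vload4(1, &x[8*offset]));
}

_CLC_OVERLOAD _CLC_DEF void vstore8(float8 vec, size_t offset, __global float *mem) {
  vstore4(vec.lo, 0, &mem[offset*8]);   /* elements offset*8   .. offset*8+3 */
  vstore4(vec.hi, 1, &mem[offset*8]);   /* elements offset*8+4 .. offset*8+7 */
}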