Copyright (c) 2012 Advanced Micro Devices, Inc.

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

//Originally written by Takahiro Harada
\r
// OpenCL extension pragmas: AMD printf plus 32-bit base/extended atomics on
// both __local and __global memory, required by the atom_* builtins below.
#pragma OPENCL EXTENSION cl_amd_printf : enable
#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable
#pragma OPENCL EXTENSION cl_khr_local_int32_extended_atomics : enable
#pragma OPENCL EXTENSION cl_khr_global_int32_extended_atomics : enable

// Use hardware atomic counters when the device supports them; otherwise
// fall back to a plain volatile global int driven by atomic_inc (AppendInc).
// NOTE(review): the #else/#endif were missing in the garbled source (the
// line numbering skips where they stood); restored to balance the #ifdef.
#ifdef cl_ext_atomic_counters_32
#pragma OPENCL EXTENSION cl_ext_atomic_counters_32 : enable
#else
#define counter32_t volatile __global int*
#endif
\r
// Short fixed-width aliases used throughout the kernels.
typedef unsigned int u32;
typedef unsigned short u16;
typedef unsigned char u8;

// Work-item / work-group index helpers for a 1D dispatch.
#define GET_GROUP_IDX get_group_id(0)
#define GET_LOCAL_IDX get_local_id(0)
#define GET_GLOBAL_IDX get_global_id(0)
#define GET_GROUP_SIZE get_local_size(0)
#define GET_NUM_GROUPS get_num_groups(0)

// Work-group-wide synchronization / fencing on local (LDS) memory.
#define GROUP_LDS_BARRIER barrier(CLK_LOCAL_MEM_FENCE)
#define GROUP_MEM_FENCE mem_fence(CLK_LOCAL_MEM_FENCE)

// Atomic wrappers. 'x' is an lvalue (the macro takes its address); AppendInc
// takes a pointer directly so it also works with counter32_t.
#define AtomInc(x) atom_inc(&(x))
#define AtomInc1(x, out) out = atom_inc(&(x))
#define AppendInc(x, out) out = atomic_inc(x)
#define AtomAdd(x, value) atom_add(&(x), value)
// NOTE(review): name misspelled ("Cmpxhg", missing a 'c') but kept as-is --
// callers elsewhere in the project use this exact spelling.
#define AtomCmpxhg(x, cmp, value) atom_cmpxchg( &(x), cmp, value )
#define AtomXhg(x, value) atom_xchg ( &(x), value )

// Component-wise select: each lane of the result is (condition ? a : b).
#define SELECT_UINT4( b, a, condition ) select( b,a,condition )

// Constructor-style casts mirroring the CUDA make_* helpers.
#define make_float4 (float4)
#define make_float2 (float2)
#define make_uint4 (uint4)
#define make_int4 (int4)
#define make_uint2 (uint2)
#define make_int2 (int2)
\r
69 float4 m_worldPos[4];
\r
70 float4 m_worldNormal;
\r
// Per-work-group scratch sizes. WG_SIZE is expected to be supplied by the
// host at kernel-build time (e.g. -DWG_SIZE=64) -- TODO confirm against the
// host-side build options.
#define STACK_SIZE (WG_SIZE*10)
//#define STACK_SIZE (WG_SIZE)
#define RING_SIZE 1024
#define RING_SIZE_MASK (RING_SIZE-1)
#define CHECK_SIZE (WG_SIZE)

// These two expand names that are __local variables inside CreateBatches
// (ldsRingEnd, ldsTmp); they are only meaningful inside that kernel.
#define GET_RING_CAPACITY (RING_SIZE - ldsRingEnd)
#define RING_END ldsTmp
\r
// Tests bit 'idx' (taken modulo 32*CHECK_SIZE) in the local bit-array 'buff'.
// Returns the isolated bit -- nonzero if set, 0 if clear (callers compare
// against 0, not 1). Plain non-atomic read; pairs with the atomic writeBuf.
u32 readBuf(__local u32* buff, int idx)
{
	idx = idx % (32*CHECK_SIZE);
	int bitIdx = idx%32;
	int bufIdx = idx/32;
	return buff[bufIdx] & (1<<bitIdx);
}
\r
// Atomically sets bit 'idx' (taken modulo 32*CHECK_SIZE) in the local
// bit-array 'buff'.
void writeBuf(__local u32* buff, int idx)
{
	idx = idx % (32*CHECK_SIZE);
	int bitIdx = idx%32;
	int bufIdx = idx/32;
	// A plain read-modify-write OR would race between work-items sharing a
	// word, so the set must be atomic.
	atom_or( &buff[bufIdx], (1<<bitIdx) );
}
\r
// Atomically tries to claim bit 'idx' (taken modulo 32*CHECK_SIZE) in the
// local bit-array 'buff'. Returns 1 if this call set the bit (it was
// previously clear), 0 if another work-item had already claimed it.
// atom_or returns the word's previous value, so the test-and-set is a
// single atomic operation.
u32 tryWrite(__local u32* buff, int idx)
{
	idx = idx % (32*CHECK_SIZE);
	int bitIdx = idx%32;
	int bufIdx = idx/32;
	u32 ans = (u32)atom_or( &buff[bufIdx], (1<<bitIdx) );
	return ((ans >> bitIdx)&1) == 0;
}
\r
129 // batching on the GPU
\r
130 __kernel void CreateBatches( __global Contact4* gConstraints, __global Contact4* gConstraintsOut,
\r
131 __global u32* gN, __global u32* gStart,
\r
134 __local u32 ldsStackIdx[STACK_SIZE];
\r
135 __local u32 ldsStackEnd;
\r
136 __local Elem ldsRingElem[RING_SIZE];
\r
137 __local u32 ldsRingEnd;
\r
138 __local u32 ldsTmp;
\r
139 __local u32 ldsCheckBuffer[CHECK_SIZE];
\r
140 __local u32 ldsFixedBuffer[CHECK_SIZE];
\r
141 __local u32 ldsGEnd;
\r
142 __local u32 ldsDstEnd;
\r
144 int wgIdx = GET_GROUP_IDX;
\r
145 int lIdx = GET_LOCAL_IDX;
\r
147 const int m_n = gN[wgIdx];
\r
148 const int m_start = gStart[wgIdx];
\r
149 const int m_staticIdx = cb.m_staticIdx;
\r
156 ldsDstEnd = m_start;
\r
160 for(int ie=0; ie<250; ie++)
\r
162 ldsFixedBuffer[lIdx] = 0;
\r
164 for(int giter=0; giter<4; giter++)
\r
166 int ringCap = GET_RING_CAPACITY;
\r
169 if( ldsGEnd < m_n )
\r
171 while( ringCap > WG_SIZE )
\r
173 if( ldsGEnd >= m_n ) break;
\r
174 if( lIdx < ringCap - WG_SIZE )
\r
177 AtomInc1( ldsGEnd, srcIdx );
\r
181 AtomInc1( ldsRingEnd, dstIdx );
\r
183 int a = gConstraints[m_start+srcIdx].m_bodyA;
\r
184 int b = gConstraints[m_start+srcIdx].m_bodyB;
\r
185 ldsRingElem[dstIdx].m_a = (a>b)? b:a;
\r
186 ldsRingElem[dstIdx].m_b = (a>b)? a:b;
\r
187 ldsRingElem[dstIdx].m_idx = srcIdx;
\r
190 ringCap = GET_RING_CAPACITY;
\r
197 __local Elem* dst = ldsRingElem;
\r
198 if( lIdx == 0 ) RING_END = 0;
\r
201 int end = ldsRingEnd;
\r
204 for(int ii=0; ii<end; ii+=WG_SIZE, srcIdx+=WG_SIZE)
\r
207 if(srcIdx<end) e = ldsRingElem[srcIdx];
\r
208 bool done = (srcIdx<end)?false:true;
\r
210 for(int i=lIdx; i<CHECK_SIZE; i+=WG_SIZE) ldsCheckBuffer[lIdx] = 0;
\r
214 int aUsed = readBuf( ldsFixedBuffer, e.m_a);
\r
215 int bUsed = readBuf( ldsFixedBuffer, e.m_b);
\r
217 if( aUsed==0 && bUsed==0 )
\r
222 aAvailable = tryWrite( ldsCheckBuffer, e.m_a );
\r
223 bAvailable = tryWrite( ldsCheckBuffer, e.m_b );
\r
225 //aAvailable = (m_staticIdx == e.m_a)? 1: aAvailable;
\r
226 //bAvailable = (m_staticIdx == e.m_b)? 1: bAvailable;
\r
228 bool success = (aAvailable && bAvailable);
\r
231 writeBuf( ldsFixedBuffer, e.m_a );
\r
232 writeBuf( ldsFixedBuffer, e.m_b );
\r
243 int dstIdx; AtomInc1( ldsStackEnd, dstIdx );
\r
244 if( dstIdx < STACK_SIZE )
\r
245 ldsStackIdx[dstIdx] = e.m_idx;
\r
248 AtomAdd( ldsStackEnd, -1 );
\r
253 int dstIdx; AtomInc1( RING_END, dstIdx );
\r
258 // if filled, flush
\r
259 if( ldsStackEnd == STACK_SIZE )
\r
261 for(int i=lIdx; i<STACK_SIZE; i+=WG_SIZE)
\r
263 int idx = m_start + ldsStackIdx[i];
\r
264 int dstIdx; AtomInc1( ldsDstEnd, dstIdx );
\r
265 gConstraintsOut[ dstIdx ] = gConstraints[ idx ];
\r
266 gConstraintsOut[ dstIdx ].m_batchIdx = ie;
\r
268 if( lIdx == 0 ) ldsStackEnd = 0;
\r
270 //for(int i=lIdx; i<CHECK_SIZE; i+=WG_SIZE)
\r
271 ldsFixedBuffer[lIdx] = 0;
\r
276 if( lIdx == 0 ) ldsRingEnd = RING_END;
\r
281 for(int i=lIdx; i<ldsStackEnd; i+=WG_SIZE)
\r
283 int idx = m_start + ldsStackIdx[i];
\r
284 int dstIdx; AtomInc1( ldsDstEnd, dstIdx );
\r
285 gConstraintsOut[ dstIdx ] = gConstraints[ idx ];
\r
286 gConstraintsOut[ dstIdx ].m_batchIdx = ie;
\r
289 // in case it couldn't consume any pair. Flush them
\r
290 // todo. Serial batch worth while?
\r
291 if( ldsStackEnd == 0 )
\r
293 for(int i=lIdx; i<ldsRingEnd; i+=WG_SIZE)
\r
295 int idx = m_start + ldsRingElem[i].m_idx;
\r
296 int dstIdx; AtomInc1( ldsDstEnd, dstIdx );
\r
297 gConstraintsOut[ dstIdx ] = gConstraints[ idx ];
\r
298 gConstraintsOut[ dstIdx ].m_batchIdx = 100+i;
\r
301 if( lIdx == 0 ) ldsRingEnd = 0;
\r
304 if( lIdx == 0 ) ldsStackEnd = 0;
\r
309 if( ldsGEnd == m_n && ldsRingEnd == 0 )
\r