From 48e889bed24649c873e2ba5de5e1af97dbd7c8ba Mon Sep 17 00:00:00 2001
From: bsegovia
Date: Thu, 28 Jul 2011 05:39:58 +0000
Subject: [PATCH] %s/genx_gpgpu/intel_gpgpu/

---
 src/cl_command_queue.c | 14 ++++++------
 src/cl_command_queue.h |  4 ++--
 src/intel/genx_gpgpu.c | 60 +++++++++++++++++++++++++-------------------------
 src/intel/genx_gpgpu.h | 36 +++++++++++++++---------------
 4 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/src/cl_command_queue.c b/src/cl_command_queue.c
index f7a2740..c65f9ca 100644
--- a/src/cl_command_queue.c
+++ b/src/cl_command_queue.c
@@ -44,7 +44,7 @@ cl_command_queue_new(cl_context ctx)
   queue->ref_n = 1;
   queue->ctx = ctx;
   TRY_ALLOC_NO_ERR (queue->gpgpu,
-                    genx_gpgpu_state_new((struct intel_driver*) ctx->intel_drv));
+                    intel_gpgpu_new((struct intel_driver*) ctx->intel_drv));
 
   /* Append the command queue in the list */
   pthread_mutex_lock(&ctx->queue_lock);
@@ -84,7 +84,7 @@ cl_command_queue_delete(cl_command_queue queue)
   pthread_mutex_unlock(&queue->ctx->queue_lock);
   cl_mem_delete(queue->perf);
   cl_context_delete(queue->ctx);
-  genx_gpgpu_state_delete(queue->gpgpu);
+  intel_gpgpu_delete(queue->gpgpu);
   queue->magic = CL_MAGIC_DEAD_HEADER; /* For safety */
   cl_free(queue);
 }
@@ -119,7 +119,7 @@ cl_command_queue_bind_surface(cl_command_queue queue,
                               drm_intel_bo **scratch)
 {
   cl_context ctx = queue->ctx;
-  genx_gpgpu_state_t *gpgpu = queue->gpgpu;
+  intel_gpgpu_t *gpgpu = queue->gpgpu;
   drm_intel_bufmgr *bufmgr = cl_context_get_intel_bufmgr(ctx);
   cl_mem mem = NULL;
   drm_intel_bo *bo = NULL, *sync_bo = NULL;
@@ -222,7 +222,7 @@ cl_command_queue_enqueue_wrk_grp3(cl_command_queue queue,
                                   uint32_t thread_n,
                                   uint32_t barrierID)
 {
-  genx_gpgpu_state_t *gpgpu = queue->gpgpu;
+  intel_gpgpu_t *gpgpu = queue->gpgpu;
   uint32_t i;
   for (i = 0; i < thread_n; ++i) {
     const size_t sz = sizeof(cl_inline_header_t) + 3*sizeof(cl_local_id_t);
@@ -246,7 +246,7 @@ cl_command_queue_enqueue_wrk_grp2(cl_command_queue queue,
                                   uint32_t thread_n,
                                   uint32_t barrierID)
 {
-  genx_gpgpu_state_t *gpgpu = queue->gpgpu;
+  intel_gpgpu_t *gpgpu = queue->gpgpu;
   uint32_t i;
   for (i = 0; i < thread_n; ++i) {
     const size_t sz = sizeof(cl_inline_header_t) + 2*sizeof(cl_local_id_t);
@@ -268,7 +268,7 @@ cl_command_queue_enqueue_wrk_grp1(cl_command_queue queue,
                                   uint32_t thread_n,
                                   uint32_t barrierID)
 {
-  genx_gpgpu_state_t *gpgpu = queue->gpgpu;
+  intel_gpgpu_t *gpgpu = queue->gpgpu;
   uint32_t i;
   for (i = 0; i < thread_n; ++i) {
     const size_t sz = sizeof(cl_inline_header_t) + sizeof(cl_local_id_t);
@@ -432,7 +432,7 @@ cl_command_queue_ND_kernel(cl_command_queue queue,
                            const size_t *local_wk_sz)
 {
   cl_context ctx = queue->ctx;
-  genx_gpgpu_state_t *gpgpu = queue->gpgpu;
+  intel_gpgpu_t *gpgpu = queue->gpgpu;
   drm_intel_bo *slm_bo = NULL, *private_bo = NULL, *scratch_bo = NULL;
   size_t cst_sz = ker->patch.curbe.sz;
   size_t wrk_grp_sz, wrk_grp_n, batch_sz;
diff --git a/src/cl_command_queue.h b/src/cl_command_queue.h
index 3e60d75..0e0a37f 100644
--- a/src/cl_command_queue.h
+++ b/src/cl_command_queue.h
@@ -24,7 +24,7 @@
 #include "CL/cl.h"
 #include
 
-struct genx_gpgpu_state;
+struct intel_gpgpu;
 
 /* Basically, this is a (kind-of) batch buffer */
 struct _cl_command_queue {
@@ -32,7 +32,7 @@
   volatile int ref_n;              /* We reference count this object */
   cl_context ctx;                  /* Its parent context */
   cl_command_queue prev, next;     /* We chain the command queues together */
-  struct genx_gpgpu_state *gpgpu;  /* Setup all GEN commands */
+  struct intel_gpgpu *gpgpu;       /* Setup all GEN commands */
   cl_mem perf;                     /* Where to put the perf counters */
   cl_mem fulsim_out;               /* Fulsim will output this buffer */
   struct _drm_intel_bo *last_batch;/* To synchronize using clFinish */
diff --git a/src/intel/genx_gpgpu.c b/src/intel/genx_gpgpu.c
index adf73f9..a0d70ff 100644
--- a/src/intel/genx_gpgpu.c
+++ b/src/intel/genx_gpgpu.c
@@ -310,7 +310,7 @@ struct opaque_sampler_state {
 struct intel_driver;
 
 /* Handle GPGPU state (actually "media" state) */
-struct genx_gpgpu_state
+struct intel_gpgpu
 {
   intel_driver_t *drv;
   intel_batchbuffer_t *batch;
@@ -344,12 +344,12 @@
 /* Be sure that the size is still valid */
 STATIC_ASSERT(sizeof(struct opaque_sampler_state) == sizeof(struct i965_sampler_state));
 
-LOCAL genx_gpgpu_state_t*
-genx_gpgpu_state_new(intel_driver_t *drv)
+LOCAL intel_gpgpu_t*
+intel_gpgpu_new(intel_driver_t *drv)
 {
-  genx_gpgpu_state_t *state = NULL;
+  intel_gpgpu_t *state = NULL;
 
-  TRY_ALLOC_NO_ERR (state, CALLOC(genx_gpgpu_state_t));
+  TRY_ALLOC_NO_ERR (state, CALLOC(intel_gpgpu_t));
   state->drv = drv;
   state->batch = intel_batchbuffer_new(state->drv);
   assert(state->batch);
@@ -358,13 +358,13 @@ genx_gpgpu_state_new(intel_driver_t *drv)
 exit:
   return state;
 error:
-  genx_gpgpu_state_delete(state);
+  intel_gpgpu_delete(state);
   state = NULL;
   goto exit;
 }
 
 LOCAL void
-genx_gpgpu_state_delete(genx_gpgpu_state_t *state)
+intel_gpgpu_delete(intel_gpgpu_t *state)
 {
   uint32_t i;
 
@@ -390,7 +390,7 @@ genx_gpgpu_state_delete(genx_gpgpu_state_t *state)
 }
 
 static void
-gpgpu_select_pipeline(genx_gpgpu_state_t *state)
+gpgpu_select_pipeline(intel_gpgpu_t *state)
 {
   BEGIN_BATCH(state->batch, 1);
   OUT_BATCH(state->batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
@@ -398,7 +398,7 @@
 }
 
 static void
-gpgpu_set_base_address(genx_gpgpu_state_t *state)
+gpgpu_set_base_address(intel_gpgpu_t *state)
 {
   const uint32_t def_cc = cc_llc_mlc; /* default Cache Control value */
   BEGIN_BATCH(state->batch, 10);
@@ -426,7 +426,7 @@
 }
 
 static void
-gpgpu_load_vfe_state(genx_gpgpu_state_t *state)
+gpgpu_load_vfe_state(intel_gpgpu_t *state)
 {
   BEGIN_BATCH(state->batch, 8);
   OUT_BATCH(state->batch, CMD_MEDIA_STATE_POINTERS | (8-2));
@@ -450,7 +450,7 @@
 }
 
 static void
-gpgpu_load_constant_buffer(genx_gpgpu_state_t *state)
+gpgpu_load_constant_buffer(intel_gpgpu_t *state)
 {
   BEGIN_BATCH(state->batch, 4);
   OUT_BATCH(state->batch, CMD(2,0,1) | (4 - 2)); /* length-2 */
@@ -463,7 +463,7 @@
 }
 
 static void
-gpgpu_load_idrt(genx_gpgpu_state_t *state)
+gpgpu_load_idrt(intel_gpgpu_t *state)
 {
   BEGIN_BATCH(state->batch, 4);
   OUT_BATCH(state->batch, CMD(2,0,2) | (4 - 2)); /* length-2 */
@@ -474,7 +474,7 @@
 }
 
 LOCAL void
-gpgpu_batch_start(genx_gpgpu_state_t *state)
+gpgpu_batch_start(intel_gpgpu_t *state)
 {
   intel_batchbuffer_start_atomic(state->batch, 256);
   intel_batchbuffer_emit_mi_flush(state->batch);
@@ -500,7 +500,7 @@
 }
 
 LOCAL void
-gpgpu_batch_end(genx_gpgpu_state_t *state, int32_t flush_mode)
+gpgpu_batch_end(intel_gpgpu_t *state, int32_t flush_mode)
 {
   /* Insert the performance counter command */
   if (state->perf_b.bo) {
@@ -525,19 +525,19 @@
 }
 
 LOCAL void
-gpgpu_batch_reset(genx_gpgpu_state_t *state, size_t sz)
+gpgpu_batch_reset(intel_gpgpu_t *state, size_t sz)
 {
   intel_batchbuffer_reset(state->batch, sz);
 }
 
 LOCAL void
-gpgpu_flush(genx_gpgpu_state_t *state)
+gpgpu_flush(intel_gpgpu_t *state)
 {
   intel_batchbuffer_flush(state->batch);
 }
 
 LOCAL void
-gpgpu_state_init(genx_gpgpu_state_t *state,
+gpgpu_state_init(intel_gpgpu_t *state,
                  uint32_t max_threads,
                  uint32_t size_vfe_entry,
                  uint32_t num_vfe_entries,
@@ -613,7 +613,7 @@ gpgpu_state_init(genx_gpgpu_state_t *state,
 }
 
 LOCAL void
-gpgpu_bind_surf_2d(genx_gpgpu_state_t *state,
+gpgpu_bind_surf_2d(intel_gpgpu_t *state,
                    int32_t index,
                    dri_bo* obj_bo,
                    uint32_t offset,
@@ -672,7 +672,7 @@
 }
 
 LOCAL void
-gpgpu_bind_shared_surf_2d(genx_gpgpu_state_t *state,
+gpgpu_bind_shared_surf_2d(intel_gpgpu_t *state,
                           int32_t index,
                           dri_bo* obj_bo,
                           uint32_t offset,
@@ -761,7 +761,7 @@
 }
 
 LOCAL void
-gpgpu_bind_buf(genx_gpgpu_state_t *state,
+gpgpu_bind_buf(intel_gpgpu_t *state,
                int32_t index,
                dri_bo* obj_bo,
                uint32_t offset,
@@ -829,7 +829,7 @@
 }
 
 LOCAL void
-gpgpu_set_sampler(genx_gpgpu_state_t *state, uint32_t index, uint32_t non_normalized)
+gpgpu_set_sampler(intel_gpgpu_t *state, uint32_t index, uint32_t non_normalized)
 {
   struct i965_sampler_state *sampler = NULL;
   assert(index < (int) MAX_SAMPLERS);
@@ -851,7 +851,7 @@ gpgpu_set_sampler(genx_gpgpu_state_t *state, uint32_t index, uint32_t non_normal
 }
 
 static void
-gpgpu_build_sampler_table(genx_gpgpu_state_t *state)
+gpgpu_build_sampler_table(intel_gpgpu_t *state)
 {
   dri_bo_subdata(state->sampler_state_b.bo,
                  0,
@@ -860,7 +860,7 @@
 }
 
 static void
-gpgpu_build_binding_table(genx_gpgpu_state_t *state)
+gpgpu_build_binding_table(intel_gpgpu_t *state)
 {
   uint32_t *binding_table;
   dri_bo *bo = state->binding_table_b.bo;
@@ -886,7 +886,7 @@
 }
 
 static void
-gpgpu_build_idrt(genx_gpgpu_state_t *state,
+gpgpu_build_idrt(intel_gpgpu_t *state,
                  genx_gpgpu_kernel_t *kernel,
                  uint32_t ker_n)
 {
@@ -938,7 +938,7 @@
 }
 
 LOCAL void
-gpgpu_upload_constants(genx_gpgpu_state_t *state, void* data, uint32_t size)
+gpgpu_upload_constants(intel_gpgpu_t *state, void* data, uint32_t size)
 {
   unsigned char *constant_buffer = NULL;
 
@@ -950,7 +950,7 @@
 }
 
 LOCAL void
-gpgpu_states_setup(genx_gpgpu_state_t *state, genx_gpgpu_kernel_t *kernel, uint32_t ker_n)
+gpgpu_states_setup(intel_gpgpu_t *state, genx_gpgpu_kernel_t *kernel, uint32_t ker_n)
 {
   gpgpu_build_sampler_table(state);
   gpgpu_build_binding_table(state);
@@ -958,7 +958,7 @@ gpgpu_states_setup(genx_gpgpu_state_t *state, genx_gpgpu_kernel_t *kernel, uint3
 }
 
 LOCAL void
-gpgpu_update_barrier(genx_gpgpu_state_t *state, uint32_t barrierID, uint32_t thread_n)
+gpgpu_update_barrier(intel_gpgpu_t *state, uint32_t barrierID, uint32_t thread_n)
 {
   BEGIN_BATCH(state->batch, 4);
   OUT_BATCH(state->batch, CMD_MEDIA_STATE_FLUSH | 0);
@@ -969,7 +969,7 @@ gpgpu_update_barrier(genx_gpgpu_state_t *state, uint32_t barrierID, uint32_t thr
 }
 
 LOCAL void
-gpgpu_set_perf_counters(genx_gpgpu_state_t *state, dri_bo *perf)
+gpgpu_set_perf_counters(intel_gpgpu_t *state, dri_bo *perf)
 {
   if (state->perf_b.bo)
     drm_intel_bo_unreference(state->perf_b.bo);
@@ -978,7 +978,7 @@
 }
 
 LOCAL void
-gpgpu_run(genx_gpgpu_state_t *state, int32_t ki)
+gpgpu_run(intel_gpgpu_t *state, int32_t ki)
 {
   BEGIN_BATCH(state->batch, 6);
   OUT_BATCH(state->batch, GEN_CMD_MEDIA_OBJECT | 5);
@@ -992,7 +992,7 @@ gpgpu_run(genx_gpgpu_state_t *state, int32_t ki)
 }
 
 LOCAL char*
-gpgpu_run_with_inline(genx_gpgpu_state_t *state, int32_t ki, size_t sz)
+gpgpu_run_with_inline(intel_gpgpu_t *state, int32_t ki, size_t sz)
 {
   const uint32_t len = (uint32_t) (sz >> 2);
 
diff --git a/src/intel/genx_gpgpu.h b/src/intel/genx_gpgpu.h
index d263604..8f2e867 100644
--- a/src/intel/genx_gpgpu.h
+++ b/src/intel/genx_gpgpu.h
@@ -51,19 +51,19 @@ typedef struct genx_gpgpu_kernel
 struct intel_driver;
 
 /* Convenient way to talk to the device */
-typedef struct genx_gpgpu_state genx_gpgpu_state_t;
+typedef struct intel_gpgpu intel_gpgpu_t;
 
 /* Buffer object as exposed by drm_intel */
 struct _drm_intel_bo;
 
 /* Allocate and initialize a GPGPU state */
-extern genx_gpgpu_state_t* genx_gpgpu_state_new(struct intel_driver*);
+extern intel_gpgpu_t* intel_gpgpu_new(struct intel_driver*);
 
 /* Destroy and deallocate a GPGPU state */
-extern void genx_gpgpu_state_delete(genx_gpgpu_state_t*);
+extern void intel_gpgpu_delete(intel_gpgpu_t*);
 
 /* Set surface descriptor in the current binding table */
-extern void gpgpu_bind_surf_2d(genx_gpgpu_state_t*,
+extern void gpgpu_bind_surf_2d(intel_gpgpu_t*,
                                int32_t index,
                                struct _drm_intel_bo* obj_bo,
                                uint32_t offset,
@@ -79,7 +79,7 @@ extern void gpgpu_bind_surf_2d(genx_gpgpu_state_t*,
  * Automatically determines and sets tiling mode which is transparently
  * supported by media block read/write
  */
-extern void gpgpu_bind_shared_surf_2d(genx_gpgpu_state_t*,
+extern void gpgpu_bind_shared_surf_2d(intel_gpgpu_t*,
                                       int32_t index,
                                       struct _drm_intel_bo* obj_bo,
                                       uint32_t offset,
@@ -92,7 +92,7 @@ extern void gpgpu_bind_shared_surf_2d(genx_gpgpu_state_t*,
                                       uint32_t cchint);
 
 /* Set typeless buffer descriptor in the current binding table */
-extern void gpgpu_bind_buf(genx_gpgpu_state_t*,
+extern void gpgpu_bind_buf(intel_gpgpu_t*,
                            int32_t index,
                            struct _drm_intel_bo* obj_bo,
                            uint32_t offset,
@@ -100,7 +100,7 @@ extern void gpgpu_bind_buf(genx_gpgpu_state_t*,
                            uint32_t cchint);
 
 /* Configure state, size in 512-bit units */
-extern void gpgpu_state_init(genx_gpgpu_state_t*,
+extern void gpgpu_state_init(intel_gpgpu_t*,
                              uint32_t max_threads,
                              uint32_t size_vfe_entry,
                              uint32_t num_vfe_entries,
@@ -108,39 +108,39 @@
                              uint32_t num_cs_entries);
 
 /* Set the buffer object where to report performance counters */
-extern void gpgpu_set_perf_counters(genx_gpgpu_state_t*, struct _drm_intel_bo *perf);
+extern void gpgpu_set_perf_counters(intel_gpgpu_t*, struct _drm_intel_bo *perf);
 
 /* Fills current constant buffer with data */
-extern void gpgpu_upload_constants(genx_gpgpu_state_t*, void* data, uint32_t size);
+extern void gpgpu_upload_constants(intel_gpgpu_t*, void* data, uint32_t size);
 
 /* Setup all indirect states */
-extern void gpgpu_states_setup(genx_gpgpu_state_t*, genx_gpgpu_kernel_t* kernel, uint32_t ker_n);
+extern void gpgpu_states_setup(intel_gpgpu_t*, genx_gpgpu_kernel_t* kernel, uint32_t ker_n);
 
 /* Make HW threads use barrierID */
-extern void gpgpu_update_barrier(genx_gpgpu_state_t*, uint32_t barrierID, uint32_t thread_n);
+extern void gpgpu_update_barrier(intel_gpgpu_t*, uint32_t barrierID, uint32_t thread_n);
 
 /* Set a sampler (TODO: add other sampler fields) */
-extern void gpgpu_set_sampler(genx_gpgpu_state_t*, uint32_t index, uint32_t non_normalized);
+extern void gpgpu_set_sampler(intel_gpgpu_t*, uint32_t index, uint32_t non_normalized);
 
 /* Allocate the batch buffer and return the BO used for the batch buffer */
-extern void gpgpu_batch_reset(genx_gpgpu_state_t*, size_t sz);
+extern void gpgpu_batch_reset(intel_gpgpu_t*, size_t sz);
 
 /* Atomic begin, pipeline select, urb, pipeline state and constant buffer */
-extern void gpgpu_batch_start(genx_gpgpu_state_t*);
+extern void gpgpu_batch_start(intel_gpgpu_t*);
 
 /* atomic end with possibly inserted flush */
-extern void gpgpu_batch_end(genx_gpgpu_state_t*, int32_t flush_mode);
+extern void gpgpu_batch_end(intel_gpgpu_t*, int32_t flush_mode);
 
 /* Emit MI_FLUSH */
-extern void gpgpu_flush(genx_gpgpu_state_t*);
+extern void gpgpu_flush(intel_gpgpu_t*);
 
 /* Enqueue a MEDIA object with no inline data */
-extern void gpgpu_run(genx_gpgpu_state_t*, int32_t ki);
+extern void gpgpu_run(intel_gpgpu_t*, int32_t ki);
 
 /* Enqueue a MEDIA object with inline data to push afterward. Returns the
  * pointer where to push. sz is the size of the data we are going to pass */
-extern char* gpgpu_run_with_inline(genx_gpgpu_state_t*, int32_t ki, size_t sz);
+extern char* gpgpu_run_with_inline(intel_gpgpu_t*, int32_t ki, size_t sz);
 
 #endif /* __GENX_GPGPU_H__ */
--
2.7.4
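
A minimal usage sketch of the renamed API (not part of the patch): the call
order is modeled on cl_command_queue.c above, and only entry points whose
signatures are fully visible in genx_gpgpu.h are used. The driver pointer,
kernel descriptor, and batch size are hypothetical placeholders.

#include "intel/genx_gpgpu.h"

/* Run one pre-built kernel descriptor through the GPGPU pipeline. */
static void run_one_kernel(struct intel_driver *drv, genx_gpgpu_kernel_t *kernel)
{
  intel_gpgpu_t *gpgpu = intel_gpgpu_new(drv); /* allocate the GPGPU state */
  gpgpu_batch_reset(gpgpu, 4096);              /* allocate the batch buffer (size is a guess) */
  gpgpu_states_setup(gpgpu, kernel, 1);        /* sampler, binding and interface tables */
  gpgpu_batch_start(gpgpu);                    /* pipeline select, base addresses, VFE, CURBE */
  gpgpu_run(gpgpu, 0);                         /* enqueue a MEDIA object for kernel index 0 */
  gpgpu_batch_end(gpgpu, 0);                   /* close the batch; flush_mode 0 assumed */
  gpgpu_flush(gpgpu);                          /* submit the batch to the GPU */
  intel_gpgpu_delete(gpgpu);                   /* destroy the GPGPU state */
}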