From e4dc005bcebfa5723aeba3e0cffce986bdd4d52d Mon Sep 17 00:00:00 2001
From: Kenneth Graunke
Date: Tue, 28 Mar 2017 14:45:59 -0700
Subject: [PATCH] i965: Combine intel_batchbuffer_reloc and intel_batchbuffer_reloc64

These two functions do the exact same thing.  One returns a uint64_t,
and the other takes the same uint64_t and truncates it to a uint32_t.
We only need the uint64_t variant - the caller can truncate if it wants.

This patch gives us one function, intel_batchbuffer_reloc, that does
the 64-bit thing.

Reviewed-by: Jason Ekstrand
---
 src/mesa/drivers/dri/i965/genX_blorp_exec.c   | 15 +++---------
 src/mesa/drivers/dri/i965/intel_batchbuffer.c | 22 +----------------
 src/mesa/drivers/dri/i965/intel_batchbuffer.h | 35 +++++++++++----------------
 3 files changed, 19 insertions(+), 53 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/genX_blorp_exec.c b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
index 35310fa..b8dcf9f 100644
--- a/src/mesa/drivers/dri/i965/genX_blorp_exec.c
+++ b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
@@ -55,17 +55,10 @@ blorp_emit_reloc(struct blorp_batch *batch,
    struct brw_context *brw = batch->driver_batch;
    uint32_t offset = (char *)location - (char *)brw->batch.map;
 
-   if (brw->gen >= 8) {
-      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
-                                       address.read_domains,
-                                       address.write_domain,
-                                       address.offset + delta);
-   } else {
-      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
-                                     address.read_domains,
-                                     address.write_domain,
-                                     address.offset + delta);
-   }
+   return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
+                                  address.read_domains,
+                                  address.write_domain,
+                                  address.offset + delta);
 }
 
 static void
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 49a00c1..1599a2c 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -576,7 +576,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
 /* This is the only way buffers get added to the validate list.
  */
-uint32_t
+uint64_t
 intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                         drm_intel_bo *buffer, uint32_t offset,
                         uint32_t read_domains, uint32_t write_domain,
                         uint32_t delta)
@@ -597,26 +597,6 @@ intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
    return buffer->offset64 + delta;
 }
 
-uint64_t
-intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                          drm_intel_bo *buffer, uint32_t offset,
-                          uint32_t read_domains, uint32_t write_domain,
-                          uint32_t delta)
-{
-   int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
-                                     buffer, delta,
-                                     read_domains, write_domain);
-   assert(ret == 0);
-   (void) ret;
-
-   /* Using the old buffer offset, write in what the right data would be, in
-    * case the buffer doesn't move and we can short-circuit the relocation
-    * processing in the kernel
-    */
-   return buffer->offset64 + delta;
-}
-
-
 void
 intel_batchbuffer_data(struct brw_context *brw,
                        const void *data, GLuint bytes, enum brw_gpu_ring ring)
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index 01d4804..cf545ec 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -64,18 +64,12 @@ void intel_batchbuffer_data(struct brw_context *brw,
                             const void *data, GLuint bytes,
                             enum brw_gpu_ring ring);
 
-uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
+uint64_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                                  drm_intel_bo *buffer,
                                  uint32_t offset,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
-uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                                   drm_intel_bo *buffer,
-                                   uint32_t offset,
-                                   uint32_t read_domains,
-                                   uint32_t write_domain,
-                                   uint32_t delta);
 
 #define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
 
@@ -161,23 +155,22 @@ intel_batchbuffer_advance(struct brw_context *brw)
 #define OUT_BATCH(d) *__map++ = (d)
 #define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
 
-#define OUT_RELOC(buf, read_domains, write_domain, delta) do {              \
-   uint32_t __offset = (__map - brw->batch.map) * 4;                        \
-   OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset,          \
-                                     (read_domains),                        \
-                                     (write_domain),                        \
-                                     (delta)));                             \
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do {              \
+   uint32_t __offset = (__map - brw->batch.map) * 4;                        \
+   uint32_t reloc =                                                         \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset,                 \
+                              (read_domains), (write_domain), (delta));     \
+   OUT_BATCH(reloc);                                                        \
 } while (0)
 
 /* Handle 48-bit address relocations for Gen8+ */
-#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {            \
-   uint32_t __offset = (__map - brw->batch.map) * 4;                        \
-   uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
-                                                (read_domains),             \
-                                                (write_domain),             \
-                                                (delta));                   \
-   OUT_BATCH(reloc64);                                                      \
-   OUT_BATCH(reloc64 >> 32);                                                \
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {            \
+   uint32_t __offset = (__map - brw->batch.map) * 4;                        \
+   uint64_t reloc64 =                                                       \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset,                 \
+                              (read_domains), (write_domain), (delta));     \
+   OUT_BATCH(reloc64);                                                      \
+   OUT_BATCH(reloc64 >> 32);                                                \
 } while (0)
 
 #define ADVANCE_BATCH()                                         \
-- 
2.7.4
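
A quick sketch of the "caller can truncate" idea from the commit message
(not part of the patch; emit_dword() and the gen variable are hypothetical
stand-ins for the driver's real batch-emission helpers and generation check):

   uint64_t reloc = intel_batchbuffer_reloc(batch, bo, offset,
                                            read_domains, write_domain,
                                            delta);
   if (gen < 8) {
      /* Pre-Gen8: addresses fit in 32 bits, so keep only the low dword. */
      emit_dword(batch, (uint32_t) reloc);
   } else {
      /* Gen8+: emit both halves of the 48-bit address. */
      emit_dword(batch, (uint32_t) reloc);
      emit_dword(batch, (uint32_t) (reloc >> 32));
   }

This mirrors what the patched OUT_RELOC and OUT_RELOC64 macros do, so a
separate 32-bit-returning helper is unnecessary.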