/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
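
/*
 * Batch buffer management: GPU commands are accumulated dword by dword
 * in a mapped libdrm buffer object and handed to the kernel as a single
 * execbuffer when the batch is flushed.
 */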

#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"

#define MAX_BATCH_SIZE      0x400000

static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch, int buffer_size)
{
    struct intel_driver_data *intel = batch->intel;
    int batch_size = buffer_size;

    assert(batch->flag == I915_EXEC_RENDER ||
           batch->flag == I915_EXEC_BLT ||
           batch->flag == I915_EXEC_BSD ||
           batch->flag == I915_EXEC_VEBOX);

    dri_bo_unreference(batch->buffer);
    batch->buffer = dri_bo_alloc(intel->bufmgr,
                                 "batch buffer",
                                 batch_size,
                                 0x1000);
    assert(batch->buffer);
    dri_bo_map(batch->buffer, 1); /* map writable */
    assert(batch->buffer->virtual);
    batch->map = batch->buffer->virtual;
    batch->size = batch_size;
    batch->ptr = batch->map;
    batch->atomic = 0;
}

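/*
 * Bytes still available for new commands.  BATCH_RESERVED (assumed to
 * come from intel_batchbuffer.h) is held back so that
 * intel_batchbuffer_flush() always has room to append padding and the
 * MI_BATCH_BUFFER_END marker.
 */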
static unsigned int
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
    return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
}

struct intel_batchbuffer *
intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size)
{
    struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

    assert(batch);
    assert(flag == I915_EXEC_RENDER ||
           flag == I915_EXEC_BSD ||
           flag == I915_EXEC_BLT ||
           flag == I915_EXEC_VEBOX);

    if (buffer_size < BATCH_SIZE) {
        buffer_size = BATCH_SIZE;
    }

    /* cap the buffer size at 4 MiB */
    if (buffer_size > MAX_BATCH_SIZE) {
        buffer_size = MAX_BATCH_SIZE;
    }

    batch->intel = intel;
    batch->flag = flag;
    batch->run = drm_intel_bo_mrb_exec;
    intel_batchbuffer_reset(batch, buffer_size);

    return batch;
}

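/*
 * Typical lifecycle, as a sketch (BATCH_SIZE and the BEGIN/OUT/ADVANCE
 * macros are assumed to come from intel_batchbuffer.h):
 *
 *     struct intel_batchbuffer *batch =
 *         intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);
 *
 *     BEGIN_BATCH(batch, 1);
 *     OUT_BATCH(batch, MI_FLUSH);
 *     ADVANCE_BATCH(batch);
 *
 *     intel_batchbuffer_flush(batch);
 *     intel_batchbuffer_free(batch);
 */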
void intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
    if (batch->map) {
        dri_bo_unmap(batch->buffer);
        batch->map = NULL;
    }

    dri_bo_unreference(batch->buffer);
    free(batch);
}

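/*
 * Close the batch with MI_BATCH_BUFFER_END, submit it to the kernel via
 * batch->run, and reset the buffer so it can be refilled.
 */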
void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
    unsigned int used = batch->ptr - batch->map;

    if (used == 0) {
        return;
    }

    /*
     * The batch must end with MI_BATCH_BUFFER_END and its total length
     * must be QWord (8-byte) aligned, so emit a no-op padding dword
     * when the buffer is currently QWord aligned.
     */
    if ((used & 4) == 0) {
        *(unsigned int *)batch->ptr = 0;
        batch->ptr += 4;
    }

    *(unsigned int *)batch->ptr = MI_BATCH_BUFFER_END;
    batch->ptr += 4;
    dri_bo_unmap(batch->buffer);
    used = batch->ptr - batch->map;
    batch->run(batch->buffer, used, 0, 0, 0, batch->flag);
    intel_batchbuffer_reset(batch, batch->size);
}

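/* Append one dword; callers must have reserved space beforehand. */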
void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int x)
{
    assert(intel_batchbuffer_space(batch) >= 4);
    *(unsigned int *)batch->ptr = x;
    batch->ptr += 4;
}

void
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo,
                             uint32_t read_domains, uint32_t write_domains,
                             uint32_t delta)
{
    assert(batch->ptr - batch->map < batch->size);
    /* Record a relocation so the kernel can patch in the final GPU address. */
    dri_bo_emit_reloc(batch->buffer, read_domains, write_domains,
                      delta, batch->ptr - batch->map, bo);
    /* Emit the presumed offset; the kernel rewrites it if the bo moved. */
    intel_batchbuffer_emit_dword(batch, bo->offset + delta);
}

void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                unsigned int size)
{
    assert(size < batch->size - 8);

    if (intel_batchbuffer_space(batch) < size) {
        intel_batchbuffer_flush(batch);
    }
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       void *data,
                       unsigned int size)
{
    assert((size & 3) == 0);
    intel_batchbuffer_require_space(batch, size);

    assert(batch->ptr);
    memcpy(batch->ptr, data, size);
    batch->ptr += size;
}

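/*
 * Emit a pipeline flush suited to the target ring and GPU generation:
 * PIPE_CONTROL on the Gen6/Gen7 render ring, MI_FLUSH_DW on the
 * BLT/BSD/VEBOX rings, and the legacy MI_FLUSH command otherwise.
 */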
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
    struct intel_driver_data *intel = batch->intel;

    if (IS_GEN6(intel->device_id) ||
        IS_GEN7(intel->device_id)) {
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 4);
            /* 4-dword PIPE_CONTROL; the length field encodes dwords - 2 */
            OUT_BATCH(batch, CMD_PIPE_CONTROL | 0x2);

            if (IS_GEN6(intel->device_id))
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
            else
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);

            OUT_BATCH(batch, 0); /* address (unused with NOWRITE) */
            OUT_BATCH(batch, 0); /* immediate data (unused) */
            ADVANCE_BATCH(batch);
        } else {
            if (batch->flag == I915_EXEC_BLT) {
                BEGIN_BLT_BATCH(batch, 4);
                OUT_BLT_BATCH(batch, MI_FLUSH_DW);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                ADVANCE_BLT_BATCH(batch);
            } else if (batch->flag == I915_EXEC_VEBOX) {
                BEGIN_VEB_BATCH(batch, 4);
                OUT_VEB_BATCH(batch, MI_FLUSH_DW);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                ADVANCE_VEB_BATCH(batch);
            } else {
                assert(batch->flag == I915_EXEC_BSD);
                BEGIN_BCS_BATCH(batch, 4);
                OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                ADVANCE_BCS_BATCH(batch);
            }
        }
    } else {
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 1);
            OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BATCH(batch);
        } else {
            assert(batch->flag == I915_EXEC_BSD);
            BEGIN_BCS_BATCH(batch, 1);
            OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BCS_BATCH(batch);
        }
    }
}

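/*
 * begin/advance bracket a fixed-size run of dwords: the begin call
 * records how many dwords the caller intends to emit, and the advance
 * call asserts that exactly that many were written.
 */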
void
intel_batchbuffer_begin_batch(struct intel_batchbuffer *batch, int total)
{
    batch->emit_total = total * 4;
    batch->emit_start = batch->ptr;
}

void
intel_batchbuffer_advance_batch(struct intel_batchbuffer *batch)
{
    assert(batch->emit_total == (batch->ptr - batch->emit_start));
}

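/*
 * Switching rings requires submitting whatever has been queued so far;
 * unrecognized flags are ignored and the current ring is kept.
 */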
void
intel_batchbuffer_check_batchbuffer_flag(struct intel_batchbuffer *batch, int flag)
{
    if (flag != I915_EXEC_RENDER &&
        flag != I915_EXEC_BLT &&
        flag != I915_EXEC_BSD &&
        flag != I915_EXEC_VEBOX)
        return;

    if (batch->flag == flag)
        return;

    intel_batchbuffer_flush(batch);
    batch->flag = flag;
}

int
intel_batchbuffer_check_free_space(struct intel_batchbuffer *batch, int size)
{
    return intel_batchbuffer_space(batch) >= size;
}

static void
intel_batchbuffer_start_atomic_helper(struct intel_batchbuffer *batch,
                                      int flag,
                                      unsigned int size)
{
    assert(!batch->atomic);
    intel_batchbuffer_check_batchbuffer_flag(batch, flag);
    intel_batchbuffer_require_space(batch, size);
    batch->atomic = 1;
}

void
intel_batchbuffer_start_atomic(struct intel_batchbuffer *batch, unsigned int size)
{
    intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_RENDER, size);
}

void
intel_batchbuffer_start_atomic_blt(struct intel_batchbuffer *batch, unsigned int size)
{
    intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BLT, size);
}

void
intel_batchbuffer_start_atomic_bcs(struct intel_batchbuffer *batch, unsigned int size)
{
    intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BSD, size);
}

void
intel_batchbuffer_start_atomic_veb(struct intel_batchbuffer *batch, unsigned int size)
{
    intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_VEBOX, size);
}

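/*
 * An atomic region reserves its space up front so nothing inside it can
 * trigger an implicit flush.  A sketch of the intended pattern (the
 * 0x1000 reservation is an arbitrary example):
 *
 *     intel_batchbuffer_start_atomic(batch, 0x1000);
 *     ... emit state that must land in a single batch ...
 *     intel_batchbuffer_end_atomic(batch);
 *     intel_batchbuffer_flush(batch);
 */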
void
intel_batchbuffer_end_atomic(struct intel_batchbuffer *batch)
{
    assert(batch->atomic);
    batch->atomic = 0;
}

int
intel_batchbuffer_used_size(struct intel_batchbuffer *batch)
{
    return batch->ptr - batch->map;
}

void
intel_batchbuffer_align(struct intel_batchbuffer *batch, unsigned int alignment)
{
    int used = batch->ptr - batch->map;
    int pad_size;

    assert((alignment & 3) == 0);
    pad_size = ALIGN(used, alignment) - used;
    assert((pad_size & 3) == 0);
    assert(intel_batchbuffer_space(batch) >= pad_size);

    /* Pad with no-op dwords up to the requested alignment. */
    while (pad_size >= 4) {
        intel_batchbuffer_emit_dword(batch, 0);
        pad_size -= 4;
    }
}