MPEG-2 encoding path
[profile/ivi/vaapi-intel-driver.git] / src / intel_batchbuffer.c
1 /**************************************************************************                                                                                  
2  *                                                                                                                                                           
3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.                                                                                                
4  * All Rights Reserved.                                                                                                                                      
5  *                                                                                                                                                           
6  * Permission is hereby granted, free of charge, to any person obtaining a                                                                                   
7  * copy of this software and associated documentation files (the                                                                                             
8  * "Software"), to deal in the Software without restriction, including                                                                                       
9  * without limitation the rights to use, copy, modify, merge, publish,                                                                                       
10  * distribute, sub license, and/or sell copies of the Software, and to                                                                                       
11  * permit persons to whom the Software is furnished to do so, subject to                                                                                     
12  * the following conditions:                                                                                                                                 
13  *                                                                                                                                                           
14  * The above copyright notice and this permission notice (including the                                                                                      
15  * next paragraph) shall be included in all copies or substantial portions                                                                                   
16  * of the Software.                                                                                                                                          
17  *                                                                                                                                                           
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS                                                                                   
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF                                                                                                
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.                                                                                   
21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR                                                                                    
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,                                                                                  
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE                                                                                         
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                                                                                                    
25  *                                                                                                                                                           
26  **************************************************************************/      
27
28 #include <stdlib.h>
29 #include <string.h>
30 #include <assert.h>
31
32 #include "intel_batchbuffer.h"
33
34 #define MAX_BATCH_SIZE          0x400000
35
36 static void 
37 intel_batchbuffer_reset(struct intel_batchbuffer *batch, int buffer_size)
38 {
39     struct intel_driver_data *intel = batch->intel; 
40     int batch_size = buffer_size;
41
42     assert(batch->flag == I915_EXEC_RENDER ||
43            batch->flag == I915_EXEC_BLT ||
44            batch->flag == I915_EXEC_BSD ||
45            batch->flag == I915_EXEC_VEBOX);
46
47     dri_bo_unreference(batch->buffer);
48     batch->buffer = dri_bo_alloc(intel->bufmgr, 
49                                  "batch buffer",
50                                  batch_size,
51                                  0x1000);
52     assert(batch->buffer);
53     dri_bo_map(batch->buffer, 1);
54     assert(batch->buffer->virtual);
55     batch->map = batch->buffer->virtual;
56     batch->size = batch_size;
57     batch->ptr = batch->map;
58     batch->atomic = 0;
59 }
60
61 static unsigned int
62 intel_batchbuffer_space(struct intel_batchbuffer *batch)
63 {
64     return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
65 }
66
67
68 struct intel_batchbuffer * 
69 intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size)
70 {
71     struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));
72     assert(flag == I915_EXEC_RENDER ||
73            flag == I915_EXEC_BSD ||
74            flag == I915_EXEC_BLT ||
75            flag == I915_EXEC_VEBOX);
76
77    if (!buffer_size || buffer_size < BATCH_SIZE) {
78         buffer_size = BATCH_SIZE;
79    }
80
81    /* the buffer size can't exceed 4M */
82    if (buffer_size > MAX_BATCH_SIZE) {
83         buffer_size = MAX_BATCH_SIZE;
84    }
85
86     batch->intel = intel;
87     batch->flag = flag;
88     batch->run = drm_intel_bo_mrb_exec;
89     intel_batchbuffer_reset(batch, buffer_size);
90
91     return batch;
92 }
93
94 void intel_batchbuffer_free(struct intel_batchbuffer *batch)
95 {
96     if (batch->map) {
97         dri_bo_unmap(batch->buffer);
98         batch->map = NULL;
99     }
100
101     dri_bo_unreference(batch->buffer);
102     free(batch);
103 }
104
/*
 * Terminate the command stream with MI_BATCH_BUFFER_END, unmap the bo,
 * submit it for execution on the ring selected by batch->flag, and
 * reset the batchbuffer with a fresh bo of the same size.  A no-op
 * when nothing has been emitted.
 */
void 
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
    unsigned int used = batch->ptr - batch->map;

    if (used == 0) {
        return;
    }

    /* The batch length must end up QWORD (8-byte) aligned.  'used' is
     * always a multiple of 4 here; when it is already a multiple of 8
     * ((used & 4) == 0), emit one zero dword of padding so that the
     * MI_BATCH_BUFFER_END appended below lands the total on an 8-byte
     * boundary.
     */
    if ((used & 4) == 0) {
        *(unsigned int*)batch->ptr = 0;
        batch->ptr += 4;
    }

    *(unsigned int*)batch->ptr = MI_BATCH_BUFFER_END;
    batch->ptr += 4;
    dri_bo_unmap(batch->buffer);
    used = batch->ptr - batch->map;
    /* batch->run is drm_intel_bo_mrb_exec(); batch->flag picks the ring. */
    batch->run(batch->buffer, used, 0, 0, 0, batch->flag);
    intel_batchbuffer_reset(batch, batch->size);
}
126
127 void 
128 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int x)
129 {
130     assert(intel_batchbuffer_space(batch) >= 4);
131     *(unsigned int *)batch->ptr = x;
132     batch->ptr += 4;
133 }
134
135 void 
136 intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo, 
137                                 uint32_t read_domains, uint32_t write_domains, 
138                                 uint32_t delta)
139 {
140     assert(batch->ptr - batch->map < batch->size);
141     dri_bo_emit_reloc(batch->buffer, read_domains, write_domains,
142                       delta, batch->ptr - batch->map, bo);
143     intel_batchbuffer_emit_dword(batch, bo->offset + delta);
144 }
145
146 void 
147 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
148                                    unsigned int size)
149 {
150     assert(size < batch->size - 8);
151
152     if (intel_batchbuffer_space(batch) < size) {
153         intel_batchbuffer_flush(batch);
154     }
155 }
156
157 void 
158 intel_batchbuffer_data(struct intel_batchbuffer *batch,
159                           void *data,
160                           unsigned int size)
161 {
162     assert((size & 3) == 0);
163     intel_batchbuffer_require_space(batch, size);
164
165     assert(batch->ptr);
166     memcpy(batch->ptr, data, size);
167     batch->ptr += size;
168 }
169
/*
 * Emit a cache-flush command appropriate for the current ring and GPU
 * generation, so that prior work is flushed before subsequent
 * commands execute.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
    struct intel_driver_data *intel = batch->intel;

    if (IS_GEN6(intel->device_id) ||
        IS_GEN7(intel->device_id)) {
        /* Gen6/Gen7: the render ring flushes via PIPE_CONTROL; the
         * BLT/VEBOX/BSD rings use MI_FLUSH_DW instead. */
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 4);
            OUT_BATCH(batch, CMD_PIPE_CONTROL | 0x2);

            /* Gen7 adds a depth-cache (DC) flush to the Gen6 set of
             * write-cache + texture-cache flushes; neither variant
             * performs a post-sync write (NOWRITE). */
            if (IS_GEN6(intel->device_id))
                OUT_BATCH(batch, 
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
            else
                OUT_BATCH(batch, 
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);

            /* Address / immediate dwords: unused without post-sync write. */
            OUT_BATCH(batch, 0);
            OUT_BATCH(batch, 0);
            ADVANCE_BATCH(batch);
        } else {
            if (batch->flag == I915_EXEC_BLT) {
                BEGIN_BLT_BATCH(batch, 4);
                OUT_BLT_BATCH(batch, MI_FLUSH_DW);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                ADVANCE_BLT_BATCH(batch);
            }else if (batch->flag == I915_EXEC_VEBOX) {
                BEGIN_VEB_BATCH(batch, 4);
                OUT_VEB_BATCH(batch, MI_FLUSH_DW);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                ADVANCE_VEB_BATCH(batch);
            } else {
                /* BSD (video) ring: also invalidate the video pipeline
                 * caches. */
                assert(batch->flag == I915_EXEC_BSD);
                BEGIN_BCS_BATCH(batch, 4);
                OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                ADVANCE_BCS_BATCH(batch);
            }
        }
    } else {
        /* Pre-Gen6: a single MI_FLUSH dword suffices on either ring. */
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 1);
            OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BATCH(batch);
         } else {
            assert(batch->flag == I915_EXEC_BSD);
            BEGIN_BCS_BATCH(batch, 1);
            OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BCS_BATCH(batch);
        }
    }
}
234
235 void
236 intel_batchbuffer_begin_batch(struct intel_batchbuffer *batch, int total)
237 {
238     batch->emit_total = total * 4;
239     batch->emit_start = batch->ptr;
240 }
241
242 void
243 intel_batchbuffer_advance_batch(struct intel_batchbuffer *batch)
244 {
245     assert(batch->emit_total == (batch->ptr - batch->emit_start));
246 }
247
248 void
249 intel_batchbuffer_check_batchbuffer_flag(struct intel_batchbuffer *batch, int flag)
250 {
251     if (flag != I915_EXEC_RENDER &&
252         flag != I915_EXEC_BLT &&
253         flag != I915_EXEC_BSD &&
254         flag != I915_EXEC_VEBOX)
255         return;
256
257     if (batch->flag == flag)
258         return;
259
260     intel_batchbuffer_flush(batch);
261     batch->flag = flag;
262 }
263
/* Nonzero when 'size' bytes fit in the current batch without a flush. */
int
intel_batchbuffer_check_free_space(struct intel_batchbuffer *batch, int size)
{
    return intel_batchbuffer_space(batch) >= size ? 1 : 0;
}
269
270 static void
271 intel_batchbuffer_start_atomic_helper(struct intel_batchbuffer *batch,
272                                       int flag,
273                                       unsigned int size)
274 {
275     assert(!batch->atomic);
276     intel_batchbuffer_check_batchbuffer_flag(batch, flag);
277     intel_batchbuffer_require_space(batch, size);
278     batch->atomic = 1;
279 }
280
281 void
282 intel_batchbuffer_start_atomic(struct intel_batchbuffer *batch, unsigned int size)
283 {
284     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_RENDER, size);
285 }
286
287 void
288 intel_batchbuffer_start_atomic_blt(struct intel_batchbuffer *batch, unsigned int size)
289 {
290     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BLT, size);
291 }
292
293 void
294 intel_batchbuffer_start_atomic_bcs(struct intel_batchbuffer *batch, unsigned int size)
295 {
296     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BSD, size);
297 }
298
299 void
300 intel_batchbuffer_start_atomic_veb(struct intel_batchbuffer *batch, unsigned int size)
301 {
302     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_VEBOX, size);
303 }
304
305
306 void
307 intel_batchbuffer_end_atomic(struct intel_batchbuffer *batch)
308 {
309     assert(batch->atomic);
310     batch->atomic = 0;
311 }
312
313 int
314 intel_batchbuffer_used_size(struct intel_batchbuffer *batch)
315 {
316     return batch->ptr - batch->map;
317 }
318
319 void
320 intel_batchbuffer_align(struct intel_batchbuffer *batch, unsigned int alignedment)
321 {
322     int used = batch->ptr - batch->map;
323     int pad_size;
324
325     assert((alignedment & 3) == 0);
326     pad_size = ALIGN(used, alignedment) - used;
327     assert((pad_size & 3) == 0);
328     assert(intel_batchbuffer_space(batch) >= pad_size);
329
330     while (pad_size >= 4) {
331         intel_batchbuffer_emit_dword(batch, 0);
332         pad_size -= 4;
333     }
334 }
335