add yuyv->nv12 conversion in image processing
[profile/ivi/vaapi-intel-driver.git] / src / intel_batchbuffer.c
1 /**************************************************************************                                                                                  
2  *                                                                                                                                                           
3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.                                                                                                
4  * All Rights Reserved.                                                                                                                                      
5  *                                                                                                                                                           
6  * Permission is hereby granted, free of charge, to any person obtaining a                                                                                   
7  * copy of this software and associated documentation files (the                                                                                             
8  * "Software"), to deal in the Software without restriction, including                                                                                       
9  * without limitation the rights to use, copy, modify, merge, publish,                                                                                       
10  * distribute, sub license, and/or sell copies of the Software, and to                                                                                       
11  * permit persons to whom the Software is furnished to do so, subject to                                                                                     
12  * the following conditions:                                                                                                                                 
13  *                                                                                                                                                           
14  * The above copyright notice and this permission notice (including the                                                                                      
15  * next paragraph) shall be included in all copies or substantial portions                                                                                   
16  * of the Software.                                                                                                                                          
17  *                                                                                                                                                           
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS                                                                                   
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF                                                                                                
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.                                                                                   
21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR                                                                                    
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,                                                                                  
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE                                                                                         
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                                                                                                    
25  *                                                                                                                                                           
26  **************************************************************************/      
27
28 #include <stdlib.h>
29 #include <string.h>
30 #include <assert.h>
31
32 #include "intel_batchbuffer.h"
33
34 static void 
35 intel_batchbuffer_reset(struct intel_batchbuffer *batch)
36 {
37     struct intel_driver_data *intel = batch->intel; 
38     int batch_size = BATCH_SIZE;
39
40     assert(batch->flag == I915_EXEC_RENDER ||
41            batch->flag == I915_EXEC_BLT ||
42            batch->flag == I915_EXEC_BSD);
43
44     dri_bo_unreference(batch->buffer);
45     batch->buffer = dri_bo_alloc(intel->bufmgr, 
46                                  "batch buffer",
47                                  batch_size,
48                                  0x1000);
49     assert(batch->buffer);
50     dri_bo_map(batch->buffer, 1);
51     assert(batch->buffer->virtual);
52     batch->map = batch->buffer->virtual;
53     batch->size = batch_size;
54     batch->ptr = batch->map;
55     batch->atomic = 0;
56 }
57
58 static unsigned int
59 intel_batchbuffer_space(struct intel_batchbuffer *batch)
60 {
61     return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
62 }
63
64
65 struct intel_batchbuffer * 
66 intel_batchbuffer_new(struct intel_driver_data *intel, int flag)
67 {
68     struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));
69     assert(flag == I915_EXEC_RENDER ||
70            flag == I915_EXEC_BSD ||
71            flag == I915_EXEC_BLT);
72
73     batch->intel = intel;
74     batch->flag = flag;
75     batch->run = drm_intel_bo_mrb_exec;
76     intel_batchbuffer_reset(batch);
77
78     return batch;
79 }
80
81 void intel_batchbuffer_free(struct intel_batchbuffer *batch)
82 {
83     if (batch->map) {
84         dri_bo_unmap(batch->buffer);
85         batch->map = NULL;
86     }
87
88     dri_bo_unreference(batch->buffer);
89     free(batch);
90 }
91
/*
 * Close the current batch and submit it to the kernel, then reset for
 * the next sequence.  A batch with nothing emitted is a no-op.
 *
 * The total batch length (payload + MI_BATCH_BUFFER_END) must stay
 * quad-word (8-byte) aligned, hence the conditional NOOP pad below.
 */
void 
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
    unsigned int used = batch->ptr - batch->map;

    /* Nothing emitted since the last reset: nothing to submit. */
    if (used == 0) {
        return;
    }

    /* Bit 2 clear means 'used' is a multiple of 8; pad one zero dword so
     * that used + 4 (the MI_BATCH_BUFFER_END below) ends qword aligned. */
    if ((used & 4) == 0) {
        *(unsigned int*)batch->ptr = 0;
        batch->ptr += 4;
    }

    *(unsigned int*)batch->ptr = MI_BATCH_BUFFER_END;
    batch->ptr += 4;
    dri_bo_unmap(batch->buffer);
    used = batch->ptr - batch->map;
    /* Submit on the ring selected by batch->flag (see intel_batchbuffer_new:
     * run is drm_intel_bo_mrb_exec). */
    batch->run(batch->buffer, used, 0, 0, 0, batch->flag);
    intel_batchbuffer_reset(batch);
}
113
114 void 
115 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int x)
116 {
117     assert(intel_batchbuffer_space(batch) >= 4);
118     *(unsigned int *)batch->ptr = x;
119     batch->ptr += 4;
120 }
121
122 void 
123 intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo, 
124                                 uint32_t read_domains, uint32_t write_domains, 
125                                 uint32_t delta)
126 {
127     assert(batch->ptr - batch->map < batch->size);
128     dri_bo_emit_reloc(batch->buffer, read_domains, write_domains,
129                       delta, batch->ptr - batch->map, bo);
130     intel_batchbuffer_emit_dword(batch, bo->offset + delta);
131 }
132
133 void 
134 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
135                                    unsigned int size)
136 {
137     assert(size < batch->size - 8);
138
139     if (intel_batchbuffer_space(batch) < size) {
140         intel_batchbuffer_flush(batch);
141     }
142 }
143
144 void 
145 intel_batchbuffer_data(struct intel_batchbuffer *batch,
146                           void *data,
147                           unsigned int size)
148 {
149     assert((size & 3) == 0);
150     intel_batchbuffer_require_space(batch, size);
151
152     assert(batch->ptr);
153     memcpy(batch->ptr, data, size);
154     batch->ptr += size;
155 }
156
/*
 * Emit a pipeline flush appropriate for the device generation and the
 * ring this batch targets:
 *
 *   GEN6/GEN7, render ring: PIPE_CONTROL with write-cache + texture-cache
 *       flush (GEN7 additionally flushes the depth cache), no post-sync
 *       write (NOWRITE).
 *   GEN6/GEN7, BLT ring:    MI_FLUSH_DW.
 *   GEN6/GEN7, BSD ring:    MI_FLUSH_DW invalidating the video pipeline
 *       cache.
 *   Older gens:             single-dword MI_FLUSH invalidating the state/
 *       instruction cache, on the render or BSD ring.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
    struct intel_driver_data *intel = batch->intel; 

    if (IS_GEN6(intel->device_id) ||
        IS_GEN7(intel->device_id)) {
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 4);
            OUT_BATCH(batch, CMD_PIPE_CONTROL | 0x2);

            /* GEN7 needs an explicit depth-cache (DC) flush on top of the
             * GEN6 write-cache/texture-cache flush. */
            if (IS_GEN6(intel->device_id))
                OUT_BATCH(batch, 
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
            else
                OUT_BATCH(batch, 
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);

            /* Address / immediate dwords unused with NOWRITE. */
            OUT_BATCH(batch, 0);
            OUT_BATCH(batch, 0);
            ADVANCE_BATCH(batch);
        } else {
            if (batch->flag == I915_EXEC_BLT) {
                BEGIN_BLT_BATCH(batch, 4);
                OUT_BLT_BATCH(batch, MI_FLUSH_DW);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                ADVANCE_BLT_BATCH(batch);
            } else {
                assert(batch->flag == I915_EXEC_BSD);
                BEGIN_BCS_BATCH(batch, 4);
                OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                ADVANCE_BCS_BATCH(batch);
            }
        }
    } else {
        /* Pre-GEN6: MI_FLUSH is a single dword; BLT has no separate ring. */
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 1);
            OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BATCH(batch);
        } else {
            assert(batch->flag == I915_EXEC_BSD);
            BEGIN_BCS_BATCH(batch, 1);
            OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BCS_BATCH(batch);
        }
    }
}
214
215 void
216 intel_batchbuffer_begin_batch(struct intel_batchbuffer *batch, int total)
217 {
218     batch->emit_total = total * 4;
219     batch->emit_start = batch->ptr;
220 }
221
222 void
223 intel_batchbuffer_advance_batch(struct intel_batchbuffer *batch)
224 {
225     assert(batch->emit_total == (batch->ptr - batch->emit_start));
226 }
227
228 void
229 intel_batchbuffer_check_batchbuffer_flag(struct intel_batchbuffer *batch, int flag)
230 {
231     if (flag != I915_EXEC_RENDER &&
232         flag != I915_EXEC_BLT &&
233         flag != I915_EXEC_BSD)
234         return;
235
236     if (batch->flag == flag)
237         return;
238
239     intel_batchbuffer_flush(batch);
240     batch->flag = flag;
241 }
242
/* Nonzero when @size bytes would fit without triggering a flush. */
int
intel_batchbuffer_check_free_space(struct intel_batchbuffer *batch, int size)
{
    unsigned int room = intel_batchbuffer_space(batch);

    return room >= size;
}
248
249 static void
250 intel_batchbuffer_start_atomic_helper(struct intel_batchbuffer *batch,
251                                       int flag,
252                                       unsigned int size)
253 {
254     assert(!batch->atomic);
255     intel_batchbuffer_check_batchbuffer_flag(batch, flag);
256     intel_batchbuffer_require_space(batch, size);
257     batch->atomic = 1;
258 }
259
260 void
261 intel_batchbuffer_start_atomic(struct intel_batchbuffer *batch, unsigned int size)
262 {
263     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_RENDER, size);
264 }
265
266 void
267 intel_batchbuffer_start_atomic_blt(struct intel_batchbuffer *batch, unsigned int size)
268 {
269     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BLT, size);
270 }
271
272 void
273 intel_batchbuffer_start_atomic_bcs(struct intel_batchbuffer *batch, unsigned int size)
274 {
275     intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BSD, size);
276 }
277
278 void
279 intel_batchbuffer_end_atomic(struct intel_batchbuffer *batch)
280 {
281     assert(batch->atomic);
282     batch->atomic = 0;
283 }
284
285 int
286 intel_batchbuffer_used_size(struct intel_batchbuffer *batch)
287 {
288     return batch->ptr - batch->map;
289 }
290
291 void
292 intel_batchbuffer_align(struct intel_batchbuffer *batch, unsigned int alignedment)
293 {
294     int used = batch->ptr - batch->map;
295     int pad_size;
296
297     assert((alignedment & 3) == 0);
298     pad_size = ALIGN(used, alignedment) - used;
299     assert((pad_size & 3) == 0);
300     assert(intel_batchbuffer_space(batch) >= pad_size);
301
302     while (pad_size >= 4) {
303         intel_batchbuffer_emit_dword(batch, 0);
304         pad_size -= 4;
305     }
306 }
307