546e5ba708e62b75dc07be02c7e14b33d75364a2
[profile/ivi/vaapi-intel-driver.git] / src / i965_gpe_utils.c
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Xiang Haihao <haihao.xiang@intel.com>
25  */
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <assert.h>
31
32 #include "intel_batchbuffer.h"
33 #include "intel_driver.h"
34
35 #include "i965_gpe_utils.h"
36
/* Switch the GPU command streamer to the media (GPE) pipeline.
 * Must be emitted before any media state or object commands. */
static void
i965_gpe_select(VADriverContextP ctx,
                struct i965_gpe_context *gpe_context,
                struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
46
/* Emit STATE_BASE_ADDRESS (10 dwords) for the media pipeline.
 * Only the surface-state base points at a real buffer (the combined
 * surface-state/binding-table BO); every other base and upper bound is
 * written as zero with the modify-enable bit set, i.e. "update this
 * field to 0". The dword order is fixed by the command layout. */
static void
gen6_gpe_state_base_address(VADriverContextP ctx,
                            struct i965_gpe_context *gpe_context,
                            struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 10);

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* General State Base Address */
    OUT_RELOC(batch,
              gpe_context->surface_state_binding_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION,
              0,
              BASE_ADDRESS_MODIFY);                     /* Surface state base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Dynamic State Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Indirect Object Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Instruction Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* General State Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Dynamic State Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Indirect Object Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Instruction Access Upper Bound */

    ADVANCE_BATCH(batch);
}
71
/* Emit MEDIA_VFE_STATE (8 dwords): configure the video front end with
 * the thread, URB and CURBE sizing stored in gpe_context->vfe_state.
 * Scratch space and the scoreboard are left disabled (zero). */
static void
gen6_gpe_vfe_state(VADriverContextP ctx,
                   struct i965_gpe_context *gpe_context,
                   struct intel_batchbuffer *batch)
{

    BEGIN_BATCH(batch, 8);

    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (8 - 2));
    OUT_BATCH(batch, 0);                                        /* Scratch Space Base Pointer and Space */
    OUT_BATCH(batch,
              gpe_context->vfe_state.max_num_threads << 16 |    /* Maximum Number of Threads */
              gpe_context->vfe_state.num_urb_entries << 8 |     /* Number of URB Entries */
              gpe_context->vfe_state.gpgpu_mode << 2);          /* MEDIA Mode */
    OUT_BATCH(batch, 0);                                        /* Debug: Object ID */
    OUT_BATCH(batch,
              gpe_context->vfe_state.urb_entry_size << 16 |     /* URB Entry Allocation Size */
              gpe_context->vfe_state.curbe_allocation_size);    /* CURBE Allocation Size */
    OUT_BATCH(batch, 0);                                        /* Disable Scoreboard */
    OUT_BATCH(batch, 0);                                        /* Disable Scoreboard */
    OUT_BATCH(batch, 0);                                        /* Disable Scoreboard */

    ADVANCE_BATCH(batch);

}
97
/* Emit MEDIA_CURBE_LOAD: point the hardware at the CURBE (constant
 * URB entry) buffer and tell it how many bytes to fetch. */
static void
gen6_gpe_curbe_load(VADriverContextP ctx,
                    struct i965_gpe_context *gpe_context,
                    struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 4);

    OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, gpe_context->curbe.length);        /* CURBE data length in bytes */
    OUT_RELOC(batch, gpe_context->curbe.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(batch);
}
112
/* Emit MEDIA_INTERFACE_DESCRIPTOR_LOAD: point the hardware at the
 * interface descriptor remap table (IDRT) buffer.  The length is the
 * full table size: entry size times the maximum number of entries. */
static void
gen6_gpe_idrt(VADriverContextP ctx,
              struct i965_gpe_context *gpe_context,
              struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 4);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, gpe_context->idrt.max_entries * gpe_context->idrt.entry_size);
    OUT_RELOC(batch, gpe_context->idrt.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(batch);
}
127
128 void
129 i965_gpe_load_kernels(VADriverContextP ctx,
130                       struct i965_gpe_context *gpe_context,
131                       struct i965_kernel *kernel_list,
132                       unsigned int num_kernels)
133 {
134     struct i965_driver_data *i965 = i965_driver_data(ctx);
135     int i;
136
137     assert(num_kernels <= MAX_GPE_KERNELS);
138     memcpy(gpe_context->kernels, kernel_list, sizeof(*kernel_list) * num_kernels);
139     gpe_context->num_kernels = num_kernels;
140
141     for (i = 0; i < num_kernels; i++) {
142         struct i965_kernel *kernel = &gpe_context->kernels[i];
143
144         kernel->bo = dri_bo_alloc(i965->intel.bufmgr, 
145                                   kernel->name, 
146                                   kernel->size,
147                                   0x1000);
148         assert(kernel->bo);
149         dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
150     }
151 }
152
153 void
154 i965_gpe_context_destroy(struct i965_gpe_context *gpe_context)
155 {
156     int i;
157
158     dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
159     gpe_context->surface_state_binding_table.bo = NULL;
160
161     dri_bo_unreference(gpe_context->idrt.bo);
162     gpe_context->idrt.bo = NULL;
163
164     dri_bo_unreference(gpe_context->curbe.bo);
165     gpe_context->curbe.bo = NULL;
166
167     for (i = 0; i < gpe_context->num_kernels; i++) {
168         struct i965_kernel *kernel = &gpe_context->kernels[i];
169
170         dri_bo_unreference(kernel->bo);
171         kernel->bo = NULL;
172     }
173 }
174
175 void
176 i965_gpe_context_init(VADriverContextP ctx,
177                       struct i965_gpe_context *gpe_context)
178 {
179     struct i965_driver_data *i965 = i965_driver_data(ctx);
180     dri_bo *bo;
181
182     dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
183     bo = dri_bo_alloc(i965->intel.bufmgr,
184                       "surface state & binding table",
185                       gpe_context->surface_state_binding_table.length,
186                       4096);
187     assert(bo);
188     gpe_context->surface_state_binding_table.bo = bo;
189
190     dri_bo_unreference(gpe_context->idrt.bo);
191     bo = dri_bo_alloc(i965->intel.bufmgr,
192                       "interface descriptor table",
193                       gpe_context->idrt.entry_size * gpe_context->idrt.max_entries,
194                       4096);
195     assert(bo);
196     gpe_context->idrt.bo = bo;
197
198     dri_bo_unreference(gpe_context->curbe.bo);
199     bo = dri_bo_alloc(i965->intel.bufmgr,
200                       "curbe buffer",
201                       gpe_context->curbe.length,
202                       4096);
203     assert(bo);
204     gpe_context->curbe.bo = bo;
205 }
206
/* Emit the full Gen6 media pipeline setup into the batch.  The order
 * is mandatory: flush, pipeline select, state base addresses, VFE
 * state, CURBE load, then the interface descriptor load. */
void
gen6_gpe_pipeline_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct intel_batchbuffer *batch)
{
    intel_batchbuffer_emit_mi_flush(batch);

    i965_gpe_select(ctx, gpe_context, batch);
    gen6_gpe_state_base_address(ctx, gpe_context, batch);
    gen6_gpe_vfe_state(ctx, gpe_context, batch);
    gen6_gpe_curbe_load(ctx, gpe_context, batch);
    gen6_gpe_idrt(ctx, gpe_context, batch);
}
220
221 static void
222 i965_gpe_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
223 {
224     switch (tiling) {
225     case I915_TILING_NONE:
226         ss->ss3.tiled_surface = 0;
227         ss->ss3.tile_walk = 0;
228         break;
229     case I915_TILING_X:
230         ss->ss3.tiled_surface = 1;
231         ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
232         break;
233     case I915_TILING_Y:
234         ss->ss3.tiled_surface = 1;
235         ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
236         break;
237     }
238 }
239
240 static void
241 i965_gpe_set_surface2_tiling(struct i965_surface_state2 *ss, unsigned int tiling)
242 {
243     switch (tiling) {
244     case I915_TILING_NONE:
245         ss->ss2.tiled_surface = 0;
246         ss->ss2.tile_walk = 0;
247         break;
248     case I915_TILING_X:
249         ss->ss2.tiled_surface = 1;
250         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
251         break;
252     case I915_TILING_Y:
253         ss->ss2.tiled_surface = 1;
254         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
255         break;
256     }
257 }
258
259 static void
260 gen7_gpe_set_surface_tiling(struct gen7_surface_state *ss, unsigned int tiling)
261 {
262     switch (tiling) {
263     case I915_TILING_NONE:
264         ss->ss0.tiled_surface = 0;
265         ss->ss0.tile_walk = 0;
266         break;
267     case I915_TILING_X:
268         ss->ss0.tiled_surface = 1;
269         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
270         break;
271     case I915_TILING_Y:
272         ss->ss0.tiled_surface = 1;
273         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
274         break;
275     }
276 }
277
278 static void
279 gen7_gpe_set_surface2_tiling(struct gen7_surface_state2 *ss, unsigned int tiling)
280 {
281     switch (tiling) {
282     case I915_TILING_NONE:
283         ss->ss2.tiled_surface = 0;
284         ss->ss2.tile_walk = 0;
285         break;
286     case I915_TILING_X:
287         ss->ss2.tiled_surface = 1;
288         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
289         break;
290     case I915_TILING_Y:
291         ss->ss2.tiled_surface = 1;
292         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
293         break;
294     }
295 }
296
/* Fill a pre-Gen7 SURFACE_STATE2 (media/VME surface) for an NV12
 * surface: planar 4:2:0, 8-bit, interleaved CbCr plane located at the
 * surface's (x_cb_offset, y_cb_offset).  The surface must already have
 * a BO and be NV12, both asserted below. */
static void
i965_gpe_set_surface2_state(VADriverContextP ctx,
                            struct object_surface *obj_surface,
                            struct i965_surface_state2 *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    assert(obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'));

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;       /* padded width == pitch in bytes */

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_base_address = obj_surface->bo->offset;     /* patched by the reloc at submit */
    /* ss1 — width/height/pitch fields are "value minus one" */
    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;
    /* ss2 */
    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;
    i965_gpe_set_surface2_tiling(ss, tiling);
    /* ss3: UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = obj_surface->x_cb_offset;
    ss->ss3.y_offset_for_cb = obj_surface->y_cb_offset;
}
330
/* Write a SURFACE_STATE2 entry for obj_surface into the context's
 * surface-state/binding-table BO at surface_state_offset, register a
 * relocation so the GPU sees the real surface address, and point the
 * binding-table slot at binding_table_offset to the new state. */
void
i965_gpe_surface2_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        unsigned long binding_table_offset,
                        unsigned long surface_state_offset)
{
    struct i965_surface_state2 *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);          /* map writable */
    assert(bo->virtual);

    ss = (struct i965_surface_state2 *)((char *)bo->virtual + surface_state_offset);
    i965_gpe_set_surface2_state(ctx, obj_surface, ss);
    /* Relocate the base-address dword (ss0) against the surface BO. */
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      surface_state_offset + offsetof(struct i965_surface_state2, ss0),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}
356
/* Fill a pre-Gen7 SURFACE_STATE for media read/write access to the
 * surface's luma plane as a 2D R8_UNORM surface.  The width is given
 * in dwords (w / 4) as required by the media block read/write
 * messages. */
static void
i965_gpe_set_media_rw_surface_state(VADriverContextP ctx,
                                    struct object_surface *obj_surface,
                                    struct i965_surface_state *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;       /* padded width == pitch in bytes */

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss1 — patched by the reloc at submit */
    ss->ss1.base_addr = obj_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = h - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    i965_gpe_set_surface_tiling(ss, tiling);
}
383
384 void
385 i965_gpe_media_rw_surface_setup(VADriverContextP ctx,
386                                 struct i965_gpe_context *gpe_context,
387                                 struct object_surface *obj_surface,
388                                 unsigned long binding_table_offset,
389                                 unsigned long surface_state_offset)
390 {
391     struct i965_surface_state *ss;
392     dri_bo *bo;
393
394     bo = gpe_context->surface_state_binding_table.bo;
395     dri_bo_map(bo, True);
396     assert(bo->virtual);
397
398     ss = (struct i965_surface_state *)((char *)bo->virtual + surface_state_offset);
399     i965_gpe_set_media_rw_surface_state(ctx, obj_surface, ss);
400     dri_bo_emit_reloc(bo,
401                       I915_GEM_DOMAIN_RENDER, 0,
402                       0,
403                       surface_state_offset + offsetof(struct i965_surface_state, ss1),
404                       obj_surface->bo);
405
406     *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
407     dri_bo_unmap(bo);
408 }
409
/* Fill a pre-Gen7 SURFACE_STATE describing a linear BUFFER surface.
 * The entry count (num_entries - 1) is split across the width (7 bits),
 * height (13 bits) and depth (7 bits) fields as the hardware layout
 * requires. */
static void
i965_gpe_set_buffer_surface_state(VADriverContextP ctx,
                                  struct i965_buffer_surface *buffer_surface,
                                  struct i965_surface_state *ss)
{
    int num_entries;

    assert(buffer_surface->bo);
    num_entries = buffer_surface->num_blocks * buffer_surface->size_block / buffer_surface->pitch;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.render_cache_read_mode = 1;
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    /* ss1 — patched by the reloc at submit */
    ss->ss1.base_addr = buffer_surface->bo->offset;
    /* ss2 — low 20 bits of (num_entries - 1) */
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x1fff);
    /* ss3 — remaining high bits plus the buffer pitch */
    ss->ss3.depth = (((num_entries - 1) >> 20) & 0x7f);
    ss->ss3.pitch = buffer_surface->pitch - 1;
}
433
/* Write a BUFFER SURFACE_STATE for buffer_surface into the context's
 * surface-state/binding-table BO and hook up the binding-table slot.
 * The write domain on the reloc is RENDER because the kernel may write
 * this buffer.
 * NOTE(review): the name keeps the historical "suface" typo — it is
 * public API, so callers depend on the misspelling. */
void
i965_gpe_buffer_suface_setup(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct i965_buffer_surface *buffer_surface,
                             unsigned long binding_table_offset,
                             unsigned long surface_state_offset)
{
    struct i965_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);          /* map writable */
    assert(bo->virtual);

    ss = (struct i965_surface_state *)((char *)bo->virtual + surface_state_offset);
    i965_gpe_set_buffer_surface_state(ctx, buffer_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      surface_state_offset + offsetof(struct i965_surface_state, ss1),
                      buffer_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}
459
/* Fill a Gen7 SURFACE_STATE2 (media/VME surface) for an NV12 surface:
 * planar 4:2:0, 8-bit, interleaved CbCr plane at the surface's
 * (x_cb_offset, y_cb_offset).  BO presence and NV12 format are
 * asserted.  Gen7 counterpart of i965_gpe_set_surface2_state(). */
static void
gen7_gpe_set_surface2_state(VADriverContextP ctx,
                            struct object_surface *obj_surface,
                            struct gen7_surface_state2 *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    assert(obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'));

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;       /* padded width == pitch in bytes */

    memset(ss, 0, sizeof(*ss));
    /* ss0 — patched by the reloc at submit */
    ss->ss0.surface_base_address = obj_surface->bo->offset;
    /* ss1 — width/height fields are "value minus one" */
    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;
    /* ss2 */
    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;
    gen7_gpe_set_surface2_tiling(ss, tiling);
    /* ss3: UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = obj_surface->x_cb_offset;
    ss->ss3.y_offset_for_cb = obj_surface->y_cb_offset;
}
493
/* Gen7 variant of i965_gpe_surface2_setup(): write a SURFACE_STATE2
 * for obj_surface into the context's surface-state/binding-table BO,
 * relocate its base address (ss0) and fill the binding-table slot. */
void
gen7_gpe_surface2_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        unsigned long binding_table_offset,
                        unsigned long surface_state_offset)
{
    struct gen7_surface_state2 *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);          /* map writable */
    assert(bo->virtual);

    ss = (struct gen7_surface_state2 *)((char *)bo->virtual + surface_state_offset);
    gen7_gpe_set_surface2_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      surface_state_offset + offsetof(struct gen7_surface_state2, ss0),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}
519
/* Fill a Gen7 SURFACE_STATE for media read/write access to the
 * surface's luma plane as a 2D R8_UNORM surface.  Width is expressed
 * in dwords (w / 4) as the media block messages require.  Gen7
 * counterpart of i965_gpe_set_media_rw_surface_state(). */
static void
gen7_gpe_set_media_rw_surface_state(VADriverContextP ctx,
                                    struct object_surface *obj_surface,
                                    struct gen7_surface_state *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;       /* padded width == pitch in bytes */

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss1 — patched by the reloc at submit */
    ss->ss1.base_addr = obj_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = h - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    gen7_gpe_set_surface_tiling(ss, tiling);
}
546
547 static void
548 gen75_gpe_set_media_chroma_surface_state(VADriverContextP ctx,
549                                     struct object_surface *obj_surface,
550                                     struct gen7_surface_state *ss)
551 {
552     int w, h, w_pitch;
553     unsigned int tiling, swizzle;
554     int cbcr_offset;
555
556     dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
557     w = obj_surface->orig_width;
558     h = obj_surface->orig_height;
559     w_pitch = obj_surface->width;
560
561     cbcr_offset = obj_surface->height * obj_surface->width;
562     memset(ss, 0, sizeof(*ss));
563     /* ss0 */
564     ss->ss0.surface_type = I965_SURFACE_2D;
565     ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
566     /* ss1 */
567     ss->ss1.base_addr = obj_surface->bo->offset + cbcr_offset;
568     /* ss2 */
569     ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
570     ss->ss2.height = (obj_surface->height / 2) -1;
571     /* ss3 */
572     ss->ss3.pitch = w_pitch - 1;
573     gen7_gpe_set_surface_tiling(ss, tiling);
574 }
575
576 void
577 gen7_gpe_media_rw_surface_setup(VADriverContextP ctx,
578                                 struct i965_gpe_context *gpe_context,
579                                 struct object_surface *obj_surface,
580                                 unsigned long binding_table_offset,
581                                 unsigned long surface_state_offset)
582 {
583     struct gen7_surface_state *ss;
584     dri_bo *bo;
585
586     bo = gpe_context->surface_state_binding_table.bo;
587     dri_bo_map(bo, True);
588     assert(bo->virtual);
589
590     ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
591     gen7_gpe_set_media_rw_surface_state(ctx, obj_surface, ss);
592     dri_bo_emit_reloc(bo,
593                       I915_GEM_DOMAIN_RENDER, 0,
594                       0,
595                       surface_state_offset + offsetof(struct gen7_surface_state, ss1),
596                       obj_surface->bo);
597
598     *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
599     dri_bo_unmap(bo);
600 }
601
602 void
603 gen75_gpe_media_chroma_surface_setup(VADriverContextP ctx,
604                                 struct i965_gpe_context *gpe_context,
605                                 struct object_surface *obj_surface,
606                                 unsigned long binding_table_offset,
607                                 unsigned long surface_state_offset)
608 {
609     struct gen7_surface_state *ss;
610     dri_bo *bo;
611     int cbcr_offset;
612
613         assert(obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'));
614     bo = gpe_context->surface_state_binding_table.bo;
615     dri_bo_map(bo, True);
616     assert(bo->virtual);
617
618     cbcr_offset = obj_surface->height * obj_surface->width;
619     ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
620     gen75_gpe_set_media_chroma_surface_state(ctx, obj_surface, ss);
621     dri_bo_emit_reloc(bo,
622                       I915_GEM_DOMAIN_RENDER, 0,
623                       cbcr_offset,
624                       surface_state_offset + offsetof(struct gen7_surface_state, ss1),
625                       obj_surface->bo);
626
627     *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
628     dri_bo_unmap(bo);
629 }
630
631
/* Fill a Gen7 SURFACE_STATE describing a linear BUFFER surface.
 * The entry count (num_entries - 1) is split across the width (7 bits),
 * height (14 bits) and depth (6 bits) fields — the Gen7 field widths
 * differ from the pre-Gen7 layout used above. */
static void
gen7_gpe_set_buffer_surface_state(VADriverContextP ctx,
                                  struct i965_buffer_surface *buffer_surface,
                                  struct gen7_surface_state *ss)
{
    int num_entries;

    assert(buffer_surface->bo);
    num_entries = buffer_surface->num_blocks * buffer_surface->size_block / buffer_surface->pitch;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    /* ss1 — patched by the reloc at submit */
    ss->ss1.base_addr = buffer_surface->bo->offset;
    /* ss2 — low 21 bits of (num_entries - 1) */
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x3fff);
    /* ss3 — remaining high bits plus the buffer pitch */
    ss->ss3.depth = (((num_entries - 1) >> 21) & 0x3f);
    ss->ss3.pitch = buffer_surface->pitch - 1;
}
654
/* Gen7 variant of i965_gpe_buffer_suface_setup(): write a BUFFER
 * SURFACE_STATE for buffer_surface into the context's surface-state/
 * binding-table BO and hook up the binding-table slot.  The reloc's
 * write domain is RENDER because the kernel may write this buffer.
 * NOTE(review): the name keeps the historical "suface" typo — it is
 * public API, so callers depend on the misspelling. */
void
gen7_gpe_buffer_suface_setup(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct i965_buffer_surface *buffer_surface,
                             unsigned long binding_table_offset,
                             unsigned long surface_state_offset)
{
    struct gen7_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);          /* map writable */
    assert(bo->virtual);

    ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen7_gpe_set_buffer_surface_state(ctx, buffer_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      surface_state_offset + offsetof(struct gen7_surface_state, ss1),
                      buffer_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}