/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhao Yakui <yakui.zhao@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_encoder.h"
#include "gen6_vme.h"
#include "gen6_mfc.h"
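
/*
 * Layout of gpe_context.surface_state_binding_table.bo: the padded surface
 * states are packed at the start of the bo (SURFACE_STATE_OFFSET), followed
 * by the binding table, one dword entry per surface (BINDING_TABLE_OFFSET).
 */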

#define SURFACE_STATE_PADDED_SIZE_0_GEN7        ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN7        ALIGN(sizeof(struct gen7_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN7, SURFACE_STATE_PADDED_SIZE_1_GEN7)

#define SURFACE_STATE_PADDED_SIZE_0_GEN6        ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN6        ALIGN(sizeof(struct i965_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN6          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN6, SURFACE_STATE_PADDED_SIZE_1_GEN6)

#define SURFACE_STATE_PADDED_SIZE               MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET(index)             (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * index)

#define VME_INTRA_SHADER        0
#define VME_INTER_SHADER        1
#define VME_BATCHBUFFER         2

#define CURBE_ALLOCATION_SIZE   37              /* in 256-bit registers */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32)        /* in bytes, must be less than or equal to CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH  4               /* in 256-bit registers, must be less than or equal to CURBE_TOTAL_DATA_LENGTH / 32 */
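
/*
 * Pre-assembled Gen7.5 EU kernel binaries (.g7b), included below as array
 * initializer data: four dwords (one native-width EU instruction) per row.
 */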

static const uint32_t gen75_vme_intra_frame[][4] = {
#include "shaders/vme/intra_frame.g7b"
};

static const uint32_t gen75_vme_inter_frame[][4] = {
#include "shaders/vme/inter_frame.g7b"
};

static const uint32_t gen75_vme_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g7b"
};

static struct i965_kernel gen75_vme_kernels[] = {
    { "VME Intra Frame", VME_INTRA_SHADER, gen75_vme_intra_frame, sizeof(gen75_vme_intra_frame), NULL },
    { "VME inter Frame", VME_INTER_SHADER, gen75_vme_inter_frame, sizeof(gen75_vme_inter_frame), NULL },
    { "VME BATCHBUFFER", VME_BATCHBUFFER, gen75_vme_batchbuffer, sizeof(gen75_vme_batchbuffer), NULL },
};

/* only used for VME source surface state */
static void
gen75_vme_source_surface_state(VADriverContextP ctx,
                               int index,
                               struct object_surface *obj_surface,
                               struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_surface2_setup(ctx,
                                    &vme_context->gpe_context,
                                    obj_surface,
                                    BINDING_TABLE_OFFSET(index),
                                    SURFACE_STATE_OFFSET(index));
}

static void
gen75_vme_media_source_surface_state(VADriverContextP ctx,
                                     int index,
                                     struct object_surface *obj_surface,
                                     struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_rw_surface_setup(ctx,
                                            &vme_context->gpe_context,
                                            obj_surface,
                                            BINDING_TABLE_OFFSET(index),
                                            SURFACE_STATE_OFFSET(index));
}

static void
gen75_vme_media_chroma_source_surface_state(VADriverContextP ctx,
                                            int index,
                                            struct object_surface *obj_surface,
                                            struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_chroma_surface_setup(ctx,
                                                &vme_context->gpe_context,
                                                obj_surface,
                                                BINDING_TABLE_OFFSET(index),
                                                SURFACE_STATE_OFFSET(index));
}

static void
gen75_vme_output_buffer_setup(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              int index,
                              struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    /* one output block per macroblock */
    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */

    if (is_intra)
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES;
    else
        vme_context->vme_output.size_block = INTER_VME_OUTPUT_IN_BYTES;

    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              "VME output buffer",
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
                                              0x1000);
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}

static void
gen75_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                       struct encode_state *encode_state,
                                       int index,
                                       struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    /* room for one 32-byte command per macroblock, plus the batch-buffer end */
    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 32; /* 2 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   "VME batchbuffer",
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
                                                   0x1000);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}
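
/*
 * Binding table indices used by the VME kernels:
 *   0..2 - VME surface2 states: current input picture, reference 0, reference 1
 *   3    - VME output buffer
 *   4, 6 - luma/chroma media read-write views of the input picture
 *   5    - second-level batch buffer started via MI_BATCH_BUFFER_START
 */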

static VAStatus
gen75_vme_surface_setup(VADriverContextP ctx,
                        struct encode_state *encode_state,
                        int is_intra,
                        struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;

    /* Set up the surface states */
    /* current picture for encoding */
    obj_surface = SURFACE(encoder_context->input_yuv_surface);
    assert(obj_surface);
    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);

    if (!is_intra) {
        /* reference 0 */
        obj_surface = SURFACE(pPicParameter->ReferenceFrames[0].picture_id);
        assert(obj_surface);
        if (obj_surface->bo != NULL)
            gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

        /* reference 1 */
        obj_surface = SURFACE(pPicParameter->ReferenceFrames[1].picture_id);
        assert(obj_surface);
        if (obj_surface->bo != NULL)
            gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
    }

    /* VME output */
    gen75_vme_output_buffer_setup(ctx, encode_state, 3, encoder_context);
    gen75_vme_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
}

static VAStatus gen75_vme_interface_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct gen6_interface_descriptor_data *desc;
    int i;
    dri_bo *bo;

    bo = vme_context->gpe_context.idrt.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc = bo->virtual;

    for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
        struct i965_kernel *kernel;
        kernel = &vme_context->gpe_context.kernels[i];
        assert(sizeof(*desc) == 32);
        /* Set up the descriptor table */
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 1; /* FIXME: */
        desc->desc2.sampler_state_pointer = (vme_context->vme_state.bo->offset >> 5);
        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;

        /* kernel start pointer */
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
                          kernel->bo);
        /* sampler state (VME state) pointer; delta (1 << 2) keeps sampler_count == 1 in desc2 after relocation */
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          (1 << 2),
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc2),
                          vme_context->vme_state.bo);
        desc++;
    }

    dri_bo_unmap(bo);

    return VA_STATUS_SUCCESS;
}

static VAStatus gen75_vme_constant_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    // unsigned char *constant_buffer;

    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
    assert(vme_context->gpe_context.curbe.bo->virtual);
    // constant_buffer = vme_context->curbe.bo->virtual;

    /* TODO: copy data into the CURBE buffer */

    dri_bo_unmap(vme_context->gpe_context.curbe.bo);

    return VA_STATUS_SUCCESS;
}
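
/*
 * Intra MB mode cost LUT, one packed dword per QP (0..51). The entry for the
 * current QP is written into dword 16 of the VME state message by
 * gen75_vme_state_setup_fixup() below.
 */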

static const unsigned int intra_mb_mode_cost_table[] = {
    0x31110001, // for qp0
    0x09110001, // for qp1
    0x15030001, // for qp2
    0x0b030001, // for qp3
    0x0d030011, // for qp4
    0x17210011, // for qp5
    0x41210011, // for qp6
    0x19210011, // for qp7
    0x25050003, // for qp8
    0x1b130003, // for qp9
    0x1d130003, // for qp10
    0x27070021, // for qp11
    0x51310021, // for qp12
    0x29090021, // for qp13
    0x35150005, // for qp14
    0x2b0b0013, // for qp15
    0x2d0d0013, // for qp16
    0x37170007, // for qp17
    0x61410031, // for qp18
    0x39190009, // for qp19
    0x45250015, // for qp20
    0x3b1b000b, // for qp21
    0x3d1d000d, // for qp22
    0x47270017, // for qp23
    0x71510041, // for qp24 ! center for qp=0..30
    0x49290019, // for qp25
    0x55350025, // for qp26
    0x4b2b001b, // for qp27
    0x4d2d001d, // for qp28
    0x57370027, // for qp29
    0x81610051, // for qp30
    0x57270017, // for qp31
    0x81510041, // for qp32 ! center for qp=31..51
    0x59290019, // for qp33
    0x65350025, // for qp34
    0x5b2b001b, // for qp35
    0x5d2d001d, // for qp36
    0x67370027, // for qp37
    0x91610051, // for qp38
    0x69390029, // for qp39
    0x75450035, // for qp40
    0x6b3b002b, // for qp41
    0x6d3d002d, // for qp42
    0x77470037, // for qp43
    0xa1710061, // for qp44
    0x79490039, // for qp45
    0x85550045, // for qp46
    0x7b4b003b, // for qp47
    0x7d4d003d, // for qp48
    0x87570047, // for qp49
    0xb1810071, // for qp50
    0x89590049  // for qp51
};

static void gen75_vme_state_setup_fixup(VADriverContextP ctx,
                                        struct encode_state *encode_state,
                                        struct intel_encoder_context *encoder_context,
                                        unsigned int *vme_state_message)
{
    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
    VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;

    /* nothing to patch for non-intra slices */
    if (slice_param->slice_type != SLICE_TYPE_I &&
        slice_param->slice_type != SLICE_TYPE_SI)
        return;

    if (encoder_context->rate_control_mode == VA_RC_CQP)
        vme_state_message[16] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
    else
        vme_state_message[16] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[slice_param->slice_type].QpPrimeY];
}

static VAStatus gen75_vme_vme_state_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          int is_intra,
                                          struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned int *vme_state_message;
    int i;

    /* build the VME state message */
    dri_bo_map(vme_context->vme_state.bo, 1);
    assert(vme_context->vme_state.bo->virtual);
    vme_state_message = (unsigned int *)vme_context->vme_state.bo->virtual;

    vme_state_message[0] = 0x01010101;
    vme_state_message[1] = 0x10010101;
    vme_state_message[2] = 0x0F0F0F0F;
    vme_state_message[3] = 0x100F0F0F;
    vme_state_message[4] = 0x01010101;
    vme_state_message[5] = 0x00010101;
    vme_state_message[6] = 0x01010101;
    vme_state_message[7] = 0x10010101;
    vme_state_message[8] = 0x0F0F0F0F;
    vme_state_message[9] = 0x100F0F0F;
    vme_state_message[10] = 0x01010101;
    vme_state_message[11] = 0x00010101;
    vme_state_message[12] = 0x00;
    vme_state_message[13] = 0x00;

    /* dword 16 (intra mode costs) is overwritten per QP in gen75_vme_state_setup_fixup() */
    vme_state_message[14] = 0x4a4a;
    vme_state_message[15] = 0x0;
    vme_state_message[16] = 0x4a4a4a4a;
    vme_state_message[17] = 0x4a4a4a4a;
    vme_state_message[18] = 0x22120200;
    vme_state_message[19] = 0x62524232;

    for (i = 20; i < 32; i++) {
        vme_state_message[i] = 0;
    }
    //vme_state_message[16] = 0x42424242;			//cost function LUT set 0 for Intra

    gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);

    dri_bo_unmap(vme_context->vme_state.bo);

    return VA_STATUS_SUCCESS;
}
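
/*
 * Fill the second-level batch buffer on the CPU: one 8-dword MEDIA_OBJECT
 * command per run of macroblocks (at most 128, and exactly one MB row for the
 * first command of a slice). The two inline dwords carry the MB position, the
 * number of MBs to process, the transform_8x8 flag, and a "first command of
 * the slice" bit.
 */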

static void
gen75_vme_fill_vme_batchbuffer(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               int mb_width, int mb_height,
                               int kernel,
                               int transform_8x8_mode_flag,
                               struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int number_mb_cmds;
    int mb_x = 0, mb_y = 0;
    int i, s;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        int slice_mb_begin = pSliceParameter->macroblock_address;
        int slice_mb_number = pSliceParameter->num_macroblocks;

        for (i = 0; i < slice_mb_number; ) {
            int mb_count = i + slice_mb_begin;

            mb_x = mb_count % mb_width;
            mb_y = mb_count / mb_width;

            if (i == 0) {
                number_mb_cmds = mb_width; // we must mark the slice edge
            } else if ((i + 128) <= slice_mb_number) {
                number_mb_cmds = 128;
            } else {
                number_mb_cmds = slice_mb_number - i;
            }

            *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
            *command_ptr++ = kernel;
            *command_ptr++ = 0;
            *command_ptr++ = 0;
            *command_ptr++ = 0;
            *command_ptr++ = 0;

            /* inline data */
            *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
            *command_ptr++ = (number_mb_cmds << 16 | transform_8x8_mode_flag | ((i == 0) << 1));

            i += number_mb_cmds;
        }
    }

    *command_ptr++ = 0;
    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
}
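
/*
 * Per-frame (re)initialization: reset the GPE context, drop the previous
 * frame's buffers, and allocate a fresh bo for the VME state message, which
 * the interface descriptors expose to the kernels as sampler state.
 */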
static void gen75_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    dri_bo *bo;

    i965_gpe_context_init(ctx, &vme_context->gpe_context);

    /* VME output buffer */
    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    /* VME state */
    dri_bo_unreference(vme_context->vme_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "Buffer",
                      1024 * 16, 64);
    assert(bo);
    vme_context->vme_state.bo = bo;
}

static void gen75_vme_pipeline_programing(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    gen75_vme_fill_vme_batchbuffer(ctx,
                                   encode_state,
                                   width_in_mbs, height_in_mbs,
                                   is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
                                   pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
                                   encoder_context);

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    /* kick the second-level batch buffer */
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
    OUT_RELOC(batch,
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
              0);
    ADVANCE_BATCH(batch);

    intel_batchbuffer_end_atomic(batch);
}

static VAStatus gen75_vme_prepare(VADriverContextP ctx,
                                  struct encode_state *encode_state,
                                  struct intel_encoder_context *encoder_context)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;

    /* Set up all the memory objects */
    gen75_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
    gen75_vme_constant_setup(ctx, encode_state, encoder_context);
    gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);

    /* Program the media pipeline */
    gen75_vme_pipeline_programing(ctx, encode_state, encoder_context);

    return vaStatus;
}

static VAStatus gen75_vme_run(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
{
    struct intel_batchbuffer *batch = encoder_context->base.batch;

    intel_batchbuffer_flush(batch);

    return VA_STATUS_SUCCESS;
}

static VAStatus gen75_vme_stop(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               struct intel_encoder_context *encoder_context)
{
    return VA_STATUS_SUCCESS;
}

static VAStatus
gen75_vme_pipeline(VADriverContextP ctx,
                   VAProfile profile,
                   struct encode_state *encode_state,
                   struct intel_encoder_context *encoder_context)
{
    gen75_vme_media_init(ctx, encoder_context);
    gen75_vme_prepare(ctx, encode_state, encoder_context);
    gen75_vme_run(ctx, encode_state, encoder_context);
    gen75_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
}

static void gen75_vme_context_destroy(void *context)
{
    struct gen6_vme_context *vme_context = context;

    i965_gpe_context_destroy(&vme_context->gpe_context);

    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    free(vme_context);
}

Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));

    vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;

    vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
    vme_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);

    vme_context->gpe_context.curbe.length = CURBE_TOTAL_DATA_LENGTH;

    vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
    vme_context->gpe_context.vfe_state.num_urb_entries = 16;
    vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
    vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
    vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;

    i965_gpe_load_kernels(ctx,
                          &vme_context->gpe_context,
                          gen75_vme_kernels,
                          GEN6_VME_KERNEL_NUMBER);

    vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
    vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
    vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
    vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;

    encoder_context->vme_context = vme_context;
    encoder_context->vme_context_destroy = gen75_vme_context_destroy;
    encoder_context->vme_pipeline = gen75_vme_pipeline;

    return True;
}