/*
 * Copyright © 2009 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhou Chang <chang.zhou@intel.com>
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_backend.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"

#include "i965_encoder.h"
#define VME_INTRA_SHADER        0
#define VME_INTER_SHADER        1

#define CURBE_ALLOCATION_SIZE   37              /* in 256-bit */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32)        /* in bytes, it should be less than or equal to CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH  4               /* in 256-bit, it should be less than or equal to CURBE_TOTAL_DATA_LENGTH / 32 */
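
/*
 * Sanity of the CURBE sizing above: CURBE_TOTAL_DATA_LENGTH is 128 bytes,
 * i.e. four 256-bit rows, which matches CURBE_URB_ENTRY_LENGTH (4) and is
 * well below the 37 256-bit rows reserved by CURBE_ALLOCATION_SIZE.
 */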
static const uint32_t gen6_vme_intra_frame[][4] = {
#include "shaders/vme/intra_frame.g6b"
};

static const uint32_t gen6_vme_inter_frame[][4] = {
#include "shaders/vme/inter_frame.g6b"
};
static struct i965_kernel gen6_vme_kernels[] = {
    {
        "VME Intra Frame",
        VME_INTRA_SHADER,                       /*index*/
        gen6_vme_intra_frame,
        sizeof(gen6_vme_intra_frame),
        NULL
    },
    {
        "VME Inter Frame",
        VME_INTER_SHADER,
        gen6_vme_inter_frame,
        sizeof(gen6_vme_inter_frame),
        NULL
    },
};

#define GEN6_VME_KERNEL_NUMBER ARRAY_ELEMS(gen6_vme_kernels)
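
/*
 * One kernel per macroblock type: gen6_vme_pipeline_programing() dispatches
 * VME_INTRA_SHADER or VME_INTER_SHADER based on the slice's is_intra flag.
 */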
static void
gen6_vme_set_common_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}
static void
gen6_vme_set_source_surface_tiling(struct i965_surface_state2 *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}
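
/*
 * The two helpers above are intentionally parallel: the tiling bits live in
 * ss3 of the common surface state layout but in ss2 of the surface_state2
 * layout used for VME source surfaces.
 */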
/* only used for VME source surface state */
static void gen6_vme_source_surface_state(VADriverContextP ctx,
                                          int index,
                                          struct object_surface *obj_surface,
                                          struct gen6_encoder_context *gen6_encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    struct i965_surface_state2 *ss;
    dri_bo *bo;
    int w, h, w_pitch, h_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);

    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;
    h_pitch = obj_surface->height;

    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "VME surface state",
                      sizeof(struct i965_surface_state2),
                      0x1000);
    assert(bo);
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    ss = bo->virtual;
    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_base_address = obj_surface->bo->offset;

    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;

    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;

    gen6_vme_set_source_surface_tiling(ss, tiling);

    /* UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = 0;
    ss->ss3.y_offset_for_cb = h_pitch;

    dri_bo_unmap(bo);

    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      offsetof(struct i965_surface_state2, ss0),
                      obj_surface->bo);

    assert(index < MAX_MEDIA_SURFACES_GEN6);
    vme_context->surface_state[index].bo = bo;
}
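
/*
 * Bind the Y plane of the source picture as a plain R8_UNORM 2D surface for
 * the media kernel.  Note the width is programmed as w / 4 - 1, presumably
 * so each surface element covers four pixels.
 */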
static void
gen6_vme_media_source_surface_state(VADriverContextP ctx,
                                    int index,
                                    struct object_surface *obj_surface,
                                    struct gen6_encoder_context *gen6_encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    struct i965_surface_state *ss;
    dri_bo *bo;
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    /* Y plane */
    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state",
                      sizeof(struct i965_surface_state),
                      0x1000);
    assert(bo);

    dri_bo_map(bo, True);
    assert(bo->virtual);
    ss = bo->virtual;
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    ss->ss1.base_addr = obj_surface->bo->offset;
    ss->ss2.width = w / 4 - 1;
    ss->ss2.height = h - 1;
    ss->ss3.pitch = w_pitch - 1;
    gen6_vme_set_common_surface_tiling(ss, tiling);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER,
                      0,
                      0,
                      offsetof(struct i965_surface_state, ss1),
                      obj_surface->bo);
    dri_bo_unmap(bo);

    assert(index < MAX_MEDIA_SURFACES_GEN6);
    vme_context->surface_state[index].bo = bo;
}
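
/*
 * The VME output buffer holds one 16-byte (OWORD) record per macroblock for
 * intra frames and four per macroblock for inter frames; it is bound as a
 * BUFFER-type surface below.
 */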
static VAStatus
gen6_vme_output_buffer_setup(VADriverContextP ctx,
                             struct encode_state *encode_state,
                             int index,
                             struct gen6_encoder_context *gen6_encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    struct i965_surface_state *ss;
    dri_bo *bo;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
    VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
    int is_intra = pSliceParameter->slice_flags.bits.is_intra;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
    int num_entries;

    if (is_intra)
        vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    else
        vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs * 4;

    vme_context->vme_output.size_block = 16; /* an OWORD */
    vme_context->vme_output.pitch = ALIGN(vme_context->vme_output.size_block, 16);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "VME output buffer",
                      vme_context->vme_output.num_blocks * vme_context->vme_output.pitch,
                      0x1000);
    assert(bo);
    vme_context->vme_output.bo = bo;

    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "VME output buffer state",
                      sizeof(struct i965_surface_state),
                      0x1000);
    assert(bo);
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    ss = bo->virtual;
    memset(ss, 0, sizeof(*ss));

    /* always use 16 bytes as pitch on Sandy Bridge */
    num_entries = vme_context->vme_output.num_blocks * vme_context->vme_output.pitch / 16;
    ss->ss0.render_cache_read_mode = 1;
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    ss->ss1.base_addr = vme_context->vme_output.bo->offset;
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x1fff);
    ss->ss3.depth = (((num_entries - 1) >> 20) & 0x7f);
    ss->ss3.pitch = vme_context->vme_output.pitch - 1;
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      offsetof(struct i965_surface_state, ss1),
                      vme_context->vme_output.bo);
    dri_bo_unmap(bo);

    assert(index < MAX_MEDIA_SURFACES_GEN6);
    vme_context->surface_state[index].bo = bo;
    return VA_STATUS_SUCCESS;
}
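
/*
 * Surface binding-table slots used below: 0 = current picture (VME view),
 * 1 = reference 0 (VME view), 3 = VME output buffer, 4 = current picture as
 * a media R8 surface.  Slot 2 is reserved for a second reference (see the
 * FIXME below).
 */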
static VAStatus gen6_vme_surface_setup(VADriverContextP ctx,
                                       struct encode_state *encode_state,
                                       int is_intra,
                                       struct gen6_encoder_context *gen6_encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    struct object_surface *obj_surface;
    unsigned int *binding_table;
    dri_bo *bo = vme_context->binding_table.bo;
    int i;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param->buffer;

    /*Setup surfaces state*/
    /* current picture for encoding */
    obj_surface = SURFACE(encode_state->current_render_target);
    assert(obj_surface);
    gen6_vme_source_surface_state(ctx, 0, obj_surface, gen6_encoder_context);
    gen6_vme_media_source_surface_state(ctx, 4, obj_surface, gen6_encoder_context);

    if (!is_intra) {
        /* reference 0 */
        obj_surface = SURFACE(pPicParameter->reference_picture);
        assert(obj_surface);
        gen6_vme_source_surface_state(ctx, 1, obj_surface, gen6_encoder_context);
        /* reference 1, FIXME: */
        // obj_surface = SURFACE(pPicParameter->reference_picture);
        // assert(obj_surface);
        //gen6_vme_source_surface_state(ctx, 2, obj_surface);
    }

    /* VME output */
    gen6_vme_output_buffer_setup(ctx, encode_state, 3, gen6_encoder_context);

    /*Building binding table*/
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    binding_table = bo->virtual;
    memset(binding_table, 0, bo->size);

    for (i = 0; i < MAX_MEDIA_SURFACES_GEN6; i++) {
        if (vme_context->surface_state[i].bo) {
            binding_table[i] = vme_context->surface_state[i].bo->offset;
            dri_bo_emit_reloc(bo,
                              I915_GEM_DOMAIN_INSTRUCTION, 0,
                              0,
                              i * sizeof(*binding_table),
                              vme_context->surface_state[i].bo);
        }
    }

    dri_bo_unmap(vme_context->binding_table.bo);

    return VA_STATUS_SUCCESS;
}
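
/*
 * One interface descriptor is written per kernel.  Each descriptor points at
 * the kernel binary, the VME state (programmed through the sampler state
 * pointer), the shared binding table and the CURBE read length; the three
 * relocations keep those GPU addresses valid across buffer moves.
 */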
static VAStatus gen6_vme_interface_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    struct gen6_interface_descriptor_data *desc;
    int i;
    dri_bo *bo;

    bo = vme_context->idrt.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc = bo->virtual;

    for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
        struct i965_kernel *kernel;
        kernel = &gen6_vme_kernels[i];
        assert(sizeof(*desc) == 32);
        /*Setup the descriptor table*/
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 1; /* FIXME: */
        desc->desc2.sampler_state_pointer = (vme_context->vme_state.bo->offset >> 5);
        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
        desc->desc3.binding_table_pointer = (vme_context->binding_table.bo->offset >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;

        /*kernel start*/
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
                          kernel->bo);
        /*Sampler State(VME state pointer)*/
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          (1 << 2), /* re-apply sampler_count = 1 in the bits the reloc overwrites */
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc2),
                          vme_context->vme_state.bo);
        /*binding table*/
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          1, /* re-apply binding_table_entry_count = 1 */
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc3),
                          vme_context->binding_table.bo);

        desc++;
    }

    dri_bo_unmap(bo);

    return VA_STATUS_SUCCESS;
}
static VAStatus gen6_vme_constant_setup(VADriverContextP ctx,
                                        struct encode_state *encode_state,
                                        struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    unsigned char *constant_buffer;

    dri_bo_map(vme_context->curbe.bo, 1);
    assert(vme_context->curbe.bo->virtual);
    constant_buffer = vme_context->curbe.bo->virtual;

    /*TODO copy buffer into CURBE*/
    (void)constant_buffer; /* CURBE contents are not used yet */

    dri_bo_unmap(vme_context->curbe.bo);

    return VA_STATUS_SUCCESS;
}
static VAStatus gen6_vme_vme_state_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         int is_intra,
                                         struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    unsigned int *vme_state_message;
    int i;

    //building VME state message
    dri_bo_map(vme_context->vme_state.bo, 1);
    assert(vme_context->vme_state.bo->virtual);
    vme_state_message = (unsigned int *)vme_context->vme_state.bo->virtual;

    for (i = 0; i < 32; i++) {
        vme_state_message[i] = 0x11;
    }

    vme_state_message[16] = 0x42424242;         //cost function LUT set 0 for Intra

    dri_bo_unmap(vme_context->vme_state.bo);
    return VA_STATUS_SUCCESS;
}
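
/*
 * The helpers below emit the media pipeline command sequence:
 * PIPELINE_SELECT, STATE_BASE_ADDRESS, MEDIA_VFE_STATE, MEDIA_CURBE_LOAD
 * and the interface descriptor load, followed by one MEDIA_OBJECT per
 * macroblock.
 */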
static void gen6_vme_pipeline_select(VADriverContextP ctx)
{
    BEGIN_BATCH(ctx, 1);
    OUT_BATCH(ctx, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(ctx);
}
static void gen6_vme_state_base_address(VADriverContextP ctx)
{
    BEGIN_BATCH(ctx, 10);

    OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 8);

    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);                //General State Base Address
    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);                //Surface State Base Address
    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);                //Dynamic State Base Address
    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);                //Indirect Object Base Address
    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);                //Instruction Base Address

    OUT_BATCH(ctx, 0xFFFFF000 | BASE_ADDRESS_MODIFY);       //General State Access Upper Bound
    OUT_BATCH(ctx, 0xFFFFF000 | BASE_ADDRESS_MODIFY);       //Dynamic State Access Upper Bound
    OUT_BATCH(ctx, 0xFFFFF000 | BASE_ADDRESS_MODIFY);       //Indirect Object Access Upper Bound
    OUT_BATCH(ctx, 0xFFFFF000 | BASE_ADDRESS_MODIFY);       //Instruction Access Upper Bound

    /* Gen6 STATE_BASE_ADDRESS is 10 dwords, so these two stay disabled:
     * OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);             //LLC Coherent Base Address
     * OUT_BATCH(ctx, 0xFFFFF000 | BASE_ADDRESS_MODIFY);    //LLC Coherent Upper Bound
     */

    ADVANCE_BATCH(ctx);
}
static void gen6_vme_vfe_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;

    BEGIN_BATCH(ctx, 8);

    OUT_BATCH(ctx, CMD_MEDIA_VFE_STATE | 6);    /*Gen6 CMD_MEDIA_STATE_POINTERS = CMD_MEDIA_STATE */
    OUT_BATCH(ctx, 0);                          /*Scratch Space Base Pointer and Space*/
    OUT_BATCH(ctx, (vme_context->vfe_state.max_num_threads << 16)
                   | (vme_context->vfe_state.num_urb_entries << 8)
                   | (vme_context->vfe_state.gpgpu_mode << 2)); /*Maximum Number of Threads, Number of URB Entries, MEDIA Mode*/
    OUT_BATCH(ctx, 0);                          /*Debug: Object ID*/
    OUT_BATCH(ctx, (vme_context->vfe_state.urb_entry_size << 16)
                   | vme_context->vfe_state.curbe_allocation_size); /*URB Entry Allocation Size, CURBE Allocation Size*/
    OUT_BATCH(ctx, 0);                          /*Disable Scoreboard*/
    OUT_BATCH(ctx, 0);                          /*Disable Scoreboard*/
    OUT_BATCH(ctx, 0);                          /*Disable Scoreboard*/

    ADVANCE_BATCH(ctx);
}
static void gen6_vme_curbe_load(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;

    BEGIN_BATCH(ctx, 4);

    OUT_BATCH(ctx, CMD_MEDIA_CURBE_LOAD | 2);
    OUT_BATCH(ctx, 0);

    OUT_BATCH(ctx, CURBE_TOTAL_DATA_LENGTH);
    OUT_RELOC(ctx, vme_context->curbe.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(ctx);
}
static void gen6_vme_idrt(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;

    BEGIN_BATCH(ctx, 4);

    OUT_BATCH(ctx, CMD_MEDIA_INTERFACE_LOAD | 2);
    OUT_BATCH(ctx, 0);

    OUT_BATCH(ctx, GEN6_VME_KERNEL_NUMBER * sizeof(struct gen6_interface_descriptor_data));
    OUT_RELOC(ctx, vme_context->idrt.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(ctx);
}
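
/*
 * Emit one MEDIA_OBJECT command per macroblock.  The single inline-data
 * dword packs the picture width in MBs together with the MB x/y position
 * for the kernel.  The return value is the command length in bytes, used by
 * the caller to check remaining batch space.
 */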
static int gen6_vme_media_object(VADriverContextP ctx,
                                 struct encode_state *encode_state,
                                 int mb_x, int mb_y,
                                 int kernel)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(encode_state->current_render_target);
    int mb_width = ALIGN(obj_surface->orig_width, 16) / 16;
    int len_in_dwords = 6 + 1;

    BEGIN_BATCH(ctx, len_in_dwords);

    OUT_BATCH(ctx, CMD_MEDIA_OBJECT | (len_in_dwords - 2));
    OUT_BATCH(ctx, kernel);                     /*Interface Descriptor Offset*/
    OUT_BATCH(ctx, 0);
    OUT_BATCH(ctx, 0);
    OUT_BATCH(ctx, 0);
    OUT_BATCH(ctx, 0);

    /*inline data */
    OUT_BATCH(ctx, mb_width << 16 | mb_y << 8 | mb_x); /*M0.0 Reference0 X,Y, not used in Intra*/

    ADVANCE_BATCH(ctx);

    return len_in_dwords * 4; /* in bytes */
}
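
/*
 * (Re)allocate the per-frame GPU state: CURBE, binding table, interface
 * descriptors and VME state, and reset the cached surface states.  Note the
 * VFE parameters below are programmed in minus-one (N - 1) form.
 */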
static void gen6_vme_media_init(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
    dri_bo *bo;

    /* constant buffer */
    dri_bo_unreference(vme_context->curbe.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "CURBE buffer",
                      CURBE_TOTAL_DATA_LENGTH, 64);
    assert(bo);
    vme_context->curbe.bo = bo;

    /* surface states */
    for (i = 0; i < MAX_MEDIA_SURFACES_GEN6; i++) {
        dri_bo_unreference(vme_context->surface_state[i].bo);
        vme_context->surface_state[i].bo = NULL;
    }

    /* binding table */
    dri_bo_unreference(vme_context->binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "binding table",
                      MAX_MEDIA_SURFACES_GEN6 * sizeof(unsigned int), 32);
    assert(bo);
    vme_context->binding_table.bo = bo;

    /* interface descriptor remapping table */
    dri_bo_unreference(vme_context->idrt.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "interface descriptor table",
                      MAX_INTERFACE_DESC_GEN6 * sizeof(struct gen6_interface_descriptor_data), 16);
    assert(bo);
    vme_context->idrt.bo = bo;

    /* VME output buffer */
    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    /* VME state */
    dri_bo_unreference(vme_context->vme_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "VME state",
                      1024 * 16, 64); /* generous: the state message itself is 32 dwords */
    assert(bo);
    vme_context->vme_state.bo = bo;

    vme_context->vfe_state.max_num_threads = 60 - 1;
    vme_context->vfe_state.num_urb_entries = 16;
    vme_context->vfe_state.gpgpu_mode = 0;
    vme_context->vfe_state.urb_entry_size = 59 - 1;
    vme_context->vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;
}
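
/*
 * Walk the frame in macroblock order.  Pipeline state is (re)emitted once
 * per batch: whenever the batch runs out of space it is flushed, and
 * emit_new_state forces the state commands into the fresh batch before the
 * next MEDIA_OBJECT.
 */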
static void gen6_vme_pipeline_programing(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct gen6_encoder_context *gen6_encoder_context)
{
    VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
    int is_intra = pSliceParameter->slice_flags.bits.is_intra;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
    int emit_new_state = 1, object_len_in_bytes;
    int x, y;

    intel_batchbuffer_start_atomic(ctx, 0x1000);

    for (y = 0; y < height_in_mbs; y++) {
        for (x = 0; x < width_in_mbs; x++) {

            if (emit_new_state) {
                /*Step1: MI_FLUSH/PIPE_CONTROL*/
                BEGIN_BATCH(ctx, 4);
                OUT_BATCH(ctx, CMD_PIPE_CONTROL | 0x02);
                OUT_BATCH(ctx, 0);
                OUT_BATCH(ctx, 0);
                OUT_BATCH(ctx, 0);
                ADVANCE_BATCH(ctx);

                /*Step2: State command PIPELINE_SELECT*/
                gen6_vme_pipeline_select(ctx);

                /*Step3: State commands configuring pipeline states*/
                gen6_vme_state_base_address(ctx);
                gen6_vme_vfe_state(ctx, gen6_encoder_context);
                gen6_vme_curbe_load(ctx, gen6_encoder_context);
                gen6_vme_idrt(ctx, gen6_encoder_context);

                emit_new_state = 0;
            }

            /*Step4: Primitive commands*/
            object_len_in_bytes = gen6_vme_media_object(ctx, encode_state, x, y, is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER);

            if (intel_batchbuffer_check_free_space(ctx, object_len_in_bytes) == 0) {
                intel_batchbuffer_end_atomic(ctx);
                intel_batchbuffer_flush(ctx);
                emit_new_state = 1;
                intel_batchbuffer_start_atomic(ctx, 0x1000);
            }
        }
    }

    intel_batchbuffer_end_atomic(ctx);
}
static VAStatus gen6_vme_prepare(VADriverContextP ctx,
                                 struct encode_state *encode_state,
                                 struct gen6_encoder_context *gen6_encoder_context)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
    int is_intra = pSliceParameter->slice_flags.bits.is_intra;

    /*Setup all the memory objects*/
    gen6_vme_surface_setup(ctx, encode_state, is_intra, gen6_encoder_context);
    gen6_vme_interface_setup(ctx, encode_state, gen6_encoder_context);
    gen6_vme_constant_setup(ctx, encode_state, gen6_encoder_context);
    gen6_vme_vme_state_setup(ctx, encode_state, is_intra, gen6_encoder_context);

    /*Programming media pipeline*/
    gen6_vme_pipeline_programing(ctx, encode_state, gen6_encoder_context);

    return vaStatus;
}
static VAStatus gen6_vme_run(VADriverContextP ctx,
                             struct encode_state *encode_state,
                             struct gen6_encoder_context *gen6_encoder_context)
{
    intel_batchbuffer_flush(ctx);

    return VA_STATUS_SUCCESS;
}
static VAStatus gen6_vme_stop(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct gen6_encoder_context *gen6_encoder_context)
{
    return VA_STATUS_SUCCESS;
}
VAStatus gen6_vme_pipeline(VADriverContextP ctx,
                           VAProfile profile,
                           struct encode_state *encode_state,
                           struct gen6_encoder_context *gen6_encoder_context)
{
    gen6_vme_media_init(ctx, gen6_encoder_context);
    gen6_vme_prepare(ctx, encode_state, gen6_encoder_context);
    gen6_vme_run(ctx, encode_state, gen6_encoder_context);
    gen6_vme_stop(ctx, encode_state, gen6_encoder_context);

    return VA_STATUS_SUCCESS;
}
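
/*
 * Upload each VME kernel binary into its own GPU buffer object once at
 * context creation; gen6_vme_context_destroy() releases everything again.
 */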
Bool gen6_vme_context_init(VADriverContextP ctx, struct gen6_vme_context *vme_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int i;

    for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
        /*Load kernel into GPU memory*/
        struct i965_kernel *kernel = &gen6_vme_kernels[i];

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size,
                                  0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    return True;
}
Bool gen6_vme_context_destroy(struct gen6_vme_context *vme_context)
{
    int i;

    for (i = 0; i < MAX_MEDIA_SURFACES_GEN6; i++) {
        dri_bo_unreference(vme_context->surface_state[i].bo);
        vme_context->surface_state[i].bo = NULL;
    }

    dri_bo_unreference(vme_context->idrt.bo);
    vme_context->idrt.bo = NULL;

    dri_bo_unreference(vme_context->binding_table.bo);
    vme_context->binding_table.bo = NULL;

    dri_bo_unreference(vme_context->curbe.bo);
    vme_context->curbe.bo = NULL;

    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;

    for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
        /*Release the kernel GPU buffer*/
        struct i965_kernel *kernel = &gen6_vme_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    return True;
}