/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhao Yakui <yakui.zhao@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_encoder.h"
#include "gen6_vme.h"
#include "gen6_mfc.h"

#define SURFACE_STATE_PADDED_SIZE_0_GEN7        ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN7        ALIGN(sizeof(struct gen7_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN7, SURFACE_STATE_PADDED_SIZE_1_GEN7)

#define SURFACE_STATE_PADDED_SIZE_0_GEN6        ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN6        ALIGN(sizeof(struct i965_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN6          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN6, SURFACE_STATE_PADDED_SIZE_1_GEN6)

#define SURFACE_STATE_PADDED_SIZE               MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET(index)             (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * (index))
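
/*
 * Note on the layout above: the surface-state/binding-table buffer holds
 * MAX_MEDIA_SURFACES_GEN6 padded surface-state entries first, followed by the
 * binding table itself (one 32-bit entry per surface). So binding-table entry
 * i lives at BINDING_TABLE_OFFSET(i) and refers to the state written at
 * SURFACE_STATE_OFFSET(i).
 */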

#define VME_INTRA_SHADER        0
#define VME_INTER_SHADER        1
#define VME_BINTER_SHADER       3
#define VME_BATCHBUFFER         2

#define CURBE_ALLOCATION_SIZE   37              /* in 256-bit units */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32)        /* in bytes, must not exceed CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH  4               /* in 256-bit units, must not exceed CURBE_TOTAL_DATA_LENGTH / 32 */

#define VME_MSG_LENGTH          32
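
/*
 * Quick arithmetic check on the CURBE sizing: CURBE_URB_ENTRY_LENGTH is 4
 * rows of 256 bits, i.e. 4 * 32 = 128 bytes, which matches
 * CURBE_TOTAL_DATA_LENGTH and is well under the 37 * 32 = 1184 bytes reserved
 * by CURBE_ALLOCATION_SIZE. VME_MSG_LENGTH is 32 dwords, i.e. the same 128
 * bytes.
 */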

static const uint32_t gen8_vme_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"
};

static const uint32_t gen8_vme_inter_frame[][4] = {
#include "shaders/vme/inter_frame_haswell.g75b"
};

static const uint32_t gen8_vme_inter_bframe[][4] = {
#include "shaders/vme/inter_bframe_haswell.g75b"
};

static const uint32_t gen8_vme_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"
};
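
/*
 * Each .g75b file expands to a comma-separated list of 128-bit (4 x uint32_t)
 * instruction words, so the #include completes the array initializer above.
 * Note that these are the Haswell (gen7.5) kernel binaries, reused here for
 * the gen8 VME context.
 */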

/* name, kernel index, binary, binary size, bo (filled by i965_gpe_load_kernels) */
static struct i965_kernel gen8_vme_kernels[] = {
    { "VME Intra Frame",  VME_INTRA_SHADER,  gen8_vme_intra_frame,  sizeof(gen8_vme_intra_frame),  NULL },
    { "VME inter Frame",  VME_INTER_SHADER,  gen8_vme_inter_frame,  sizeof(gen8_vme_inter_frame),  NULL },
    { "VME BATCHBUFFER",  VME_BATCHBUFFER,   gen8_vme_batchbuffer,  sizeof(gen8_vme_batchbuffer),  NULL },
    { "VME inter BFrame", VME_BINTER_SHADER, gen8_vme_inter_bframe, sizeof(gen8_vme_inter_bframe), NULL },
};

static const uint32_t gen8_vme_mpeg2_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"
};

static const uint32_t gen8_vme_mpeg2_inter_frame[][4] = {
#include "shaders/vme/mpeg2_inter_haswell.g75b"
};

static const uint32_t gen8_vme_mpeg2_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"
};

static struct i965_kernel gen8_vme_mpeg2_kernels[] = {
    { "VME Intra Frame", VME_INTRA_SHADER, gen8_vme_mpeg2_intra_frame, sizeof(gen8_vme_mpeg2_intra_frame), NULL }, /*index*/
    { "VME inter Frame", VME_INTER_SHADER, gen8_vme_mpeg2_inter_frame, sizeof(gen8_vme_mpeg2_inter_frame), NULL },
    { "VME BATCHBUFFER", VME_BATCHBUFFER,  gen8_vme_mpeg2_batchbuffer, sizeof(gen8_vme_mpeg2_batchbuffer), NULL },
};

/* only used for VME source surface state */
static void
gen8_vme_source_surface_state(VADriverContextP ctx,
                              int index,
                              struct object_surface *obj_surface,
                              struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_surface2_setup(ctx,
                                    &vme_context->gpe_context,
                                    obj_surface,
                                    BINDING_TABLE_OFFSET(index),
                                    SURFACE_STATE_OFFSET(index));
}

static void
gen8_vme_media_source_surface_state(VADriverContextP ctx,
                                    int index,
                                    struct object_surface *obj_surface,
                                    struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_rw_surface_setup(ctx,
                                            &vme_context->gpe_context,
                                            obj_surface,
                                            BINDING_TABLE_OFFSET(index),
                                            SURFACE_STATE_OFFSET(index));
}

static void
gen8_vme_media_chroma_source_surface_state(VADriverContextP ctx,
                                           int index,
                                           struct object_surface *obj_surface,
                                           struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_chroma_surface_setup(ctx,
                                                &vme_context->gpe_context,
                                                obj_surface,
                                                BINDING_TABLE_OFFSET(index),
                                                SURFACE_STATE_OFFSET(index));
}

static void
gen8_vme_output_buffer_setup(VADriverContextP ctx,
                             struct encode_state *encode_state,
                             int index,
                             struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */

    if (is_intra)
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
    else
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
    /*
     * Inter MV: 32-byte intra search + 16 IME info + 128 IME MV + 32 IME Ref
     * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
     * 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24.
     */

    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              "VME output buffer",
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
                                              0x1000);
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}
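
/*
 * Rough sizing example (assuming INTRA_VME_OUTPUT_IN_BYTES is 16 bytes): a
 * 1920x1088 frame has 120 x 68 = 8160 macroblocks, so the VME output buffer
 * is about 8160 * 32 bytes (~255 KB) for an intra frame and 8160 * 384 bytes
 * (~3 MB) for an inter frame.
 */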

static void
gen8_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                      struct encode_state *encode_state,
                                      int index,
                                      struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   "VME batchbuffer",
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
                                                   0x1000);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}
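
/*
 * Note: the buffer reserves one 64-byte block per macroblock plus one spare
 * block. Each MEDIA_OBJECT emitted by gen8_vme_fill_vme_batchbuffer() is only
 * 8 dwords (32 bytes), so this is a comfortable upper bound that also leaves
 * room for the trailing MI_BATCH_BUFFER_END.
 */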

static VAStatus
gen8_vme_surface_setup(VADriverContextP ctx,
                       struct encode_state *encode_state,
                       int is_intra,
                       struct intel_encoder_context *encoder_context)
{
    struct object_surface *obj_surface;

    /* Set up the surface states */
    /* current picture for encoding */
    obj_surface = encode_state->input_yuv_object;
    gen8_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen8_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen8_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);

    /* reference 0 */
    obj_surface = encode_state->reference_objects[0];

    if (obj_surface && obj_surface->bo)
        gen8_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

    /* reference 1 */
    obj_surface = encode_state->reference_objects[1];

    if (obj_surface && obj_surface->bo)
        gen8_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);

    /* VME output */
    gen8_vme_output_buffer_setup(ctx, encode_state, 3, encoder_context);
    gen8_vme_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
}
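
/*
 * Binding-table slots used above (H.264): 0 = VME view of the source picture,
 * 1/2 = reference pictures 0/1, 3 = VME output buffer, 4 = source luma
 * (media R/W), 5 = the generated per-MB batchbuffer, 6 = source chroma
 * (media R/W).
 */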

static VAStatus gen8_vme_interface_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct gen6_interface_descriptor_data *desc;
    int i;
    dri_bo *bo;

    bo = vme_context->gpe_context.idrt.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc = bo->virtual;

    for (i = 0; i < vme_context->vme_kernel_sum; i++) {
        struct i965_kernel *kernel;
        kernel = &vme_context->gpe_context.kernels[i];
        assert(sizeof(*desc) == 32);
        /* Set up the descriptor table */
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 0; /* FIXME: */
        desc->desc2.sampler_state_pointer = 0;
        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;

        /* kernel start pointer */
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
                          kernel->bo);
        desc++;
    }

    dri_bo_unmap(bo);

    return VA_STATUS_SUCCESS;
}
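
/*
 * Each interface descriptor is 32 bytes (asserted above), and descriptor i
 * describes kernel i, so the "kernel" dword written into every MEDIA_OBJECT
 * below is simply an index into this table. The relocation lets desc0's
 * kernel start pointer be fixed up with the kernel BO's real graphics address
 * when the batch is executed.
 */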

static VAStatus gen8_vme_constant_setup(VADriverContextP ctx,
                                        struct encode_state *encode_state,
                                        struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned char *constant_buffer;
    unsigned int *vme_state_message;
    int mv_num = 32;

    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    if (encoder_context->codec == CODEC_H264) {
        if (vme_context->h264_level >= 30) {
            mv_num = 16;

            if (vme_context->h264_level >= 31)
                mv_num = 8;
        }
    } else if (encoder_context->codec == CODEC_MPEG2) {
        mv_num = 2;
    }

    vme_state_message[31] = mv_num;

    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
    assert(vme_context->gpe_context.curbe.bo->virtual);
    constant_buffer = vme_context->gpe_context.curbe.bo->virtual;

    /*
     * The VME MV/MB cost table is passed through the constant (CURBE) buffer.
     * The fixed search path is constructed directly in the kernel, so only
     * the state message itself is copied here.
     */
    memcpy(constant_buffer, (char *)vme_context->vme_state_message, 128);

    dri_bo_unmap(vme_context->gpe_context.curbe.bo);

    return VA_STATUS_SUCCESS;
}
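
/*
 * vme_state_message[] holds VME_MSG_LENGTH (32) dwords, i.e. exactly the 128
 * bytes copied into the CURBE above: the low dwords carry mode/MV cost values
 * (see gen8_vme_vme_state_setup() and intel_vme_update_mbmv_cost()), and
 * dword 31 carries the MV count limit derived from the codec/level above.
 */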

static const unsigned int intra_mb_mode_cost_table[] = {
    0x31110001, // for qp0
    0x09110001, // for qp1
    0x15030001, // for qp2
    0x0b030001, // for qp3
    0x0d030011, // for qp4
    0x17210011, // for qp5
    0x41210011, // for qp6
    0x19210011, // for qp7
    0x25050003, // for qp8
    0x1b130003, // for qp9
    0x1d130003, // for qp10
    0x27070021, // for qp11
    0x51310021, // for qp12
    0x29090021, // for qp13
    0x35150005, // for qp14
    0x2b0b0013, // for qp15
    0x2d0d0013, // for qp16
    0x37170007, // for qp17
    0x61410031, // for qp18
    0x39190009, // for qp19
    0x45250015, // for qp20
    0x3b1b000b, // for qp21
    0x3d1d000d, // for qp22
    0x47270017, // for qp23
    0x71510041, // for qp24 ! center for qp=0..30
    0x49290019, // for qp25
    0x55350025, // for qp26
    0x4b2b001b, // for qp27
    0x4d2d001d, // for qp28
    0x57370027, // for qp29
    0x81610051, // for qp30
    0x57270017, // for qp31
    0x81510041, // for qp32 ! center for qp=31..51
    0x59290019, // for qp33
    0x65350025, // for qp34
    0x5b2b001b, // for qp35
    0x5d2d001d, // for qp36
    0x67370027, // for qp37
    0x91610051, // for qp38
    0x69390029, // for qp39
    0x75450035, // for qp40
    0x6b3b002b, // for qp41
    0x6d3d002d, // for qp42
    0x77470037, // for qp43
    0xa1710061, // for qp44
    0x79490039, // for qp45
    0x85550045, // for qp46
    0x7b4b003b, // for qp47
    0x7d4d003d, // for qp48
    0x87570047, // for qp49
    0xb1810071, // for qp50
    0x89590049  // for qp51
};
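
/*
 * One packed 32-bit intra MB mode cost per QP value (0-51).
 * gen8_vme_state_setup_fixup() indexes this table with the effective slice QP
 * and writes the selected entry into dword 0 of the VME state message.
 */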

static void gen8_vme_state_setup_fixup(VADriverContextP ctx,
                                       struct encode_state *encode_state,
                                       struct intel_encoder_context *encoder_context,
                                       unsigned int *vme_state_message)
{
    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
    VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;

    if (slice_param->slice_type != SLICE_TYPE_I &&
        slice_param->slice_type != SLICE_TYPE_SI)
        return;

    if (encoder_context->rate_control_mode == VA_RC_CQP)
        vme_state_message[0] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
    else
        vme_state_message[0] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[slice_param->slice_type].QpPrimeY];
}

static VAStatus gen8_vme_vme_state_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         int is_intra,
                                         struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned int *vme_state_message;
    int i;

    /* pass the MV/MB cost into the VME state message */
    assert(vme_context->vme_state_message);
    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    vme_state_message[0] = 0x4a4a4a4a;
    vme_state_message[1] = 0x4a4a4a4a;
    vme_state_message[2] = 0x4a4a4a4a;
    vme_state_message[3] = 0x22120200;
    vme_state_message[4] = 0x62524232;

    for (i = 5; i < 8; i++) {
        vme_state_message[i] = 0;
    }

    switch (encoder_context->codec) {
    case CODEC_H264:
        gen8_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);
        break;

    default:
        /* no codec-specific fixup */
        break;
    }

    return VA_STATUS_SUCCESS;
}
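
/*
 * The constants above seed the cost fields of the VME state message with
 * fixed defaults; for H.264 the intra mode cost in dword 0 is then replaced
 * per-QP by gen8_vme_state_setup_fixup(). Dword 31 (the MV count limit) is
 * filled in later by gen8_vme_constant_setup().
 */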

static void
gen8_vme_fill_vme_batchbuffer(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              int mb_width, int mb_height,
                              int kernel,
                              int transform_8x8_mode_flag,
                              struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    int i, s;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        int slice_mb_begin = pSliceParameter->macroblock_address;
        int slice_mb_number = pSliceParameter->num_macroblocks;
        unsigned int mb_intra_ub;
        int slice_mb_x = pSliceParameter->macroblock_address % mb_width;

        for (i = 0; i < slice_mb_number; ) {
            int mb_count = i + slice_mb_begin;
            mb_x = mb_count % mb_width;
            mb_y = mb_count / mb_width;
            mb_intra_ub = 0;

            if (mb_x != 0)
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;

            if (mb_y != 0) {
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                if (mb_x != 0)
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                if (mb_x != (mb_width - 1))
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
            }

            if (i < mb_width) {
                if (i == 0)
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
                if ((i == (mb_width - 1)) && slice_mb_x) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
                }
            }

            if ((i == mb_width) && slice_mb_x) {
                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
            }

            *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
            *command_ptr++ = kernel;
            *command_ptr++ = 0;
            *command_ptr++ = 0;
            *command_ptr++ = 0;
            *command_ptr++ = 0;

            /* inline data */
            *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
            *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

            i += 1;
        }
    }

    *command_ptr++ = 0;
    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
}
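
/*
 * Each macroblock is dispatched with one 8-dword MEDIA_OBJECT (hence the
 * "8 - 2" length field): dword 1 selects the interface descriptor (kernel),
 * dwords 2-5 are unused here, and the two inline-data dwords carry the MB
 * position together with the intra-prediction neighbour availability flags
 * (mb_intra_ub) and the transform_8x8 flag consumed by the VME kernel.
 */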

static void gen8_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    i965_gpe_context_init(ctx, &vme_context->gpe_context);

    /* VME output buffer */
    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    /* VME batchbuffer */
    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    /* VME state */
    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;
}

static void gen8_vme_pipeline_programing(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
    int kernel_shader;
    bool allow_hwscore = true;
    int s;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        if ((pSliceParameter->macroblock_address % width_in_mbs)) {
            allow_hwscore = false;
            break;
        }
    }

    if ((pSliceParameter->slice_type == SLICE_TYPE_I) ||
        (pSliceParameter->slice_type == SLICE_TYPE_SI)) {
        kernel_shader = VME_INTRA_SHADER;
    } else if ((pSliceParameter->slice_type == SLICE_TYPE_P) ||
               (pSliceParameter->slice_type == SLICE_TYPE_SP)) {
        kernel_shader = VME_INTER_SHADER;
    } else {
        kernel_shader = VME_BINTER_SHADER;

        if (!allow_hwscore)
            kernel_shader = VME_INTER_SHADER;
    }

    if (allow_hwscore)
        gen7_vme_walker_fill_vme_batchbuffer(ctx,
                                             encode_state,
                                             width_in_mbs, height_in_mbs,
                                             kernel_shader,
                                             pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
                                             encoder_context);
    else
        gen8_vme_fill_vme_batchbuffer(ctx,
                                      encode_state,
                                      width_in_mbs, height_in_mbs,
                                      kernel_shader,
                                      pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
                                      encoder_context);

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
    OUT_RELOC(batch,
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
              0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}
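
/*
 * When every slice begins at the start of a macroblock row, the batchbuffer
 * can be generated by the hardware-scoreboard walker path
 * (gen7_vme_walker_fill_vme_batchbuffer); otherwise each MB is emitted
 * individually above, and B slices fall back to the plain inter kernel
 * (presumably because the bi-directional kernel depends on the scoreboard
 * path).
 */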

static VAStatus gen8_vme_prepare(VADriverContextP ctx,
                                 struct encode_state *encode_state,
                                 struct intel_encoder_context *encoder_context)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    if (!vme_context->h264_level ||
        (vme_context->h264_level != pSequenceParameter->level_idc)) {
        vme_context->h264_level = pSequenceParameter->level_idc;
    }

    intel_vme_update_mbmv_cost(ctx, encode_state, encoder_context);

    /* Set up all the memory objects */
    gen8_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
    gen8_vme_interface_setup(ctx, encode_state, encoder_context);
    //gen8_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
    gen8_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen8_vme_pipeline_programing(ctx, encode_state, encoder_context);

    return vaStatus;
}

static VAStatus gen8_vme_run(VADriverContextP ctx,
                             struct encode_state *encode_state,
                             struct intel_encoder_context *encoder_context)
{
    struct intel_batchbuffer *batch = encoder_context->base.batch;

    intel_batchbuffer_flush(batch);

    return VA_STATUS_SUCCESS;
}

static VAStatus gen8_vme_stop(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
{
    return VA_STATUS_SUCCESS;
}

static VAStatus
gen8_vme_pipeline(VADriverContextP ctx,
                  VAProfile profile,
                  struct encode_state *encode_state,
                  struct intel_encoder_context *encoder_context)
{
    gen8_vme_media_init(ctx, encoder_context);
    gen8_vme_prepare(ctx, encode_state, encoder_context);
    gen8_vme_run(ctx, encode_state, encoder_context);
    gen8_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
}
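
/*
 * The VME pipeline thus runs in four steps: media_init releases the previous
 * frame's buffers, prepare builds the surface states, interface descriptors,
 * CURBE data and the per-MB command buffer, run flushes the batch to the GPU,
 * and stop is currently a no-op placeholder.
 */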

static void
gen8_vme_mpeg2_output_buffer_setup(VADriverContextP ctx,
                                   struct encode_state *encode_state,
                                   int index,
                                   int is_intra,
                                   struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */

    if (is_intra)
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
    else
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
    /*
     * Inter MV: 32-byte intra search + 16 IME info + 128 IME MV + 32 IME Ref
     * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
     * 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24.
     */

    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              "VME output buffer",
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
                                              0x1000);
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}

static void
gen8_vme_mpeg2_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                            struct encode_state *encode_state,
                                            int index,
                                            struct intel_encoder_context *encoder_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   "VME batchbuffer",
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
                                                   0x1000);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
}

static VAStatus
gen8_vme_mpeg2_surface_setup(VADriverContextP ctx,
                             struct encode_state *encode_state,
                             int is_intra,
                             struct intel_encoder_context *encoder_context)
{
    struct object_surface *obj_surface;

    /* Set up the surface states */
    /* current picture for encoding */
    obj_surface = encode_state->input_yuv_object;
    gen8_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen8_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen8_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);

    if (!is_intra) {
        /* reference 0 */
        obj_surface = encode_state->reference_objects[0];

        if (obj_surface && obj_surface->bo != NULL)
            gen8_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

        /* reference 1 */
        obj_surface = encode_state->reference_objects[1];

        if (obj_surface && obj_surface->bo != NULL)
            gen8_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
    }

    /* VME output */
    gen8_vme_mpeg2_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
    gen8_vme_mpeg2_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
}

static void
gen8_vme_mpeg2_fill_vme_batchbuffer(VADriverContextP ctx,
                                    struct encode_state *encode_state,
                                    int mb_width, int mb_height,
                                    int kernel,
                                    int transform_8x8_mode_flag,
                                    struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    int i, s, j;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;

        for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
            int slice_mb_begin = slice_param->macroblock_address;
            int slice_mb_number = slice_param->num_macroblocks;
            unsigned int mb_intra_ub;
            int slice_mb_x = slice_param->macroblock_address % mb_width;

            for (i = 0; i < slice_mb_number;) {
                int mb_count = i + slice_mb_begin;
                mb_x = mb_count % mb_width;
                mb_y = mb_count / mb_width;
                mb_intra_ub = 0;

                if (mb_x != 0)
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;

                if (mb_y != 0) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                    if (mb_x != 0)
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                    if (mb_x != (mb_width - 1))
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
                }

                if (i < mb_width) {
                    if (i == 0)
                        mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
                    if ((i == (mb_width - 1)) && slice_mb_x) {
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
                    }
                }

                if ((i == mb_width) && slice_mb_x) {
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
                }

                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
                *command_ptr++ = kernel;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;

                /* inline data */
                *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
                *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

                i += 1;
            }

            slice_param++;
        }
    }

    *command_ptr++ = 0;
    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
}

static void
gen8_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
                                   struct encode_state *encode_state,
                                   int is_intra,
                                   struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    gen8_vme_mpeg2_fill_vme_batchbuffer(ctx,
                                        encode_state,
                                        width_in_mbs, height_in_mbs,
                                        is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
                                        0 /* no transform_8x8 for MPEG-2 */,
                                        encoder_context);

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
    OUT_RELOC(batch,
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
              0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}

static VAStatus
gen8_vme_mpeg2_prepare(VADriverContextP ctx,
                       struct encode_state *encode_state,
                       struct intel_encoder_context *encoder_context)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;

    /* Set up all the memory objects */
    gen8_vme_mpeg2_surface_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    gen8_vme_interface_setup(ctx, encode_state, encoder_context);
    gen8_vme_vme_state_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    gen8_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen8_vme_mpeg2_pipeline_programing(ctx, encode_state, slice_param->is_intra_slice, encoder_context);

    return vaStatus;
}

static VAStatus
gen8_vme_mpeg2_pipeline(VADriverContextP ctx,
                        VAProfile profile,
                        struct encode_state *encode_state,
                        struct intel_encoder_context *encoder_context)
{
    gen8_vme_media_init(ctx, encoder_context);
    gen8_vme_mpeg2_prepare(ctx, encode_state, encoder_context);
    gen8_vme_run(ctx, encode_state, encoder_context);
    gen8_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
}
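
/*
 * The MPEG-2 pipeline mirrors the H.264 one above; the main differences are
 * that the kernel is chosen purely from is_intra_slice, the transform_8x8
 * flag stays 0, and the cost message comes from gen8_vme_vme_state_setup()
 * rather than the per-QP H.264 tables.
 */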

static void
gen8_vme_context_destroy(void *context)
{
    struct gen6_vme_context *vme_context = context;

    i965_gpe_context_destroy(&vme_context->gpe_context);

    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    if (vme_context->vme_state_message) {
        free(vme_context->vme_state_message);
        vme_context->vme_state_message = NULL;
    }

    free(vme_context);
}

Bool gen8_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
    struct i965_kernel *vme_kernel_list = NULL;
    int i965_kernel_num;

    switch (encoder_context->codec) {
    case CODEC_H264:
        vme_kernel_list = gen8_vme_kernels;
        encoder_context->vme_pipeline = gen8_vme_pipeline;
        i965_kernel_num = sizeof(gen8_vme_kernels) / sizeof(struct i965_kernel);
        break;

    case CODEC_MPEG2:
        vme_kernel_list = gen8_vme_mpeg2_kernels;
        encoder_context->vme_pipeline = gen8_vme_mpeg2_pipeline;
        i965_kernel_num = sizeof(gen8_vme_mpeg2_kernels) / sizeof(struct i965_kernel);
        break;

    default:
        /* never get here */
        assert(0);
        break;
    }

    vme_context->vme_kernel_sum = i965_kernel_num;
    vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;

    vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
    vme_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);

    vme_context->gpe_context.curbe.length = CURBE_TOTAL_DATA_LENGTH;

    vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
    vme_context->gpe_context.vfe_state.num_urb_entries = 16;
    vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
    vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
    vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;

    gen7_vme_scoreboard_init(ctx, vme_context);

    i965_gpe_load_kernels(ctx,
                          &vme_context->gpe_context,
                          vme_kernel_list,
                          i965_kernel_num);
    vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
    vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
    vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
    vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;

    encoder_context->vme_context = vme_context;
    encoder_context->vme_context_destroy = gen8_vme_context_destroy;

    vme_context->vme_state_message = malloc(VME_MSG_LENGTH * sizeof(int));

    return True;
}