 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhao Yakui <yakui.zhao@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_encoder.h"
#define SURFACE_STATE_PADDED_SIZE_0_GEN7 ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN7 ALIGN(sizeof(struct gen7_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7 MAX(SURFACE_STATE_PADDED_SIZE_0_GEN7, SURFACE_STATE_PADDED_SIZE_1_GEN7)

#define SURFACE_STATE_PADDED_SIZE_0_GEN6 ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN6 ALIGN(sizeof(struct i965_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN6 MAX(SURFACE_STATE_PADDED_SIZE_0_GEN6, SURFACE_STATE_PADDED_SIZE_1_GEN6)

#define SURFACE_STATE_PADDED_SIZE MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index) (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET(index) (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * (index))
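
/*
 * A hedged note on the layout implied by the macros above: surface state
 * entries are packed back to back at the start of the surface-state/binding-table
 * buffer, each padded to SURFACE_STATE_PADDED_SIZE, and the binding table
 * itself (one 32-bit entry per surface) starts right after the space reserved
 * for MAX_MEDIA_SURFACES_GEN6 surface states. BINDING_TABLE_OFFSET(i) is thus
 * the dword that refers to the surface state written at SURFACE_STATE_OFFSET(i).
 */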
#define VME_INTRA_SHADER 0
#define VME_INTER_SHADER 1
#define VME_BINTER_SHADER 3
#define VME_BATCHBUFFER 2
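
/*
 * These indices appear to double as the interface-descriptor indices set up in
 * gen75_vme_interface_setup(): kernel i of gen75_vme_kernels[] gets descriptor
 * i, and the batchbuffer-fill helpers below emit the chosen index as the second
 * dword of each MEDIA_OBJECT command to select that kernel.
 */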
#define CURBE_ALLOCATION_SIZE 37 /* in 256-bit registers */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32) /* in bytes; must not exceed CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH 4 /* in 256-bit registers; must not exceed CURBE_TOTAL_DATA_LENGTH / 32 */
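
/*
 * Worked out: CURBE_TOTAL_DATA_LENGTH is 4 * 32 = 128 bytes, and
 * CURBE_URB_ENTRY_LENGTH is 4 rows of 256 bits = 128 bytes, so one URB entry
 * covers exactly the 128 bytes of vme_state_message that
 * gen75_vme_constant_setup() copies into the CURBE buffer.
 */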
#define VME_MSG_LENGTH 32

#define MB_SCOREBOARD_A (1 << 0)
#define MB_SCOREBOARD_B (1 << 1)
#define MB_SCOREBOARD_C (1 << 2)
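
/*
 * Scoreboard dependency bits used by the hardware-scoreboard walker below.
 * Per the usage in gen75_vme_walker_fill_vme_batchbuffer(), A is the left
 * neighbour, B the top neighbour and C the top-right neighbour of the
 * current macroblock.
 */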
static const uint32_t gen75_vme_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"

static const uint32_t gen75_vme_inter_frame[][4] = {
#include "shaders/vme/inter_frame_haswell.g75b"

static const uint32_t gen75_vme_inter_bframe[][4] = {
#include "shaders/vme/inter_bframe_haswell.g75b"

static const uint32_t gen75_vme_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"
static struct i965_kernel gen75_vme_kernels[] = {
    VME_INTRA_SHADER, /*index*/
    gen75_vme_intra_frame,
    sizeof(gen75_vme_intra_frame),

    gen75_vme_inter_frame,
    sizeof(gen75_vme_inter_frame),

    gen75_vme_batchbuffer,
    sizeof(gen75_vme_batchbuffer),

    gen75_vme_inter_bframe,
    sizeof(gen75_vme_inter_bframe),
static const uint32_t gen75_vme_mpeg2_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"

static const uint32_t gen75_vme_mpeg2_inter_frame[][4] = {
#include "shaders/vme/mpeg2_inter_frame_haswell.g75b"

static const uint32_t gen75_vme_mpeg2_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"
static struct i965_kernel gen75_vme_mpeg2_kernels[] = {
    VME_INTRA_SHADER, /*index*/
    gen75_vme_mpeg2_intra_frame,
    sizeof(gen75_vme_mpeg2_intra_frame),

    gen75_vme_mpeg2_inter_frame,
    sizeof(gen75_vme_mpeg2_inter_frame),

    gen75_vme_mpeg2_batchbuffer,
    sizeof(gen75_vme_mpeg2_batchbuffer),
/* only used for the VME source surface state */
gen75_vme_source_surface_state(VADriverContextP ctx,
                               struct object_surface *obj_surface,
                               struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_surface2_setup(ctx,
                                    &vme_context->gpe_context,
                                    BINDING_TABLE_OFFSET(index),
                                    SURFACE_STATE_OFFSET(index));
gen75_vme_media_source_surface_state(VADriverContextP ctx,
                                     struct object_surface *obj_surface,
                                     struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_rw_surface_setup(ctx,
                                            &vme_context->gpe_context,
                                            BINDING_TABLE_OFFSET(index),
                                            SURFACE_STATE_OFFSET(index));
gen75_vme_media_chroma_source_surface_state(VADriverContextP ctx,
                                            struct object_surface *obj_surface,
                                            struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_chroma_surface_setup(ctx,
                                                &vme_context->gpe_context,
                                                BINDING_TABLE_OFFSET(index),
                                                SURFACE_STATE_OFFSET(index));
gen75_vme_output_buffer_setup(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
        /* Inter MV: 32-byte intra search + 16 IME info + 128 IME MV + 32 IME ref
         * + 16 FBR info + 128 FBR MV + 32 FBR ref.
         * 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24.
         */

    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
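
/*
 * Orientation note, derived from the code below: vme_batchbuffer is a
 * second-level batch that gen75_vme_fill_vme_batchbuffer() /
 * gen75_vme_walker_fill_vme_batchbuffer() fill on the CPU with one
 * MEDIA_OBJECT command per macroblock plus a trailing MI_BATCH_BUFFER_END;
 * gen75_vme_pipeline_programing() then jumps into it from the main batch
 * with MI_BATCH_BUFFER_START.
 */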
gen75_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                       struct encode_state *encode_state,
                                       struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
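
/*
 * Binding-table slot assignment used by the H.264 path below (the literal
 * indices passed to the helpers): 0 = current source picture for VME,
 * 1 = forward reference, 2 = backward reference, 3 = VME output buffer,
 * 4 = media read/write view of the source (luma), 5 = VME batchbuffer,
 * 6 = media view of the source chroma.
 */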
gen75_vme_surface_setup(VADriverContextP ctx,
                        struct encode_state *encode_state,
                        struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;

    /* Set up the surface states */
    /* current picture for encoding */
    obj_surface = SURFACE(encoder_context->input_yuv_surface);
    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);

    obj_surface = SURFACE(pPicParameter->ReferenceFrames[0].picture_id);
    if (obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

    obj_surface = SURFACE(pPicParameter->ReferenceFrames[1].picture_id);
    if (obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);

    gen75_vme_output_buffer_setup(ctx, encode_state, 3, encoder_context);
    gen75_vme_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
static VAStatus gen75_vme_interface_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct gen6_interface_descriptor_data *desc;

    bo = vme_context->gpe_context.idrt.bo;

    for (i = 0; i < vme_context->vme_kernel_sum; i++) {
        struct i965_kernel *kernel;
        kernel = &vme_context->gpe_context.kernels[i];
        assert(sizeof(*desc) == 32);
        /* Set up the descriptor table */
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 0; /* FIXME: */
        desc->desc2.sampler_state_pointer = 0;
        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;

        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),

    return VA_STATUS_SUCCESS;
static VAStatus gen75_vme_constant_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned char *constant_buffer;
    unsigned int *vme_state_message;

    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    if (encoder_context->profile == VAProfileH264Baseline ||
        encoder_context->profile == VAProfileH264Main ||
        encoder_context->profile == VAProfileH264High) {
        if (vme_context->h264_level >= 30) {
            if (vme_context->h264_level >= 31)
    } else if (encoder_context->profile == VAProfileMPEG2Simple ||
               encoder_context->profile == VAProfileMPEG2Main) {

    vme_state_message[31] = mv_num;

    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
    assert(vme_context->gpe_context.curbe.bo->virtual);
    constant_buffer = vme_context->gpe_context.curbe.bo->virtual;

    /* The VME MV/MB cost table is passed in through the constant buffer. */
    /* A fixed search path is used, so the table is constructed directly. */
    memcpy(constant_buffer, (char *)vme_context->vme_state_message, 128);

    dri_bo_unmap(vme_context->gpe_context.curbe.bo);

    return VA_STATUS_SUCCESS;
static const unsigned int intra_mb_mode_cost_table[] = {
    0x31110001, // for qp0
    0x09110001, // for qp1
    0x15030001, // for qp2
    0x0b030001, // for qp3
    0x0d030011, // for qp4
    0x17210011, // for qp5
    0x41210011, // for qp6
    0x19210011, // for qp7
    0x25050003, // for qp8
    0x1b130003, // for qp9
    0x1d130003, // for qp10
    0x27070021, // for qp11
    0x51310021, // for qp12
    0x29090021, // for qp13
    0x35150005, // for qp14
    0x2b0b0013, // for qp15
    0x2d0d0013, // for qp16
    0x37170007, // for qp17
    0x61410031, // for qp18
    0x39190009, // for qp19
    0x45250015, // for qp20
    0x3b1b000b, // for qp21
    0x3d1d000d, // for qp22
    0x47270017, // for qp23
    0x71510041, // for qp24 ! center for qp=0..30
    0x49290019, // for qp25
    0x55350025, // for qp26
    0x4b2b001b, // for qp27
    0x4d2d001d, // for qp28
    0x57370027, // for qp29
    0x81610051, // for qp30
    0x57270017, // for qp31
    0x81510041, // for qp32 ! center for qp=31..51
    0x59290019, // for qp33
    0x65350025, // for qp34
    0x5b2b001b, // for qp35
    0x5d2d001d, // for qp36
    0x67370027, // for qp37
    0x91610051, // for qp38
    0x69390029, // for qp39
    0x75450035, // for qp40
    0x6b3b002b, // for qp41
    0x6d3d002d, // for qp42
    0x77470037, // for qp43
    0xa1710061, // for qp44
    0x79490039, // for qp45
    0x85550045, // for qp46
    0x7b4b003b, // for qp47
    0x7d4d003d, // for qp48
    0x87570047, // for qp49
    0xb1810071, // for qp50
    0x89590049  // for qp51
static void gen75_vme_state_setup_fixup(VADriverContextP ctx,
                                        struct encode_state *encode_state,
                                        struct intel_encoder_context *encoder_context,
                                        unsigned int *vme_state_message)
    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
    VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;

    if (slice_param->slice_type != SLICE_TYPE_I &&
        slice_param->slice_type != SLICE_TYPE_SI)

    if (encoder_context->rate_control_mode == VA_RC_CQP)
        vme_state_message[0] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
        vme_state_message[0] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[slice_param->slice_type].QpPrimeY];
static VAStatus gen75_vme_vme_state_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned int *vme_state_message;

    /* pass the MV/MB cost into the VME message on Haswell */
    assert(vme_context->vme_state_message);
    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    vme_state_message[0] = 0x4a4a4a4a;
    vme_state_message[1] = 0x4a4a4a4a;
    vme_state_message[2] = 0x4a4a4a4a;
    vme_state_message[3] = 0x22120200;
    vme_state_message[4] = 0x62524232;

    for (i = 5; i < 8; i++) {
        vme_state_message[i] = 0;

    switch (encoder_context->profile) {
    case VAProfileH264Baseline:
    case VAProfileH264Main:
    case VAProfileH264High:
        gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);

    return VA_STATUS_SUCCESS;
#define INTRA_PRED_AVAIL_FLAG_AE 0x60
#define INTRA_PRED_AVAIL_FLAG_B  0x10
#define INTRA_PRED_AVAIL_FLAG_C  0x8
#define INTRA_PRED_AVAIL_FLAG_D  0x4
#define INTRA_PRED_AVAIL_FLAG_BCD_MASK 0x1C
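
/*
 * Neighbour-availability bits passed to the VME kernels in mb_intra_ub.
 * Judging from how the fill routines below set them: AE marks the left
 * neighbour as available, B the top neighbour, C the top-right neighbour
 * and D the top-left neighbour; BCD_MASK groups the B/C/D bits so the whole
 * top row can be cleared together at slice boundaries.
 */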
gen75_vme_fill_vme_batchbuffer(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               int mb_width, int mb_height,
                               int transform_8x8_mode_flag,
                               struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        int slice_mb_begin = pSliceParameter->macroblock_address;
        int slice_mb_number = pSliceParameter->num_macroblocks;
        unsigned int mb_intra_ub;
        int slice_mb_x = pSliceParameter->macroblock_address % mb_width;

        for (i = 0; i < slice_mb_number; ) {
            int mb_count = i + slice_mb_begin;

            mb_x = mb_count % mb_width;
            mb_y = mb_count / mb_width;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                if (mb_x != (mb_width - 1))
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);

                if ((i == (mb_width - 1)) && slice_mb_x) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                if ((i == mb_width) && slice_mb_x) {
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);

            *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
            *command_ptr++ = kernel;
            *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
            *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/* Check whether the macroblock at (x_index, y_index) falls outside the picture
 * or outside the [first_mb, first_mb + num_mb] range of the current slice. */
static inline int loop_in_bounds(int x_index, int y_index, int first_mb, int num_mb, int mb_width, int mb_height)
    if (x_index < 0 || x_index >= mb_width)
    if (y_index < 0 || y_index >= mb_height)

    mb_index = y_index * mb_width + x_index;
    if (mb_index < first_mb || mb_index > (first_mb + num_mb))
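
/*
 * The walker variant below dispatches MEDIA_OBJECTs in diagonal wavefronts
 * rather than raster order, so that the hardware scoreboard (programmed with
 * the left/top/top-right deltas in gen75_vme_context_init()) can resolve the
 * A/B/C neighbour dependencies while several macroblock rows are in flight.
 * The exact inner-loop stepping is not shown in this listing; each outer
 * iteration starts a new diagonal and each inner iteration walks down-left
 * along it until loop_in_bounds() reports the edge of the slice or picture.
 */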
gen75_vme_walker_fill_vme_batchbuffer(VADriverContextP ctx,
                                      struct encode_state *encode_state,
                                      int mb_width, int mb_height,
                                      int transform_8x8_mode_flag,
                                      struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    unsigned int *command_ptr;

#define USE_SCOREBOARD (1 << 21)

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        int first_mb = pSliceParameter->macroblock_address;
        int num_mb = pSliceParameter->num_macroblocks;
        unsigned int mb_intra_ub, score_dep;
        int x_outer, y_outer, x_inner, y_inner;

        x_outer = first_mb % mb_width;
        y_outer = first_mb / mb_width;

        for (; x_outer < (mb_width - 2) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
            for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height); ) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                    score_dep |= MB_SCOREBOARD_A;

                if (y_inner != mb_row) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                    score_dep |= MB_SCOREBOARD_B;
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                    if (x_inner != (mb_width - 1)) {
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
                        score_dep |= MB_SCOREBOARD_C;

                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
                *command_ptr++ = kernel;
                *command_ptr++ = USE_SCOREBOARD;
                /* the (X, Y) coordinate of this macroblock for the scoreboard */
                *command_ptr++ = ((y_inner << 16) | x_inner);
                *command_ptr++ = score_dep;
                *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
                *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

        x_outer = mb_width - 2;
        y_outer = first_mb / mb_width;

        for (; !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
            for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height); ) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                    score_dep |= MB_SCOREBOARD_A;

                if (y_inner != mb_row) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                    score_dep |= MB_SCOREBOARD_B;
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                    if (x_inner != (mb_width - 1)) {
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
                        score_dep |= MB_SCOREBOARD_C;

                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
                *command_ptr++ = kernel;
                *command_ptr++ = USE_SCOREBOARD;
                /* the (X, Y) coordinate of this macroblock for the scoreboard */
                *command_ptr++ = ((y_inner << 16) | x_inner);
                *command_ptr++ = score_dep;
                *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
                *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

        x_outer = mb_width - 2;

    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
static void gen75_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    i965_gpe_context_init(ctx, &vme_context->gpe_context);

    /* VME output buffer */
    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;
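
/*
 * Note on the shader selection below: gen75_vme_pipeline_programing() first
 * scans the slice headers and only uses the hardware-scoreboard walker when
 * every slice starts at the beginning of a macroblock row; otherwise it falls
 * back to the simple raster-order fill in gen75_vme_fill_vme_batchbuffer().
 */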
static void gen75_vme_pipeline_programing(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
    bool allow_hwscore = true;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        if ((pSliceParameter->macroblock_address % width_in_mbs)) {
            allow_hwscore = false;

    if ((pSliceParameter->slice_type == SLICE_TYPE_I) ||
        (pSliceParameter->slice_type == SLICE_TYPE_SI)) {
        kernel_shader = VME_INTRA_SHADER;
    } else if ((pSliceParameter->slice_type == SLICE_TYPE_P) ||
               (pSliceParameter->slice_type == SLICE_TYPE_SP)) {
        kernel_shader = VME_INTER_SHADER;
        kernel_shader = VME_BINTER_SHADER;
        kernel_shader = VME_INTER_SHADER;

    gen75_vme_walker_fill_vme_batchbuffer(ctx,
                                          width_in_mbs, height_in_mbs,
                                          pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
    gen75_vme_fill_vme_batchbuffer(ctx,
                                   width_in_mbs, height_in_mbs,
                                   pPicParameter->pic_fields.bits.transform_8x8_mode_flag,

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
    ADVANCE_BATCH(batch);

    intel_batchbuffer_end_atomic(batch);
static VAStatus gen75_vme_prepare(VADriverContextP ctx,
                                  struct encode_state *encode_state,
                                  struct intel_encoder_context *encoder_context)
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    if (!vme_context->h264_level ||
        (vme_context->h264_level != pSequenceParameter->level_idc)) {
        vme_context->h264_level = pSequenceParameter->level_idc;

    intel_vme_update_mbmv_cost(ctx, encode_state, encoder_context);

    /* Set up all the memory objects */
    gen75_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
    //gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
    gen75_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen75_vme_pipeline_programing(ctx, encode_state, encoder_context);
static VAStatus gen75_vme_run(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct intel_batchbuffer *batch = encoder_context->base.batch;

    intel_batchbuffer_flush(batch);

    return VA_STATUS_SUCCESS;
static VAStatus gen75_vme_stop(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               struct intel_encoder_context *encoder_context)
    return VA_STATUS_SUCCESS;
gen75_vme_pipeline(VADriverContextP ctx,
                   struct encode_state *encode_state,
                   struct intel_encoder_context *encoder_context)
    gen75_vme_media_init(ctx, encoder_context);
    gen75_vme_prepare(ctx, encode_state, encoder_context);
    gen75_vme_run(ctx, encode_state, encoder_context);
    gen75_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
gen75_vme_mpeg2_output_buffer_setup(VADriverContextP ctx,
                                    struct encode_state *encode_state,
                                    struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
        /* Inter MV: 32-byte intra search + 16 IME info + 128 IME MV + 32 IME ref
         * + 16 FBR info + 128 FBR MV + 32 FBR ref.
         * 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24.
         */

    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
gen75_vme_mpeg2_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                             struct encode_state *encode_state,
                                             struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
gen75_vme_mpeg2_surface_setup(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    VAEncPictureParameterBufferMPEG2 *pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;

    /* Set up the surface states */
    /* current picture for encoding */
    obj_surface = SURFACE(encoder_context->input_yuv_surface);
    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);

    obj_surface = SURFACE(pic_param->forward_reference_picture);
    if (obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

    obj_surface = SURFACE(pic_param->backward_reference_picture);
    if (obj_surface && obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);

    gen75_vme_mpeg2_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
    gen75_vme_mpeg2_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
gen75_vme_mpeg2_fill_vme_batchbuffer(VADriverContextP ctx,
                                     struct encode_state *encode_state,
                                     int mb_width, int mb_height,
                                     int transform_8x8_mode_flag,
                                     struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;

        for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
            int slice_mb_begin = slice_param->macroblock_address;
            int slice_mb_number = slice_param->num_macroblocks;
            unsigned int mb_intra_ub;
            int slice_mb_x = slice_param->macroblock_address % mb_width;

            for (i = 0; i < slice_mb_number;) {
                int mb_count = i + slice_mb_begin;

                mb_x = mb_count % mb_width;
                mb_y = mb_count / mb_width;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                    if (mb_x != (mb_width - 1))
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);

                    if ((i == (mb_width - 1)) && slice_mb_x) {
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                    if ((i == mb_width) && slice_mb_x) {
                        mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);

                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
                *command_ptr++ = kernel;
                *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
                *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
gen75_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
                                    struct encode_state *encode_state,
                                    struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    gen75_vme_mpeg2_fill_vme_batchbuffer(ctx,
                                         width_in_mbs, height_in_mbs,
                                         is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
    ADVANCE_BATCH(batch);

    intel_batchbuffer_end_atomic(batch);
gen75_vme_mpeg2_prepare(VADriverContextP ctx,
                        struct encode_state *encode_state,
                        struct intel_encoder_context *encoder_context)
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;

    /* Set up all the memory objects */
    gen75_vme_mpeg2_surface_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
    gen75_vme_vme_state_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    gen75_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen75_vme_mpeg2_pipeline_programing(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
gen75_vme_mpeg2_pipeline(VADriverContextP ctx,
                         struct encode_state *encode_state,
                         struct intel_encoder_context *encoder_context)
    gen75_vme_media_init(ctx, encoder_context);
    gen75_vme_mpeg2_prepare(ctx, encode_state, encoder_context);
    gen75_vme_run(ctx, encode_state, encoder_context);
    gen75_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
gen75_vme_context_destroy(void *context)
    struct gen6_vme_context *vme_context = context;

    i965_gpe_context_destroy(&vme_context->gpe_context);

    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    if (vme_context->vme_state_message) {
        free(vme_context->vme_state_message);
        vme_context->vme_state_message = NULL;
Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
    struct i965_kernel *vme_kernel_list = NULL;
    int i965_kernel_num;

    switch (encoder_context->profile) {
    case VAProfileH264Baseline:
    case VAProfileH264Main:
    case VAProfileH264High:
        vme_kernel_list = gen75_vme_kernels;
        encoder_context->vme_pipeline = gen75_vme_pipeline;
        i965_kernel_num = sizeof(gen75_vme_kernels) / sizeof(struct i965_kernel);

    case VAProfileMPEG2Simple:
    case VAProfileMPEG2Main:
        vme_kernel_list = gen75_vme_mpeg2_kernels;
        encoder_context->vme_pipeline = gen75_vme_mpeg2_pipeline;
        i965_kernel_num = sizeof(gen75_vme_mpeg2_kernels) / sizeof(struct i965_kernel);

        /* never get here */

    vme_context->vme_kernel_sum = i965_kernel_num;
    vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;

    vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
    vme_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);

    vme_context->gpe_context.curbe.length = CURBE_TOTAL_DATA_LENGTH;

    vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
    vme_context->gpe_context.vfe_state.num_urb_entries = 16;
    vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
    vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
    vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;

    vme_context->gpe_context.vfe_desc5.scoreboard0.enable = 1;
    vme_context->gpe_context.vfe_desc5.scoreboard0.type = SCOREBOARD_STALLING;
    vme_context->gpe_context.vfe_desc5.scoreboard0.mask = (MB_SCOREBOARD_A |

    /* In VME prediction the current mb depends on the neighbour
     * A/B/C macroblock. So the left/up/up-right dependency should
     * be taken into account by the hardware scoreboard.
     */
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x0 = -1;
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y0 = 0;
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x1 = 0;
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y1 = -1;
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x2 = 1;
    vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y2 = -1;
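    /*
     * The three delta pairs mirror MB_SCOREBOARD_A/B/C: (-1, 0) is the left
     * neighbour (A), (0, -1) the top neighbour (B) and (+1, -1) the top-right
     * neighbour (C), matching the score_dep bits set while filling the walker
     * batchbuffer.
     */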
    vme_context->gpe_context.vfe_desc7.dword = 0;

    i965_gpe_load_kernels(ctx,
                          &vme_context->gpe_context,

    vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
    vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
    vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
    vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;

    encoder_context->vme_context = vme_context;
    encoder_context->vme_context_destroy = gen75_vme_context_destroy;

    vme_context->vme_state_message = malloc(VME_MSG_LENGTH * sizeof(int));