2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Xiang Haihao <haihao.xiang@intel.com>
29 #ifndef HAVE_GEN_AVC_SURFACE
30 #define HAVE_GEN_AVC_SURFACE 1
34 #include "intel_batchbuffer.h"
35 #include "intel_driver.h"
36 #include "i965_defines.h"
37 #include "i965_drv_video.h"
38 #include "i965_decoder_utils.h"
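/*
 * Zig-zag scan index -> raster position mapping.  gen6_mfd_mpeg2_qm_state()
 * uses it to de-zigzag the MPEG-2 quantiser matrices from the VA IQ matrix
 * buffer (which carries them in scan order) into raster order before they
 * are loaded with MFX_MPEG2_QM_STATE.
 */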
42 static const uint32_t zigzag_direct[64] = {
43 0, 1, 8, 16, 9, 2, 3, 10,
44 17, 24, 32, 25, 18, 11, 4, 5,
45 12, 19, 26, 33, 40, 48, 41, 34,
46 27, 20, 13, 6, 7, 14, 21, 28,
47 35, 42, 49, 56, 57, 50, 43, 36,
48 29, 22, 15, 23, 30, 37, 44, 51,
49 58, 59, 52, 45, 38, 31, 39, 46,
50 53, 60, 61, 54, 47, 55, 62, 63
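/*
 * Reconcile the driver's 16-entry frame store (reference_surface[]) with the
 * ReferenceFrames list of the current picture:
 *   1. evict entries that are no longer referenced, releasing surfaces that
 *      have already been displayed and freeing their private decoder data;
 *   2. register new reference pictures, giving each the lowest unused
 *      frame-store id and making sure their NV12 BOs exist;
 *   3. swap entries so that slot i holds frame_store_id i where possible,
 *      presumably so later commands can index the frame store directly.
 */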
54 gen6_mfd_avc_frame_store_index(VADriverContextP ctx,
55 VAPictureParameterBufferH264 *pic_param,
56 struct gen6_mfd_context *gen6_mfd_context)
58 struct i965_driver_data *i965 = i965_driver_data(ctx);
61 assert(ARRAY_ELEMS(gen6_mfd_context->reference_surface) == ARRAY_ELEMS(pic_param->ReferenceFrames));
63 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++) {
66 if (gen6_mfd_context->reference_surface[i].surface_id == VA_INVALID_ID)
69 for (j = 0; j < ARRAY_ELEMS(pic_param->ReferenceFrames); j++) {
70 VAPictureH264 *ref_pic = &pic_param->ReferenceFrames[j];
71 if (ref_pic->flags & VA_PICTURE_H264_INVALID)
74 if (gen6_mfd_context->reference_surface[i].surface_id == ref_pic->picture_id) {
81 struct object_surface *obj_surface = SURFACE(gen6_mfd_context->reference_surface[i].surface_id);
82 obj_surface->flags &= ~SURFACE_REFERENCED;
84 if ((obj_surface->flags & SURFACE_ALL_MASK) == SURFACE_DISPLAYED) {
85 dri_bo_unreference(obj_surface->bo);
86 obj_surface->bo = NULL;
87 obj_surface->flags &= ~SURFACE_REF_DIS_MASK;
90 if (obj_surface->free_private_data)
91 obj_surface->free_private_data(&obj_surface->private_data);
93 gen6_mfd_context->reference_surface[i].surface_id = VA_INVALID_ID;
94 gen6_mfd_context->reference_surface[i].frame_store_id = -1;
98 for (i = 0; i < ARRAY_ELEMS(pic_param->ReferenceFrames); i++) {
99 VAPictureH264 *ref_pic = &pic_param->ReferenceFrames[i];
102 if (ref_pic->flags & VA_PICTURE_H264_INVALID)
105 for (j = 0; j < ARRAY_ELEMS(gen6_mfd_context->reference_surface); j++) {
106 if (gen6_mfd_context->reference_surface[j].surface_id == VA_INVALID_ID)
109 if (gen6_mfd_context->reference_surface[j].surface_id == ref_pic->picture_id) {
117 struct object_surface *obj_surface = SURFACE(ref_pic->picture_id);
120 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N', 'V', '1', '2'), SUBSAMPLE_YUV420);
122 for (frame_idx = 0; frame_idx < ARRAY_ELEMS(gen6_mfd_context->reference_surface); frame_idx++) {
123 for (j = 0; j < ARRAY_ELEMS(gen6_mfd_context->reference_surface); j++) {
124 if (gen6_mfd_context->reference_surface[j].surface_id == VA_INVALID_ID)
127 if (gen6_mfd_context->reference_surface[j].frame_store_id == frame_idx)
131 if (j == ARRAY_ELEMS(gen6_mfd_context->reference_surface))
135 assert(frame_idx < ARRAY_ELEMS(gen6_mfd_context->reference_surface));
137 for (j = 0; j < ARRAY_ELEMS(gen6_mfd_context->reference_surface); j++) {
138 if (gen6_mfd_context->reference_surface[j].surface_id == VA_INVALID_ID) {
139 gen6_mfd_context->reference_surface[j].surface_id = ref_pic->picture_id;
140 gen6_mfd_context->reference_surface[j].frame_store_id = frame_idx;
148 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface) - 1; i++) {
149 if (gen6_mfd_context->reference_surface[i].surface_id != VA_INVALID_ID &&
150 gen6_mfd_context->reference_surface[i].frame_store_id == i)
153 for (j = i + 1; j < ARRAY_ELEMS(gen6_mfd_context->reference_surface); j++) {
154 if (gen6_mfd_context->reference_surface[j].surface_id != VA_INVALID_ID &&
155 gen6_mfd_context->reference_surface[j].frame_store_id == i) {
156 VASurfaceID id = gen6_mfd_context->reference_surface[i].surface_id;
157 int frame_idx = gen6_mfd_context->reference_surface[i].frame_store_id;
159 gen6_mfd_context->reference_surface[i].surface_id = gen6_mfd_context->reference_surface[j].surface_id;
160 gen6_mfd_context->reference_surface[i].frame_store_id = gen6_mfd_context->reference_surface[j].frame_store_id;
161 gen6_mfd_context->reference_surface[j].surface_id = id;
162 gen6_mfd_context->reference_surface[j].frame_store_id = frame_idx;
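/*
 * Attach a GenAvcSurface to the decoded picture (lazily, on first use) and
 * allocate its direct-MV write/read buffer, sized 128 * 64 bytes per
 * macroblock row so it scales with the frame height.  A separate bottom
 * field buffer is only needed for field pictures coded without
 * direct_8x8_inference (dmv_bottom_flag below).
 */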
170 gen6_mfd_init_avc_surface(VADriverContextP ctx,
171 VAPictureParameterBufferH264 *pic_param,
172 struct object_surface *obj_surface)
174 struct i965_driver_data *i965 = i965_driver_data(ctx);
175 GenAvcSurface *gen6_avc_surface = obj_surface->private_data;
178 obj_surface->free_private_data = gen_free_avc_surface;
179 height_in_mbs = ((pic_param->picture_height_in_mbs_minus1 + 1) & 0xff); /* frame height */
181 if (!gen6_avc_surface) {
182 gen6_avc_surface = calloc(1, sizeof(GenAvcSurface));
183 assert((obj_surface->size & 0x3f) == 0);
184 obj_surface->private_data = gen6_avc_surface;
187 gen6_avc_surface->dmv_bottom_flag = (pic_param->pic_fields.bits.field_pic_flag &&
188 !pic_param->seq_fields.bits.direct_8x8_inference_flag);
190 if (gen6_avc_surface->dmv_top == NULL) {
191 gen6_avc_surface->dmv_top = dri_bo_alloc(i965->intel.bufmgr,
192 "direct mv w/r buffer",
193 128 * height_in_mbs * 64, /* scalable with frame height */
197 if (gen6_avc_surface->dmv_bottom_flag &&
198 gen6_avc_surface->dmv_bottom == NULL) {
199 gen6_avc_surface->dmv_bottom = dri_bo_alloc(i965->intel.bufmgr,
200 "direct mv w/r buffer",
201 128 * height_in_mbs * 64, /* scalable with frame height */
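/*
 * MFX_PIPE_MODE_SELECT: put the MFX engine into VLD decode mode for the
 * selected codec (MPEG-2/AVC/VC-1), with stream-out disabled, and enable
 * whichever of the pre-/post-deblocking output paths the per-codec init
 * code marked as valid.
 */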
207 gen6_mfd_pipe_mode_select(VADriverContextP ctx,
208 struct decode_state *decode_state,
210 struct gen6_mfd_context *gen6_mfd_context)
212 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
214 assert(standard_select == MFX_FORMAT_MPEG2 ||
215 standard_select == MFX_FORMAT_AVC ||
216 standard_select == MFX_FORMAT_VC1);
218 BEGIN_BCS_BATCH(batch, 4);
219 OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (4 - 2));
221 (MFD_MODE_VLD << 16) | /* VLD mode */
222 (0 << 10) | /* disable Stream-Out */
223 (gen6_mfd_context->post_deblocking_output.valid << 9) | /* Post Deblocking Output */
224 (gen6_mfd_context->pre_deblocking_output.valid << 8) | /* Pre Deblocking Output */
225 (0 << 7) | /* disable TLB prefetch */
226 (0 << 5) | /* not in stitch mode */
227 (MFX_CODEC_DECODE << 4) | /* decoding mode */
228 (standard_select << 0));
230 (0 << 20) | /* round flag in PB slice */
231 (0 << 19) | /* round flag in Intra8x8 */
232 (0 << 7) | /* expand NOA bus flag */
233 (1 << 6) | /* must be 1 */
234 (0 << 5) | /* disable clock gating for NOA */
235 (0 << 4) | /* terminate if AVC motion and POC table error occurs */
236 (0 << 3) | /* terminate if AVC mbdata error occurs */
237 (0 << 2) | /* terminate if AVC CABAC/CAVLC decode error occurs */
238 (0 << 1) | /* AVC long field motion vector */
239 (1 << 0)); /* always calculate AVC ILDB boundary strength */
240 OUT_BCS_BATCH(batch, 0);
241 ADVANCE_BCS_BATCH(batch);
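/*
 * MFX_SURFACE_STATE: describe the destination surface -- an NV12 (planar
 * 4:2:0, interleaved U/V), Y-tiled buffer whose chroma plane starts at
 * y offset obj_surface->height; width, height and pitch come from the
 * render target's object_surface.
 */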
245 gen6_mfd_surface_state(VADriverContextP ctx,
246 struct decode_state *decode_state,
248 struct gen6_mfd_context *gen6_mfd_context)
250 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
251 struct i965_driver_data *i965 = i965_driver_data(ctx);
252 struct object_surface *obj_surface = SURFACE(decode_state->current_render_target);
255 BEGIN_BCS_BATCH(batch, 6);
256 OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
257 OUT_BCS_BATCH(batch, 0);
259 ((obj_surface->orig_height - 1) << 19) |
260 ((obj_surface->orig_width - 1) << 6));
262 (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
263 (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
264 (0 << 22) | /* surface object control state, FIXME??? */
265 ((obj_surface->width - 1) << 3) | /* pitch */
266 (0 << 2) | /* must be 0 for interleave U/V */
267 (1 << 1) | /* must be y-tiled */
268 (I965_TILEWALK_YMAJOR << 0)); /* tile walk, FIXME: must be 1 ??? */
270 (0 << 16) | /* must be 0 for interleave U/V */
271 (obj_surface->height)); /* y offset for U(cb) */
272 OUT_BCS_BATCH(batch, 0);
273 ADVANCE_BCS_BATCH(batch);
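/*
 * MFX_PIPE_BUF_ADDR_STATE: hand the engine its working buffers -- the pre-
 * and post-deblocking destinations, the intra and deblocking-filter
 * row-store scratch buffers, and the BOs of up to 16 reference pictures in
 * frame-store order.  Invalid or unused slots are written as 0.
 */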
277 gen6_mfd_pipe_buf_addr_state(VADriverContextP ctx,
278 struct decode_state *decode_state,
280 struct gen6_mfd_context *gen6_mfd_context)
282 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
283 struct i965_driver_data *i965 = i965_driver_data(ctx);
286 BEGIN_BCS_BATCH(batch, 24);
287 OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (24 - 2));
288 if (gen6_mfd_context->pre_deblocking_output.valid)
289 OUT_BCS_RELOC(batch, gen6_mfd_context->pre_deblocking_output.bo,
290 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
293 OUT_BCS_BATCH(batch, 0);
295 if (gen6_mfd_context->post_deblocking_output.valid)
296 OUT_BCS_RELOC(batch, gen6_mfd_context->post_deblocking_output.bo,
297 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
300 OUT_BCS_BATCH(batch, 0);
302 OUT_BCS_BATCH(batch, 0); /* ignore for decoding */
303 OUT_BCS_BATCH(batch, 0); /* ignore for decoding */
305 if (gen6_mfd_context->intra_row_store_scratch_buffer.valid)
306 OUT_BCS_RELOC(batch, gen6_mfd_context->intra_row_store_scratch_buffer.bo,
307 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
310 OUT_BCS_BATCH(batch, 0);
312 if (gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.valid)
313 OUT_BCS_RELOC(batch, gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo,
314 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
317 OUT_BCS_BATCH(batch, 0);
320 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++) {
321 struct object_surface *obj_surface;
323 if (gen6_mfd_context->reference_surface[i].surface_id != VA_INVALID_ID) {
324 obj_surface = SURFACE(gen6_mfd_context->reference_surface[i].surface_id);
325 assert(obj_surface && obj_surface->bo);
327 OUT_BCS_RELOC(batch, obj_surface->bo,
328 I915_GEM_DOMAIN_INSTRUCTION, 0,
331 OUT_BCS_BATCH(batch, 0);
335 OUT_BCS_BATCH(batch, 0); /* ignore DW23 for decoding */
336 ADVANCE_BCS_BATCH(batch);
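/*
 * MFX_IND_OBJ_BASE_ADDR_STATE: point the bitstream fetch unit at the slice
 * data BO.  Only the indirect bitstream object base address matters here;
 * the remaining entries are ignored in VLD mode and left zero.
 */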
340 gen6_mfd_ind_obj_base_addr_state(VADriverContextP ctx,
341 dri_bo *slice_data_bo,
343 struct gen6_mfd_context *gen6_mfd_context)
345 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
347 BEGIN_BCS_BATCH(batch, 11);
348 OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
349 OUT_BCS_RELOC(batch, slice_data_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0); /* MFX Indirect Bitstream Object Base Address */
350 OUT_BCS_BATCH(batch, 0);
351 OUT_BCS_BATCH(batch, 0); /* ignore for VLD mode */
352 OUT_BCS_BATCH(batch, 0);
353 OUT_BCS_BATCH(batch, 0); /* ignore for VLD mode */
354 OUT_BCS_BATCH(batch, 0);
355 OUT_BCS_BATCH(batch, 0); /* ignore for VLD mode */
356 OUT_BCS_BATCH(batch, 0);
357 OUT_BCS_BATCH(batch, 0); /* ignore for VLD mode */
358 OUT_BCS_BATCH(batch, 0);
359 ADVANCE_BCS_BATCH(batch);
363 gen6_mfd_bsp_buf_base_addr_state(VADriverContextP ctx,
364 struct decode_state *decode_state,
366 struct gen6_mfd_context *gen6_mfd_context)
368 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
370 BEGIN_BCS_BATCH(batch, 4);
371 OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (4 - 2));
373 if (gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.valid)
374 OUT_BCS_RELOC(batch, gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo,
375 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
378 OUT_BCS_BATCH(batch, 0);
380 if (gen6_mfd_context->mpr_row_store_scratch_buffer.valid)
381 OUT_BCS_RELOC(batch, gen6_mfd_context->mpr_row_store_scratch_buffer.bo,
382 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
385 OUT_BCS_BATCH(batch, 0);
387 if (gen6_mfd_context->bitplane_read_buffer.valid)
388 OUT_BCS_RELOC(batch, gen6_mfd_context->bitplane_read_buffer.bo,
389 I915_GEM_DOMAIN_INSTRUCTION, 0,
392 OUT_BCS_BATCH(batch, 0);
394 ADVANCE_BCS_BATCH(batch);
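/*
 * MFX_AVC_IMG_STATE: per-picture AVC parameters -- picture size in MBs,
 * the chroma QP index offsets, whether scaling matrices follow
 * (qm_present_flag) and the sequence/picture flags (entropy coding mode,
 * transform_8x8, MBAFF, field picture, ...) repacked from the VA picture
 * parameter buffer.
 */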
398 gen6_mfd_avc_img_state(VADriverContextP ctx,
399 struct decode_state *decode_state,
400 struct gen6_mfd_context *gen6_mfd_context)
402 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
405 int mbaff_frame_flag;
406 unsigned int width_in_mbs, height_in_mbs;
407 VAPictureParameterBufferH264 *pic_param;
409 assert(decode_state->pic_param && decode_state->pic_param->buffer);
410 pic_param = (VAPictureParameterBufferH264 *)decode_state->pic_param->buffer;
411 assert(!(pic_param->CurrPic.flags & VA_PICTURE_H264_INVALID));
413 if (decode_state->iq_matrix && decode_state->iq_matrix->buffer)
416 qm_present_flag = 0; /* built-in QM matrices */
418 if (pic_param->CurrPic.flags & VA_PICTURE_H264_TOP_FIELD)
420 else if (pic_param->CurrPic.flags & VA_PICTURE_H264_BOTTOM_FIELD)
425 if ((img_struct & 0x1) == 0x1) {
426 assert(pic_param->pic_fields.bits.field_pic_flag == 0x1);
428 assert(pic_param->pic_fields.bits.field_pic_flag == 0x0);
431 if (pic_param->seq_fields.bits.frame_mbs_only_flag) { /* a frame containing only frame macroblocks */
432 assert(pic_param->seq_fields.bits.mb_adaptive_frame_field_flag == 0);
433 assert(pic_param->pic_fields.bits.field_pic_flag == 0);
435 assert(pic_param->seq_fields.bits.direct_8x8_inference_flag == 1); /* see H.264 spec */
438 mbaff_frame_flag = (pic_param->seq_fields.bits.mb_adaptive_frame_field_flag &&
439 !pic_param->pic_fields.bits.field_pic_flag);
441 width_in_mbs = ((pic_param->picture_width_in_mbs_minus1 + 1) & 0xff);
442 height_in_mbs = ((pic_param->picture_height_in_mbs_minus1 + 1) & 0xff); /* frame height */
443 assert(!((width_in_mbs * height_in_mbs) & 0x8000)); /* hardware requirement */
445 /* The MFX unit doesn't support 4:2:2 and 4:4:4 pictures */
446 assert(pic_param->seq_fields.bits.chroma_format_idc == 0 || /* monochrome picture */
447 pic_param->seq_fields.bits.chroma_format_idc == 1); /* 4:2:0 */
448 assert(pic_param->seq_fields.bits.residual_colour_transform_flag == 0); /* only available for 4:4:4 */
450 BEGIN_BCS_BATCH(batch, 13);
451 OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (13 - 2));
453 ((width_in_mbs * height_in_mbs) & 0x7fff));
455 (height_in_mbs << 16) |
456 (width_in_mbs << 0));
458 ((pic_param->second_chroma_qp_index_offset & 0x1f) << 24) |
459 ((pic_param->chroma_qp_index_offset & 0x1f) << 16) |
460 (0 << 14) | /* Max-bit conformance Intra flag ??? FIXME */
461 (0 << 13) | /* Max Macroblock size conformance Inter flag ??? FIXME */
462 (1 << 12) | /* always 1, hardware requirement */
463 (qm_present_flag << 10) |
467 (pic_param->seq_fields.bits.chroma_format_idc << 10) |
468 (pic_param->pic_fields.bits.entropy_coding_mode_flag << 7) |
469 ((!pic_param->pic_fields.bits.reference_pic_flag) << 6) |
470 (pic_param->pic_fields.bits.constrained_intra_pred_flag << 5) |
471 (pic_param->seq_fields.bits.direct_8x8_inference_flag << 4) |
472 (pic_param->pic_fields.bits.transform_8x8_mode_flag << 3) |
473 (pic_param->seq_fields.bits.frame_mbs_only_flag << 2) |
474 (mbaff_frame_flag << 1) |
475 (pic_param->pic_fields.bits.field_pic_flag << 0));
476 OUT_BCS_BATCH(batch, 0);
477 OUT_BCS_BATCH(batch, 0);
478 OUT_BCS_BATCH(batch, 0);
479 OUT_BCS_BATCH(batch, 0);
480 OUT_BCS_BATCH(batch, 0);
481 OUT_BCS_BATCH(batch, 0);
482 OUT_BCS_BATCH(batch, 0);
483 OUT_BCS_BATCH(batch, 0);
484 ADVANCE_BCS_BATCH(batch);
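/*
 * MFX_AVC_QM_STATE: load the scaling matrices from VAIQMatrixBufferH264.
 * The six 4x4 lists are always sent; the two 8x8 lists are appended only
 * when transform_8x8_mode_flag is set, and the command length is adjusted
 * accordingly.
 */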
488 gen6_mfd_avc_qm_state(VADriverContextP ctx,
489 struct decode_state *decode_state,
490 struct gen6_mfd_context *gen6_mfd_context)
492 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
494 VAIQMatrixBufferH264 *iq_matrix;
495 VAPictureParameterBufferH264 *pic_param;
497 if (!decode_state->iq_matrix || !decode_state->iq_matrix->buffer)
500 iq_matrix = (VAIQMatrixBufferH264 *)decode_state->iq_matrix->buffer;
502 assert(decode_state->pic_param && decode_state->pic_param->buffer);
503 pic_param = (VAPictureParameterBufferH264 *)decode_state->pic_param->buffer;
505 cmd_len = 2 + 6 * 4; /* always load six 4x4 scaling matrices */
507 if (pic_param->pic_fields.bits.transform_8x8_mode_flag)
508 cmd_len += 2 * 16; /* load two 8x8 scaling matrices */
510 BEGIN_BCS_BATCH(batch, cmd_len);
511 OUT_BCS_BATCH(batch, MFX_AVC_QM_STATE | (cmd_len - 2));
513 if (pic_param->pic_fields.bits.transform_8x8_mode_flag)
515 (0x0 << 8) | /* don't use default built-in matrices */
516 (0xff << 0)); /* six 4x4 and two 8x8 scaling matrices */
519 (0x0 << 8) | /* don't use default built-in matrices */
520 (0x3f << 0)); /* six 4x4 scaling matrices */
522 intel_batchbuffer_data(batch, &iq_matrix->ScalingList4x4[0][0], 6 * 4 * 4);
524 if (pic_param->pic_fields.bits.transform_8x8_mode_flag)
525 intel_batchbuffer_data(batch, &iq_matrix->ScalingList8x8[0][0], 2 * 16 * 4);
527 ADVANCE_BCS_BATCH(batch);
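/*
 * MFX_AVC_DIRECTMODE_STATE: supply, for every frame-store entry and for the
 * current picture, the direct-MV buffers allocated in
 * gen6_mfd_init_avc_surface() together with the top/bottom field order
 * counts -- the data the B-slice direct-mode prediction presumably consumes.
 */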
531 gen6_mfd_avc_directmode_state(VADriverContextP ctx,
532 VAPictureParameterBufferH264 *pic_param,
533 VASliceParameterBufferH264 *slice_param,
534 struct gen6_mfd_context *gen6_mfd_context)
536 struct i965_driver_data *i965 = i965_driver_data(ctx);
537 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
538 struct object_surface *obj_surface;
539 GenAvcSurface *gen6_avc_surface;
540 VAPictureH264 *va_pic;
543 BEGIN_BCS_BATCH(batch, 69);
544 OUT_BCS_BATCH(batch, MFX_AVC_DIRECTMODE_STATE | (69 - 2));
546 /* reference surfaces 0..15 */
547 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++) {
548 if (gen6_mfd_context->reference_surface[i].surface_id != VA_INVALID_ID) {
549 obj_surface = SURFACE(gen6_mfd_context->reference_surface[i].surface_id);
551 gen6_avc_surface = obj_surface->private_data;
553 if (gen6_avc_surface == NULL) {
554 OUT_BCS_BATCH(batch, 0);
555 OUT_BCS_BATCH(batch, 0);
557 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_top,
558 I915_GEM_DOMAIN_INSTRUCTION, 0,
561 if (gen6_avc_surface->dmv_bottom_flag == 1)
562 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_bottom,
563 I915_GEM_DOMAIN_INSTRUCTION, 0,
566 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_top,
567 I915_GEM_DOMAIN_INSTRUCTION, 0,
571 OUT_BCS_BATCH(batch, 0);
572 OUT_BCS_BATCH(batch, 0);
576 /* the frame/field currently being decoded */
577 va_pic = &pic_param->CurrPic;
578 assert(!(va_pic->flags & VA_PICTURE_H264_INVALID));
579 obj_surface = SURFACE(va_pic->picture_id);
580 assert(obj_surface && obj_surface->bo && obj_surface->private_data);
581 gen6_avc_surface = obj_surface->private_data;
583 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_top,
584 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
587 if (gen6_avc_surface->dmv_bottom_flag == 1)
588 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_bottom,
589 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
592 OUT_BCS_RELOC(batch, gen6_avc_surface->dmv_top,
593 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
597 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++) {
598 if (gen6_mfd_context->reference_surface[i].surface_id != VA_INVALID_ID) {
600 for (j = 0; j < ARRAY_ELEMS(pic_param->ReferenceFrames); j++) {
601 va_pic = &pic_param->ReferenceFrames[j];
603 if (va_pic->flags & VA_PICTURE_H264_INVALID)
606 if (va_pic->picture_id == gen6_mfd_context->reference_surface[i].surface_id) {
613 assert(!(va_pic->flags & VA_PICTURE_H264_INVALID));
615 OUT_BCS_BATCH(batch, va_pic->TopFieldOrderCnt);
616 OUT_BCS_BATCH(batch, va_pic->BottomFieldOrderCnt);
618 OUT_BCS_BATCH(batch, 0);
619 OUT_BCS_BATCH(batch, 0);
623 va_pic = &pic_param->CurrPic;
624 OUT_BCS_BATCH(batch, va_pic->TopFieldOrderCnt);
625 OUT_BCS_BATCH(batch, va_pic->BottomFieldOrderCnt);
627 ADVANCE_BCS_BATCH(batch);
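/*
 * MFX_AVC_SLICE_STATE: per-slice control.  The VA slice type is folded to
 * I/P/B, the active reference counts and weighted-prediction mode are
 * derived per slice type (implicit B weighting forces both log2 weight
 * denominators to 5, see 8.4.3), and the start/end MB positions are taken
 * from first_mb_in_slice of this slice and of the next one (or from the
 * frame size for the last slice).
 */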
631 gen6_mfd_avc_slice_state(VADriverContextP ctx,
632 VAPictureParameterBufferH264 *pic_param,
633 VASliceParameterBufferH264 *slice_param,
634 VASliceParameterBufferH264 *next_slice_param,
635 struct gen6_mfd_context *gen6_mfd_context)
637 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
638 int width_in_mbs = pic_param->picture_width_in_mbs_minus1 + 1;
639 int height_in_mbs = pic_param->picture_height_in_mbs_minus1 + 1;
640 int slice_hor_pos, slice_ver_pos, next_slice_hor_pos, next_slice_ver_pos;
641 int num_ref_idx_l0, num_ref_idx_l1;
642 int mbaff_picture = (!pic_param->pic_fields.bits.field_pic_flag &&
643 pic_param->seq_fields.bits.mb_adaptive_frame_field_flag);
644 int weighted_pred_idc = 0;
645 int first_mb_in_slice = 0, first_mb_in_next_slice = 0;
646 unsigned int chroma_log2_weight_denom, luma_log2_weight_denom;
649 if (slice_param->slice_type == SLICE_TYPE_I ||
650 slice_param->slice_type == SLICE_TYPE_SI) {
651 slice_type = SLICE_TYPE_I;
652 } else if (slice_param->slice_type == SLICE_TYPE_P ||
653 slice_param->slice_type == SLICE_TYPE_SP) {
654 slice_type = SLICE_TYPE_P;
656 assert(slice_param->slice_type == SLICE_TYPE_B);
657 slice_type = SLICE_TYPE_B;
660 luma_log2_weight_denom = slice_param->luma_log2_weight_denom;
661 chroma_log2_weight_denom = slice_param->chroma_log2_weight_denom;
663 if (slice_type == SLICE_TYPE_I) {
664 assert(slice_param->num_ref_idx_l0_active_minus1 == 0);
665 assert(slice_param->num_ref_idx_l1_active_minus1 == 0);
668 } else if (slice_type == SLICE_TYPE_P) {
669 assert(slice_param->num_ref_idx_l1_active_minus1 == 0);
670 num_ref_idx_l0 = slice_param->num_ref_idx_l0_active_minus1 + 1;
672 weighted_pred_idc = (pic_param->pic_fields.bits.weighted_pred_flag == 1);
674 num_ref_idx_l0 = slice_param->num_ref_idx_l0_active_minus1 + 1;
675 num_ref_idx_l1 = slice_param->num_ref_idx_l1_active_minus1 + 1;
676 weighted_pred_idc = pic_param->pic_fields.bits.weighted_bipred_idc;
678 if (weighted_pred_idc == 2) {
679 /* 8.4.3 - Derivation process for prediction weights (8-279) */
680 luma_log2_weight_denom = 5;
681 chroma_log2_weight_denom = 5;
685 first_mb_in_slice = slice_param->first_mb_in_slice << mbaff_picture;
686 slice_hor_pos = first_mb_in_slice % width_in_mbs;
687 slice_ver_pos = first_mb_in_slice / width_in_mbs;
689 if (next_slice_param) {
690 first_mb_in_next_slice = next_slice_param->first_mb_in_slice << mbaff_picture;
691 next_slice_hor_pos = first_mb_in_next_slice % width_in_mbs;
692 next_slice_ver_pos = first_mb_in_next_slice / width_in_mbs;
694 next_slice_hor_pos = 0;
695 next_slice_ver_pos = height_in_mbs;
698 BEGIN_BCS_BATCH(batch, 11); /* FIXME: is it 10??? */
699 OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2));
700 OUT_BCS_BATCH(batch, slice_type);
702 (num_ref_idx_l1 << 24) |
703 (num_ref_idx_l0 << 16) |
704 (chroma_log2_weight_denom << 8) |
705 (luma_log2_weight_denom << 0));
707 (weighted_pred_idc << 30) |
708 (slice_param->direct_spatial_mv_pred_flag << 29) |
709 (slice_param->disable_deblocking_filter_idc << 27) |
710 (slice_param->cabac_init_idc << 24) |
711 ((pic_param->pic_init_qp_minus26 + 26 + slice_param->slice_qp_delta) << 16) |
712 ((slice_param->slice_beta_offset_div2 & 0xf) << 8) |
713 ((slice_param->slice_alpha_c0_offset_div2 & 0xf) << 0));
715 (slice_ver_pos << 24) |
716 (slice_hor_pos << 16) |
717 (first_mb_in_slice << 0));
719 (next_slice_ver_pos << 16) |
720 (next_slice_hor_pos << 0));
722 (next_slice_param == NULL) << 19); /* last slice flag */
723 OUT_BCS_BATCH(batch, 0);
724 OUT_BCS_BATCH(batch, 0);
725 OUT_BCS_BATCH(batch, 0);
726 OUT_BCS_BATCH(batch, 0);
727 ADVANCE_BCS_BATCH(batch);
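/*
 * A "phantom" slice is emitted after the real slices of every picture: an
 * otherwise zeroed MFX_AVC_SLICE_STATE whose end position is the total MB
 * count, followed by an empty MFD_AVC_BSD_OBJECT.  Presumably this lets the
 * BSD unit terminate the picture cleanly even when the real slices do not
 * cover every macroblock.
 */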
731 gen6_mfd_avc_phantom_slice_state(VADriverContextP ctx,
732 VAPictureParameterBufferH264 *pic_param,
733 struct gen6_mfd_context *gen6_mfd_context)
735 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
736 int width_in_mbs = pic_param->picture_width_in_mbs_minus1 + 1;
737 int height_in_mbs = pic_param->picture_height_in_mbs_minus1 + 1; /* frame height */
739 BEGIN_BCS_BATCH(batch, 11); /* FIXME: is it 10??? */
740 OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2));
741 OUT_BCS_BATCH(batch, 0);
742 OUT_BCS_BATCH(batch, 0);
743 OUT_BCS_BATCH(batch, 0);
745 height_in_mbs << 24 |
746 width_in_mbs * height_in_mbs / (1 + !!pic_param->pic_fields.bits.field_pic_flag));
747 OUT_BCS_BATCH(batch, 0);
748 OUT_BCS_BATCH(batch, 0);
749 OUT_BCS_BATCH(batch, 0);
750 OUT_BCS_BATCH(batch, 0);
751 OUT_BCS_BATCH(batch, 0);
752 OUT_BCS_BATCH(batch, 0);
753 ADVANCE_BCS_BATCH(batch);
757 gen6_mfd_avc_ref_idx_state(VADriverContextP ctx,
758 VAPictureParameterBufferH264 *pic_param,
759 VASliceParameterBufferH264 *slice_param,
760 struct gen6_mfd_context *gen6_mfd_context)
762 gen6_send_avc_ref_idx_state(
763 gen6_mfd_context->base.batch,
765 gen6_mfd_context->reference_surface
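/*
 * MFX_AVC_WEIGHTOFFSET_STATE: explicit weighted-prediction tables.  One
 * table is sent for P/SP slices with weighted_pred_flag set, two (L0 then
 * L1) for B slices with weighted_bipred_idc == 1.  Each table holds 32
 * entries of six int16 values: {luma weight, luma offset, Cb weight,
 * Cb offset, Cr weight, Cr offset}.
 */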
770 gen6_mfd_avc_weightoffset_state(VADriverContextP ctx,
771 VAPictureParameterBufferH264 *pic_param,
772 VASliceParameterBufferH264 *slice_param,
773 struct gen6_mfd_context *gen6_mfd_context)
775 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
776 int i, j, num_weight_offset_table = 0;
777 short weightoffsets[32 * 6];
779 if ((slice_param->slice_type == SLICE_TYPE_P ||
780 slice_param->slice_type == SLICE_TYPE_SP) &&
781 (pic_param->pic_fields.bits.weighted_pred_flag == 1)) {
782 num_weight_offset_table = 1;
785 if ((slice_param->slice_type == SLICE_TYPE_B) &&
786 (pic_param->pic_fields.bits.weighted_bipred_idc == 1)) {
787 num_weight_offset_table = 2;
790 for (i = 0; i < num_weight_offset_table; i++) {
791 BEGIN_BCS_BATCH(batch, 98);
792 OUT_BCS_BATCH(batch, MFX_AVC_WEIGHTOFFSET_STATE | (98 - 2));
793 OUT_BCS_BATCH(batch, i);
796 for (j = 0; j < 32; j++) {
797 weightoffsets[j * 6 + 0] = slice_param->luma_weight_l0[j];
798 weightoffsets[j * 6 + 1] = slice_param->luma_offset_l0[j];
799 weightoffsets[j * 6 + 2] = slice_param->chroma_weight_l0[j][0];
800 weightoffsets[j * 6 + 3] = slice_param->chroma_offset_l0[j][0];
801 weightoffsets[j * 6 + 4] = slice_param->chroma_weight_l0[j][1];
802 weightoffsets[j * 6 + 5] = slice_param->chroma_offset_l0[j][1];
805 for (j = 0; j < 32; j++) {
806 weightoffsets[j * 6 + 0] = slice_param->luma_weight_l1[j];
807 weightoffsets[j * 6 + 1] = slice_param->luma_offset_l1[j];
808 weightoffsets[j * 6 + 2] = slice_param->chroma_weight_l1[j][0];
809 weightoffsets[j * 6 + 3] = slice_param->chroma_offset_l1[j][0];
810 weightoffsets[j * 6 + 4] = slice_param->chroma_weight_l1[j][1];
811 weightoffsets[j * 6 + 5] = slice_param->chroma_offset_l1[j][1];
815 intel_batchbuffer_data(batch, weightoffsets, sizeof(weightoffsets));
816 ADVANCE_BCS_BATCH(batch);
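/*
 * MFD_AVC_BSD_OBJECT: kick off decoding of one slice.  The command carries
 * the slice data size and offset within the bound bitstream BO plus the bit
 * offset of the first macroblock, so the hardware can skip the slice header
 * that the driver has already parsed; avc_get_first_mb_bit_offset() is
 * passed entropy_coding_mode_flag, presumably to account for CABAC byte
 * alignment.
 */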
821 gen6_mfd_avc_bsd_object(VADriverContextP ctx,
822 VAPictureParameterBufferH264 *pic_param,
823 VASliceParameterBufferH264 *slice_param,
824 dri_bo *slice_data_bo,
825 struct gen6_mfd_context *gen6_mfd_context)
827 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
828 unsigned int slice_data_bit_offset;
830 slice_data_bit_offset = avc_get_first_mb_bit_offset(
833 pic_param->pic_fields.bits.entropy_coding_mode_flag
836 BEGIN_BCS_BATCH(batch, 6);
837 OUT_BCS_BATCH(batch, MFD_AVC_BSD_OBJECT | (6 - 2));
839 (slice_param->slice_data_size - slice_param->slice_data_offset));
840 OUT_BCS_BATCH(batch, slice_param->slice_data_offset);
848 ((slice_data_bit_offset >> 3) << 16) |
850 ((0x7 - (slice_data_bit_offset & 0x7)) << 0));
851 OUT_BCS_BATCH(batch, 0);
852 ADVANCE_BCS_BATCH(batch);
856 gen6_mfd_avc_phantom_slice_bsd_object(VADriverContextP ctx,
857 VAPictureParameterBufferH264 *pic_param,
858 struct gen6_mfd_context *gen6_mfd_context)
860 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
862 BEGIN_BCS_BATCH(batch, 6);
863 OUT_BCS_BATCH(batch, MFD_AVC_BSD_OBJECT | (6 - 2));
864 OUT_BCS_BATCH(batch, 0);
865 OUT_BCS_BATCH(batch, 0);
866 OUT_BCS_BATCH(batch, 0);
867 OUT_BCS_BATCH(batch, 0);
868 OUT_BCS_BATCH(batch, 0);
869 ADVANCE_BCS_BATCH(batch);
873 gen6_mfd_avc_phantom_slice(VADriverContextP ctx,
874 VAPictureParameterBufferH264 *pic_param,
875 struct gen6_mfd_context *gen6_mfd_context)
877 gen6_mfd_avc_phantom_slice_state(ctx, pic_param, gen6_mfd_context);
878 gen6_mfd_avc_phantom_slice_bsd_object(ctx, pic_param, gen6_mfd_context);
882 gen6_mfd_avc_decode_init(VADriverContextP ctx,
883 struct decode_state *decode_state,
884 struct gen6_mfd_context *gen6_mfd_context)
886 VAPictureParameterBufferH264 *pic_param;
887 VASliceParameterBufferH264 *slice_param;
888 VAPictureH264 *va_pic;
889 struct i965_driver_data *i965 = i965_driver_data(ctx);
890 struct object_surface *obj_surface;
892 int i, j, enable_avc_ildb = 0;
895 for (j = 0; j < decode_state->num_slice_params && enable_avc_ildb == 0; j++) {
896 assert(decode_state->slice_params && decode_state->slice_params[j]->buffer);
897 slice_param = (VASliceParameterBufferH264 *)decode_state->slice_params[j]->buffer;
899 for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
900 assert(slice_param->slice_data_flag == VA_SLICE_DATA_FLAG_ALL);
901 assert((slice_param->slice_type == SLICE_TYPE_I) ||
902 (slice_param->slice_type == SLICE_TYPE_SI) ||
903 (slice_param->slice_type == SLICE_TYPE_P) ||
904 (slice_param->slice_type == SLICE_TYPE_SP) ||
905 (slice_param->slice_type == SLICE_TYPE_B));
907 if (slice_param->disable_deblocking_filter_idc != 1) {
916 assert(decode_state->pic_param && decode_state->pic_param->buffer);
917 pic_param = (VAPictureParameterBufferH264 *)decode_state->pic_param->buffer;
918 gen6_mfd_avc_frame_store_index(ctx, pic_param, gen6_mfd_context);
919 width_in_mbs = ((pic_param->picture_width_in_mbs_minus1 + 1) & 0xff);
921 /* Current decoded picture */
922 va_pic = &pic_param->CurrPic;
923 assert(!(va_pic->flags & VA_PICTURE_H264_INVALID));
924 obj_surface = SURFACE(va_pic->picture_id);
926 obj_surface->flags &= ~SURFACE_REF_DIS_MASK;
927 obj_surface->flags |= (pic_param->pic_fields.bits.reference_pic_flag ? SURFACE_REFERENCED : 0);
928 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
930 /* initialize the UV plane to neutral chroma (0x80) for the YUV400 case */
931 if (pic_param->seq_fields.bits.chroma_format_idc == 0) {
932 unsigned int uv_offset = obj_surface->width * obj_surface->height;
933 unsigned int uv_size = obj_surface->width * obj_surface->height / 2;
935 drm_intel_gem_bo_map_gtt(obj_surface->bo);
936 memset(obj_surface->bo->virtual + uv_offset, 0x80, uv_size);
937 drm_intel_gem_bo_unmap_gtt(obj_surface->bo);
940 gen6_mfd_init_avc_surface(ctx, pic_param, obj_surface);
942 dri_bo_unreference(gen6_mfd_context->post_deblocking_output.bo);
943 gen6_mfd_context->post_deblocking_output.bo = obj_surface->bo;
944 dri_bo_reference(gen6_mfd_context->post_deblocking_output.bo);
945 gen6_mfd_context->post_deblocking_output.valid = enable_avc_ildb;
947 dri_bo_unreference(gen6_mfd_context->pre_deblocking_output.bo);
948 gen6_mfd_context->pre_deblocking_output.bo = obj_surface->bo;
949 dri_bo_reference(gen6_mfd_context->pre_deblocking_output.bo);
950 gen6_mfd_context->pre_deblocking_output.valid = !enable_avc_ildb;
952 dri_bo_unreference(gen6_mfd_context->intra_row_store_scratch_buffer.bo);
953 bo = dri_bo_alloc(i965->intel.bufmgr,
958 gen6_mfd_context->intra_row_store_scratch_buffer.bo = bo;
959 gen6_mfd_context->intra_row_store_scratch_buffer.valid = 1;
961 dri_bo_unreference(gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo);
962 bo = dri_bo_alloc(i965->intel.bufmgr,
963 "deblocking filter row store",
964 width_in_mbs * 64 * 4,
967 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
968 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.valid = 1;
970 dri_bo_unreference(gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo);
971 bo = dri_bo_alloc(i965->intel.bufmgr,
976 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
977 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.valid = 1;
979 dri_bo_unreference(gen6_mfd_context->mpr_row_store_scratch_buffer.bo);
980 bo = dri_bo_alloc(i965->intel.bufmgr,
985 gen6_mfd_context->mpr_row_store_scratch_buffer.bo = bo;
986 gen6_mfd_context->mpr_row_store_scratch_buffer.valid = 1;
988 gen6_mfd_context->bitplane_read_buffer.valid = 0;
992 gen6_mfd_avc_decode_picture(VADriverContextP ctx,
993 struct decode_state *decode_state,
994 struct gen6_mfd_context *gen6_mfd_context)
996 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
997 VAPictureParameterBufferH264 *pic_param;
998 VASliceParameterBufferH264 *slice_param, *next_slice_param, *next_slice_group_param;
999 dri_bo *slice_data_bo;
1002 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1003 pic_param = (VAPictureParameterBufferH264 *)decode_state->pic_param->buffer;
1004 gen6_mfd_avc_decode_init(ctx, decode_state, gen6_mfd_context);
1006 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
1007 intel_batchbuffer_emit_mi_flush(batch);
1008 gen6_mfd_pipe_mode_select(ctx, decode_state, MFX_FORMAT_AVC, gen6_mfd_context);
1009 gen6_mfd_surface_state(ctx, decode_state, MFX_FORMAT_AVC, gen6_mfd_context);
1010 gen6_mfd_pipe_buf_addr_state(ctx, decode_state, MFX_FORMAT_AVC, gen6_mfd_context);
1011 gen6_mfd_bsp_buf_base_addr_state(ctx, decode_state, MFX_FORMAT_AVC, gen6_mfd_context);
1012 gen6_mfd_avc_img_state(ctx, decode_state, gen6_mfd_context);
1013 gen6_mfd_avc_qm_state(ctx, decode_state, gen6_mfd_context);
1015 for (j = 0; j < decode_state->num_slice_params; j++) {
1016 assert(decode_state->slice_params && decode_state->slice_params[j]->buffer);
1017 slice_param = (VASliceParameterBufferH264 *)decode_state->slice_params[j]->buffer;
1018 slice_data_bo = decode_state->slice_datas[j]->bo;
1019 gen6_mfd_ind_obj_base_addr_state(ctx, slice_data_bo, MFX_FORMAT_AVC, gen6_mfd_context);
1021 if (j == decode_state->num_slice_params - 1)
1022 next_slice_group_param = NULL;
1024 next_slice_group_param = (VASliceParameterBufferH264 *)decode_state->slice_params[j + 1]->buffer;
1026 for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
1027 assert(slice_param->slice_data_flag == VA_SLICE_DATA_FLAG_ALL);
1028 assert((slice_param->slice_type == SLICE_TYPE_I) ||
1029 (slice_param->slice_type == SLICE_TYPE_SI) ||
1030 (slice_param->slice_type == SLICE_TYPE_P) ||
1031 (slice_param->slice_type == SLICE_TYPE_SP) ||
1032 (slice_param->slice_type == SLICE_TYPE_B));
1034 if (i < decode_state->slice_params[j]->num_elements - 1)
1035 next_slice_param = slice_param + 1;
1037 next_slice_param = next_slice_group_param;
1039 gen6_mfd_avc_directmode_state(ctx, pic_param, slice_param, gen6_mfd_context);
1040 gen6_mfd_avc_slice_state(ctx, pic_param, slice_param, next_slice_param, gen6_mfd_context);
1041 gen6_mfd_avc_ref_idx_state(ctx, pic_param, slice_param, gen6_mfd_context);
1042 gen6_mfd_avc_weightoffset_state(ctx, pic_param, slice_param, gen6_mfd_context);
1043 gen6_mfd_avc_bsd_object(ctx, pic_param, slice_param, slice_data_bo, gen6_mfd_context);
1048 gen6_mfd_avc_phantom_slice(ctx, pic_param, gen6_mfd_context);
1049 intel_batchbuffer_end_atomic(batch);
1050 intel_batchbuffer_flush(batch);
1054 gen6_mfd_mpeg2_decode_init(VADriverContextP ctx,
1055 struct decode_state *decode_state,
1056 struct gen6_mfd_context *gen6_mfd_context)
1058 VAPictureParameterBufferMPEG2 *pic_param;
1059 struct i965_driver_data *i965 = i965_driver_data(ctx);
1060 struct object_surface *obj_surface;
1062 unsigned int width_in_mbs;
1064 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1065 pic_param = (VAPictureParameterBufferMPEG2 *)decode_state->pic_param->buffer;
1066 width_in_mbs = ALIGN(pic_param->horizontal_size, 16) / 16;
1068 mpeg2_set_reference_surfaces(
1070 gen6_mfd_context->reference_surface,
1075 /* Current decoded picture */
1076 obj_surface = SURFACE(decode_state->current_render_target);
1077 assert(obj_surface);
1078 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
1080 dri_bo_unreference(gen6_mfd_context->pre_deblocking_output.bo);
1081 gen6_mfd_context->pre_deblocking_output.bo = obj_surface->bo;
1082 dri_bo_reference(gen6_mfd_context->pre_deblocking_output.bo);
1083 gen6_mfd_context->pre_deblocking_output.valid = 1;
1085 dri_bo_unreference(gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo);
1086 bo = dri_bo_alloc(i965->intel.bufmgr,
1087 "bsd mpc row store",
1091 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
1092 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.valid = 1;
1094 gen6_mfd_context->post_deblocking_output.valid = 0;
1095 gen6_mfd_context->intra_row_store_scratch_buffer.valid = 0;
1096 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.valid = 0;
1097 gen6_mfd_context->mpr_row_store_scratch_buffer.valid = 0;
1098 gen6_mfd_context->bitplane_read_buffer.valid = 0;
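/*
 * MFX_MPEG2_PIC_STATE: picture-level MPEG-2 parameters.  VA packs the four
 * f_code values into one 16-bit field with f_code[0][0] in the most
 * significant nibble, hence the >> 12 / >> 8 / >> 4 / & 0xf unpacking
 * below.  top_field_first is used directly for frame pictures and is
 * reconstructed from is_first_field and the field parity otherwise.
 */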
1102 gen6_mfd_mpeg2_pic_state(VADriverContextP ctx,
1103 struct decode_state *decode_state,
1104 struct gen6_mfd_context *gen6_mfd_context)
1106 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1107 VAPictureParameterBufferMPEG2 *pic_param;
1108 unsigned int tff, pic_structure;
1110 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1111 pic_param = (VAPictureParameterBufferMPEG2 *)decode_state->pic_param->buffer;
1113 pic_structure = pic_param->picture_coding_extension.bits.picture_structure;
1114 if (pic_structure == MPEG_FRAME)
1115 tff = pic_param->picture_coding_extension.bits.top_field_first;
1117 tff = !(pic_param->picture_coding_extension.bits.is_first_field ^
1118 (pic_structure & MPEG_TOP_FIELD));
1120 BEGIN_BCS_BATCH(batch, 4);
1121 OUT_BCS_BATCH(batch, MFX_MPEG2_PIC_STATE | (4 - 2));
1122 OUT_BCS_BATCH(batch,
1123 (pic_param->f_code & 0xf) << 28 | /* f_code[1][1] */
1124 ((pic_param->f_code >> 4) & 0xf) << 24 | /* f_code[1][0] */
1125 ((pic_param->f_code >> 8) & 0xf) << 20 | /* f_code[0][1] */
1126 ((pic_param->f_code >> 12) & 0xf) << 16 | /* f_code[0][0] */
1127 pic_param->picture_coding_extension.bits.intra_dc_precision << 14 |
1128 pic_param->picture_coding_extension.bits.picture_structure << 12 |
1130 pic_param->picture_coding_extension.bits.frame_pred_frame_dct << 10 |
1131 pic_param->picture_coding_extension.bits.concealment_motion_vectors << 9 |
1132 pic_param->picture_coding_extension.bits.q_scale_type << 8 |
1133 pic_param->picture_coding_extension.bits.intra_vlc_format << 7 |
1134 pic_param->picture_coding_extension.bits.alternate_scan << 6);
1135 OUT_BCS_BATCH(batch,
1136 pic_param->picture_coding_type << 9);
1137 OUT_BCS_BATCH(batch,
1138 (ALIGN(pic_param->vertical_size, 16) / 16) << 16 |
1139 (ALIGN(pic_param->horizontal_size, 16) / 16));
1140 ADVANCE_BCS_BATCH(batch);
1144 gen6_mfd_mpeg2_qm_state(VADriverContextP ctx,
1145 struct decode_state *decode_state,
1146 struct gen6_mfd_context *gen6_mfd_context)
1148 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1149 VAIQMatrixBufferMPEG2 * const gen_iq_matrix = &gen6_mfd_context->iq_matrix.mpeg2;
1152 /* Update internal QM state */
1153 if (decode_state->iq_matrix && decode_state->iq_matrix->buffer) {
1154 VAIQMatrixBufferMPEG2 * const iq_matrix =
1155 (VAIQMatrixBufferMPEG2 *)decode_state->iq_matrix->buffer;
1157 gen_iq_matrix->load_intra_quantiser_matrix =
1158 iq_matrix->load_intra_quantiser_matrix;
1159 if (iq_matrix->load_intra_quantiser_matrix) {
1160 for (j = 0; j < 64; j++)
1161 gen_iq_matrix->intra_quantiser_matrix[zigzag_direct[j]] =
1162 iq_matrix->intra_quantiser_matrix[j];
1165 gen_iq_matrix->load_non_intra_quantiser_matrix =
1166 iq_matrix->load_non_intra_quantiser_matrix;
1167 if (iq_matrix->load_non_intra_quantiser_matrix) {
1168 for (j = 0; j < 64; j++)
1169 gen_iq_matrix->non_intra_quantiser_matrix[zigzag_direct[j]] =
1170 iq_matrix->non_intra_quantiser_matrix[j];
1174 /* Commit QM state to HW */
1175 for (i = 0; i < 2; i++) {
1176 unsigned char *qm = NULL;
1179 if (gen_iq_matrix->load_intra_quantiser_matrix)
1180 qm = gen_iq_matrix->intra_quantiser_matrix;
1182 if (gen_iq_matrix->load_non_intra_quantiser_matrix)
1183 qm = gen_iq_matrix->non_intra_quantiser_matrix;
1189 BEGIN_BCS_BATCH(batch, 18);
1190 OUT_BCS_BATCH(batch, MFX_MPEG2_QM_STATE | (18 - 2));
1191 OUT_BCS_BATCH(batch, i);
1192 intel_batchbuffer_data(batch, qm, 64);
1193 ADVANCE_BCS_BATCH(batch);
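/*
 * MFD_MPEG2_BSD_OBJECT: decode one MPEG-2 slice.  The macroblock count is
 * the raster-order distance between this slice's start MB and the next
 * slice's start MB (or the end of the picture for the last slice).  For
 * field pictures the picture-end bound is in field rows, and the reported
 * slice_vertical_position is halved only when the per-context
 * wa_mpeg2_slice_vertical_position workaround is active.
 */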
1198 gen6_mfd_mpeg2_bsd_object(VADriverContextP ctx,
1199 VAPictureParameterBufferMPEG2 *pic_param,
1200 VASliceParameterBufferMPEG2 *slice_param,
1201 VASliceParameterBufferMPEG2 *next_slice_param,
1202 struct gen6_mfd_context *gen6_mfd_context)
1204 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1205 unsigned int width_in_mbs = ALIGN(pic_param->horizontal_size, 16) / 16;
1206 int mb_count, vpos0, hpos0, vpos1, hpos1, is_field_pic_wa, is_field_pic = 0;
1208 if (pic_param->picture_coding_extension.bits.picture_structure == MPEG_TOP_FIELD ||
1209 pic_param->picture_coding_extension.bits.picture_structure == MPEG_BOTTOM_FIELD)
1211 is_field_pic_wa = is_field_pic &&
1212 gen6_mfd_context->wa_mpeg2_slice_vertical_position > 0;
1214 vpos0 = slice_param->slice_vertical_position / (1 + is_field_pic_wa);
1215 hpos0 = slice_param->slice_horizontal_position;
1217 if (next_slice_param == NULL) {
1218 vpos1 = ALIGN(pic_param->vertical_size, 16) / 16 / (1 + is_field_pic);
1221 vpos1 = next_slice_param->slice_vertical_position / (1 + is_field_pic_wa);
1222 hpos1 = next_slice_param->slice_horizontal_position;
1225 mb_count = (vpos1 * width_in_mbs + hpos1) - (vpos0 * width_in_mbs + hpos0);
1227 BEGIN_BCS_BATCH(batch, 5);
1228 OUT_BCS_BATCH(batch, MFD_MPEG2_BSD_OBJECT | (5 - 2));
1229 OUT_BCS_BATCH(batch,
1230 slice_param->slice_data_size - (slice_param->macroblock_offset >> 3));
1231 OUT_BCS_BATCH(batch,
1232 slice_param->slice_data_offset + (slice_param->macroblock_offset >> 3));
1233 OUT_BCS_BATCH(batch,
1237 (next_slice_param == NULL) << 5 |
1238 (next_slice_param == NULL) << 3 |
1239 (slice_param->macroblock_offset & 0x7));
1240 OUT_BCS_BATCH(batch,
1241 slice_param->quantiser_scale_code << 24);
1242 ADVANCE_BCS_BATCH(batch);
1246 gen6_mfd_mpeg2_decode_picture(VADriverContextP ctx,
1247 struct decode_state *decode_state,
1248 struct gen6_mfd_context *gen6_mfd_context)
1250 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1251 VAPictureParameterBufferMPEG2 *pic_param;
1252 VASliceParameterBufferMPEG2 *slice_param, *next_slice_param, *next_slice_group_param;
1253 dri_bo *slice_data_bo;
1256 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1257 pic_param = (VAPictureParameterBufferMPEG2 *)decode_state->pic_param->buffer;
1259 gen6_mfd_mpeg2_decode_init(ctx, decode_state, gen6_mfd_context);
1260 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
1261 intel_batchbuffer_emit_mi_flush(batch);
1262 gen6_mfd_pipe_mode_select(ctx, decode_state, MFX_FORMAT_MPEG2, gen6_mfd_context);
1263 gen6_mfd_surface_state(ctx, decode_state, MFX_FORMAT_MPEG2, gen6_mfd_context);
1264 gen6_mfd_pipe_buf_addr_state(ctx, decode_state, MFX_FORMAT_MPEG2, gen6_mfd_context);
1265 gen6_mfd_bsp_buf_base_addr_state(ctx, decode_state, MFX_FORMAT_MPEG2, gen6_mfd_context);
1266 gen6_mfd_mpeg2_pic_state(ctx, decode_state, gen6_mfd_context);
1267 gen6_mfd_mpeg2_qm_state(ctx, decode_state, gen6_mfd_context);
1269 if (gen6_mfd_context->wa_mpeg2_slice_vertical_position < 0)
1270 gen6_mfd_context->wa_mpeg2_slice_vertical_position =
1271 mpeg2_wa_slice_vertical_position(decode_state, pic_param);
1273 for (j = 0; j < decode_state->num_slice_params; j++) {
1274 assert(decode_state->slice_params && decode_state->slice_params[j]->buffer);
1275 slice_param = (VASliceParameterBufferMPEG2 *)decode_state->slice_params[j]->buffer;
1276 slice_data_bo = decode_state->slice_datas[j]->bo;
1277 gen6_mfd_ind_obj_base_addr_state(ctx, slice_data_bo, MFX_FORMAT_MPEG2, gen6_mfd_context);
1279 if (j == decode_state->num_slice_params - 1)
1280 next_slice_group_param = NULL;
1282 next_slice_group_param = (VASliceParameterBufferMPEG2 *)decode_state->slice_params[j + 1]->buffer;
1284 for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
1285 assert(slice_param->slice_data_flag == VA_SLICE_DATA_FLAG_ALL);
1287 if (i < decode_state->slice_params[j]->num_elements - 1)
1288 next_slice_param = slice_param + 1;
1290 next_slice_param = next_slice_group_param;
1292 gen6_mfd_mpeg2_bsd_object(ctx, pic_param, slice_param, next_slice_param, gen6_mfd_context);
1297 intel_batchbuffer_end_atomic(batch);
1298 intel_batchbuffer_flush(batch);
1301 static const int va_to_gen6_vc1_pic_type[5] = {
1305 GEN6_VC1_BI_PICTURE,
1309 static const int va_to_gen6_vc1_mv[4] = {
1311 2, /* 1-MV half-pel */
1312 3, /* 1-MV half-pel bilinear */
1316 static const int b_picture_scale_factor[21] = {
1317 128, 85, 170, 64, 192,
1318 51, 102, 153, 204, 43,
1319 215, 37, 74, 111, 148,
1320 185, 222, 32, 96, 160,
1324 static const int va_to_gen6_vc1_condover[3] = {
1330 static const int va_to_gen6_vc1_profile[4] = {
1331 GEN6_VC1_SIMPLE_PROFILE,
1332 GEN6_VC1_MAIN_PROFILE,
1333 GEN6_VC1_RESERVED_PROFILE,
1334 GEN6_VC1_ADVANCED_PROFILE
1338 gen6_mfd_free_vc1_surface(void **data)
1340 struct gen6_vc1_surface *gen6_vc1_surface = *data;
1342 if (!gen6_vc1_surface)
1345 dri_bo_unreference(gen6_vc1_surface->dmv);
1346 free(gen6_vc1_surface);
1351 gen6_mfd_init_vc1_surface(VADriverContextP ctx,
1352 VAPictureParameterBufferVC1 *pic_param,
1353 struct object_surface *obj_surface)
1355 struct i965_driver_data *i965 = i965_driver_data(ctx);
1356 struct gen6_vc1_surface *gen6_vc1_surface = obj_surface->private_data;
1357 int height_in_mbs = ALIGN(pic_param->coded_height, 16) / 16;
1359 obj_surface->free_private_data = gen6_mfd_free_vc1_surface;
1361 if (!gen6_vc1_surface) {
1362 gen6_vc1_surface = calloc(1, sizeof(struct gen6_vc1_surface));
1363 assert((obj_surface->size & 0x3f) == 0);
1364 obj_surface->private_data = gen6_vc1_surface;
1367 gen6_vc1_surface->picture_type = pic_param->picture_fields.bits.picture_type;
1369 if (gen6_vc1_surface->dmv == NULL) {
1370 gen6_vc1_surface->dmv = dri_bo_alloc(i965->intel.bufmgr,
1371 "direct mv w/r buffer",
1372 128 * height_in_mbs * 64, /* scalable with frame height */
1378 gen6_mfd_vc1_decode_init(VADriverContextP ctx,
1379 struct decode_state *decode_state,
1380 struct gen6_mfd_context *gen6_mfd_context)
1382 VAPictureParameterBufferVC1 *pic_param;
1383 struct i965_driver_data *i965 = i965_driver_data(ctx);
1384 struct object_surface *obj_surface;
1389 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1390 pic_param = (VAPictureParameterBufferVC1 *)decode_state->pic_param->buffer;
1391 width_in_mbs = ALIGN(pic_param->coded_width, 16) / 16;
1393 /* reference pictures */
1394 obj_surface = SURFACE(pic_param->forward_reference_picture);
1396 if (obj_surface && obj_surface->bo)
1397 gen6_mfd_context->reference_surface[0].surface_id = pic_param->forward_reference_picture;
1399 gen6_mfd_context->reference_surface[0].surface_id = VA_INVALID_ID;
1401 obj_surface = SURFACE(pic_param->backward_reference_picture);
1403 if (obj_surface && obj_surface->bo)
1404 gen6_mfd_context->reference_surface[1].surface_id = pic_param->backward_reference_picture;
1406 gen6_mfd_context->reference_surface[1].surface_id = pic_param->forward_reference_picture;
1408 /* all remaining frame-store entries must hold valid surface ids, so replicate the two references */
1409 for (i = 2; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++)
1410 gen6_mfd_context->reference_surface[i].surface_id = gen6_mfd_context->reference_surface[i % 2].surface_id;
1412 /* Current decoded picture */
1413 obj_surface = SURFACE(decode_state->current_render_target);
1414 assert(obj_surface);
1415 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
1416 gen6_mfd_init_vc1_surface(ctx, pic_param, obj_surface);
1418 dri_bo_unreference(gen6_mfd_context->post_deblocking_output.bo);
1419 gen6_mfd_context->post_deblocking_output.bo = obj_surface->bo;
1420 dri_bo_reference(gen6_mfd_context->post_deblocking_output.bo);
1421 gen6_mfd_context->post_deblocking_output.valid = pic_param->entrypoint_fields.bits.loopfilter;
1423 dri_bo_unreference(gen6_mfd_context->pre_deblocking_output.bo);
1424 gen6_mfd_context->pre_deblocking_output.bo = obj_surface->bo;
1425 dri_bo_reference(gen6_mfd_context->pre_deblocking_output.bo);
1426 gen6_mfd_context->pre_deblocking_output.valid = !pic_param->entrypoint_fields.bits.loopfilter;
1428 dri_bo_unreference(gen6_mfd_context->intra_row_store_scratch_buffer.bo);
1429 bo = dri_bo_alloc(i965->intel.bufmgr,
1434 gen6_mfd_context->intra_row_store_scratch_buffer.bo = bo;
1435 gen6_mfd_context->intra_row_store_scratch_buffer.valid = 1;
1437 dri_bo_unreference(gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo);
1438 bo = dri_bo_alloc(i965->intel.bufmgr,
1439 "deblocking filter row store",
1440 width_in_mbs * 6 * 64,
1443 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
1444 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.valid = 1;
1446 dri_bo_unreference(gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo);
1447 bo = dri_bo_alloc(i965->intel.bufmgr,
1448 "bsd mpc row store",
1452 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
1453 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.valid = 1;
1455 gen6_mfd_context->mpr_row_store_scratch_buffer.valid = 0;
1457 gen6_mfd_context->bitplane_read_buffer.valid = !!pic_param->bitplane_present.value;
1458 dri_bo_unreference(gen6_mfd_context->bitplane_read_buffer.bo);
1460 if (gen6_mfd_context->bitplane_read_buffer.valid) {
1461 int width_in_mbs = ALIGN(pic_param->coded_width, 16) / 16;
1462 int height_in_mbs = ALIGN(pic_param->coded_height, 16) / 16;
1463 int bitplane_width = ALIGN(width_in_mbs, 2) / 2;
1465 uint8_t *src = NULL, *dst = NULL;
1467 assert(decode_state->bit_plane->buffer);
1468 src = decode_state->bit_plane->buffer;
1470 bo = dri_bo_alloc(i965->intel.bufmgr,
1472 bitplane_width * height_in_mbs,
1475 gen6_mfd_context->bitplane_read_buffer.bo = bo;
1477 dri_bo_map(bo, True);
1478 assert(bo->virtual);
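/*
 * Repack the VC-1 bitplane for the hardware: the VA buffer stores one
 * 4-bit value per macroblock, two macroblocks per byte, as a single
 * raster-order stream (first MB of each pair in the high nibble), while
 * the destination BO uses a fixed pitch of ALIGN(width_in_mbs, 2) / 2
 * bytes per macroblock row.  Each row is rebuilt nibble by nibble and the
 * trailing half byte of an odd-width row is shifted into place.
 */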
1481 for (src_h = 0; src_h < height_in_mbs; src_h++) {
1482 for(src_w = 0; src_w < width_in_mbs; src_w++) {
1483 int src_index, dst_index;
1487 src_index = (src_h * width_in_mbs + src_w) / 2;
1488 src_shift = !((src_h * width_in_mbs + src_w) & 1) * 4;
1489 src_value = ((src[src_index] >> src_shift) & 0xf);
1491 dst_index = src_w / 2;
1492 dst[dst_index] = ((dst[dst_index] >> 4) | (src_value << 4));
1496 dst[src_w / 2] >>= 4;
1498 dst += bitplane_width;
1503 gen6_mfd_context->bitplane_read_buffer.bo = NULL;
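/*
 * MFX_VC1_PIC_STATE: translate the VA VC-1 picture parameters into the
 * hardware picture state.  alt_pquant_config / alt_pquant_edge_mask are
 * derived from DQUANT, DQUANTFRM, DQPROFILE, DQSBEDGE, DQDBEDGE and
 * DQBILEVEL, mirroring the VC-1 VOPDQUANT syntax; advanced-profile I
 * pictures are reported to the hardware as BI, and the B-picture scale
 * factor is looked up from b_picture_scale_factor[] by BFRACTION
 * (b_picture_fraction).
 */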
1507 gen6_mfd_vc1_pic_state(VADriverContextP ctx,
1508 struct decode_state *decode_state,
1509 struct gen6_mfd_context *gen6_mfd_context)
1511 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1512 VAPictureParameterBufferVC1 *pic_param;
1513 struct i965_driver_data *i965 = i965_driver_data(ctx);
1514 struct object_surface *obj_surface;
1515 int alt_pquant_config = 0, alt_pquant_edge_mask = 0, alt_pq;
1516 int dquant, dquantfrm, dqprofile, dqdbedge, dqsbedge, dqbilevel;
1517 int unified_mv_mode;
1518 int ref_field_pic_polarity = 0;
1519 int scale_factor = 0;
1521 int dmv_surface_valid = 0;
1528 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1529 pic_param = (VAPictureParameterBufferVC1 *)decode_state->pic_param->buffer;
1531 profile = va_to_gen6_vc1_profile[pic_param->sequence_fields.bits.profile];
1532 dquant = pic_param->pic_quantizer_fields.bits.dquant;
1533 dquantfrm = pic_param->pic_quantizer_fields.bits.dq_frame;
1534 dqprofile = pic_param->pic_quantizer_fields.bits.dq_profile;
1535 dqdbedge = pic_param->pic_quantizer_fields.bits.dq_db_edge;
1536 dqsbedge = pic_param->pic_quantizer_fields.bits.dq_sb_edge;
1537 dqbilevel = pic_param->pic_quantizer_fields.bits.dq_binary_level;
1538 alt_pq = pic_param->pic_quantizer_fields.bits.alt_pic_quantizer;
1541 alt_pquant_config = 0;
1542 alt_pquant_edge_mask = 0;
1543 } else if (dquant == 2) {
1544 alt_pquant_config = 1;
1545 alt_pquant_edge_mask = 0xf;
1547 assert(dquant == 1);
1548 if (dquantfrm == 0) {
1549 alt_pquant_config = 0;
1550 alt_pquant_edge_mask = 0;
1553 assert(dquantfrm == 1);
1554 alt_pquant_config = 1;
1556 switch (dqprofile) {
1558 if (dqbilevel == 0) {
1559 alt_pquant_config = 2;
1560 alt_pquant_edge_mask = 0;
1562 assert(dqbilevel == 1);
1563 alt_pquant_config = 3;
1564 alt_pquant_edge_mask = 0;
1569 alt_pquant_edge_mask = 0xf;
1574 alt_pquant_edge_mask = 0x9;
1576 alt_pquant_edge_mask = (0x3 << dqdbedge);
1581 alt_pquant_edge_mask = (0x1 << dqsbedge);
1590 if (pic_param->mv_fields.bits.mv_mode == VAMvModeIntensityCompensation) {
1591 assert(pic_param->mv_fields.bits.mv_mode2 < 4);
1592 unified_mv_mode = va_to_gen6_vc1_mv[pic_param->mv_fields.bits.mv_mode2];
1594 assert(pic_param->mv_fields.bits.mv_mode < 4);
1595 unified_mv_mode = va_to_gen6_vc1_mv[pic_param->mv_fields.bits.mv_mode];
1598 if (pic_param->sequence_fields.bits.interlace == 1 &&
1599 pic_param->picture_fields.bits.frame_coding_mode != 0) { /* frame-interlace or field-interlace */
1600 /* FIXME: calculate reference field picture polarity */
1602 ref_field_pic_polarity = 0;
1605 if (pic_param->b_picture_fraction < 21)
1606 scale_factor = b_picture_scale_factor[pic_param->b_picture_fraction];
1608 picture_type = va_to_gen6_vc1_pic_type[pic_param->picture_fields.bits.picture_type];
1610 if (profile == GEN6_VC1_ADVANCED_PROFILE &&
1611 picture_type == GEN6_VC1_I_PICTURE)
1612 picture_type = GEN6_VC1_BI_PICTURE;
1614 if (picture_type == GEN6_VC1_I_PICTURE || picture_type == GEN6_VC1_BI_PICTURE) /* I picture */
1615 trans_ac_y = pic_param->transform_fields.bits.transform_ac_codingset_idx2;
1617 trans_ac_y = pic_param->transform_fields.bits.transform_ac_codingset_idx1;
1619 * 8.3.6.2.1 Transform Type Selection
1620 * If variable-sized transform coding is not enabled,
1621 * then the 8x8 transform shall be used for all blocks.
1622 * This is also an MFX_VC1_PIC_STATE requirement.
1624 if (pic_param->transform_fields.bits.variable_sized_transform_flag == 0) {
1625 pic_param->transform_fields.bits.mb_level_transform_type_flag = 1;
1626 pic_param->transform_fields.bits.frame_level_transform_type = 0;
1630 if (picture_type == GEN6_VC1_B_PICTURE) {
1631 struct gen6_vc1_surface *gen6_vc1_surface = NULL;
1633 obj_surface = SURFACE(pic_param->backward_reference_picture);
1634 assert(obj_surface);
1635 gen6_vc1_surface = obj_surface->private_data;
1637 if (!gen6_vc1_surface ||
1638 (va_to_gen6_vc1_pic_type[gen6_vc1_surface->picture_type] == GEN6_VC1_I_PICTURE ||
1639 va_to_gen6_vc1_pic_type[gen6_vc1_surface->picture_type] == GEN6_VC1_BI_PICTURE))
1640 dmv_surface_valid = 0;
1642 dmv_surface_valid = 1;
1645 assert(pic_param->picture_fields.bits.frame_coding_mode < 3);
1647 if (pic_param->picture_fields.bits.frame_coding_mode < 2)
1648 fcm = pic_param->picture_fields.bits.frame_coding_mode;
1650 if (pic_param->picture_fields.bits.top_field_first)
1656 if (pic_param->picture_fields.bits.picture_type == GEN6_VC1_B_PICTURE) { /* B picture */
1657 brfd = pic_param->reference_fields.bits.reference_distance;
1658 brfd = (scale_factor * brfd) >> 8;
1659 brfd = pic_param->reference_fields.bits.reference_distance - brfd - 1;
1665 overlap = pic_param->sequence_fields.bits.overlap;
1666 if (profile != GEN6_VC1_ADVANCED_PROFILE && pic_param->pic_quantizer_fields.bits.pic_quantizer_scale < 9)
1669 assert(pic_param->conditional_overlap_flag < 3);
1670 assert(pic_param->mv_fields.bits.mv_table < 4); /* FIXME: interlace mode */
1672 BEGIN_BCS_BATCH(batch, 6);
1673 OUT_BCS_BATCH(batch, MFX_VC1_PIC_STATE | (6 - 2));
1674 OUT_BCS_BATCH(batch,
1675 (ALIGN(pic_param->coded_height, 16) / 16) << 16 |
1676 (ALIGN(pic_param->coded_width, 16) / 16));
1677 OUT_BCS_BATCH(batch,
1678 pic_param->sequence_fields.bits.syncmarker << 31 |
1679 1 << 29 | /* concealment */
1681 pic_param->entrypoint_fields.bits.loopfilter << 23 |
1683 (pic_param->pic_quantizer_fields.bits.quantizer == 0) << 21 | /* implicit quantizer */
1684 pic_param->pic_quantizer_fields.bits.pic_quantizer_scale << 16 |
1685 alt_pquant_edge_mask << 12 |
1686 alt_pquant_config << 10 |
1687 pic_param->pic_quantizer_fields.bits.half_qp << 9 |
1688 pic_param->pic_quantizer_fields.bits.pic_quantizer_type << 8 |
1689 va_to_gen6_vc1_condover[pic_param->conditional_overlap_flag] << 6 |
1690 !pic_param->picture_fields.bits.is_first_field << 5 |
1693 OUT_BCS_BATCH(batch,
1694 !!pic_param->bitplane_present.value << 23 |
1695 !pic_param->bitplane_present.flags.bp_forward_mb << 22 |
1696 !pic_param->bitplane_present.flags.bp_mv_type_mb << 21 |
1697 !pic_param->bitplane_present.flags.bp_skip_mb << 20 |
1698 !pic_param->bitplane_present.flags.bp_direct_mb << 19 |
1699 !pic_param->bitplane_present.flags.bp_overflags << 18 |
1700 !pic_param->bitplane_present.flags.bp_ac_pred << 17 |
1701 !pic_param->bitplane_present.flags.bp_field_tx << 16 |
1702 pic_param->mv_fields.bits.extended_dmv_range << 14 |
1703 pic_param->mv_fields.bits.extended_mv_range << 12 |
1704 pic_param->mv_fields.bits.four_mv_switch << 11 |
1705 pic_param->fast_uvmc_flag << 10 |
1706 unified_mv_mode << 8 |
1707 ref_field_pic_polarity << 6 |
1708 pic_param->reference_fields.bits.num_reference_pictures << 5 |
1709 pic_param->reference_fields.bits.reference_distance << 0);
1710 OUT_BCS_BATCH(batch,
1711 scale_factor << 24 |
1712 pic_param->mv_fields.bits.mv_table << 20 |
1713 pic_param->mv_fields.bits.four_mv_block_pattern_table << 18 |
1714 pic_param->mv_fields.bits.two_mv_block_pattern_table << 16 |
1715 pic_param->transform_fields.bits.frame_level_transform_type << 12 |
1716 pic_param->transform_fields.bits.mb_level_transform_type_flag << 11 |
1717 pic_param->mb_mode_table << 8 |
1718 trans_ac_y << 6 |
1719 pic_param->transform_fields.bits.transform_ac_codingset_idx1 << 4 |
1720 pic_param->transform_fields.bits.intra_transform_dc_table << 3 |
1721 pic_param->cbp_table << 0);
1722 OUT_BCS_BATCH(batch,
1723 dmv_surface_valid << 13 |
1724 brfd << 8 |
1725 ((ALIGN(pic_param->coded_width, 16) / 16 + 1) / 2 - 1));
1726 ADVANCE_BCS_BATCH(batch);
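/*
 * MFX_VC1_PRED_PIPE_STATE: programs the prediction pipe with the sub-pel
 * interpolation filter, rounding control, range-reduction handling and the
 * intensity-compensation luma scale/shift from the picture parameters.
 */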
1730 gen6_mfd_vc1_pred_pipe_state(VADriverContextP ctx,
1731 struct decode_state *decode_state,
1732 struct gen6_mfd_context *gen6_mfd_context)
1734 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1735 VAPictureParameterBufferVC1 *pic_param;
1736 int interpolation_mode = 0;
1737 int intensitycomp_single;
1739 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1740 pic_param = (VAPictureParameterBufferVC1 *)decode_state->pic_param->buffer;
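/*
 * Pick the sub-pel interpolation filter from the MV mode; when intensity
 * compensation is signalled, the effective MV mode is carried in mv_mode2.
 */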
1742 if (pic_param->mv_fields.bits.mv_mode == VAMvMode1MvHalfPelBilinear ||
1743 (pic_param->mv_fields.bits.mv_mode == VAMvModeIntensityCompensation &&
1744 pic_param->mv_fields.bits.mv_mode2 == VAMvMode1MvHalfPelBilinear))
1745 interpolation_mode = 2; /* Half-pel bilinear */
1746 else if (pic_param->mv_fields.bits.mv_mode == VAMvMode1MvHalfPel ||
1747 (pic_param->mv_fields.bits.mv_mode == VAMvModeIntensityCompensation &&
1748 pic_param->mv_fields.bits.mv_mode2 == VAMvMode1MvHalfPel))
1749 interpolation_mode = 0; /* Half-pel bicubic */
1750 else
1751 interpolation_mode = 1; /* Quarter-pel bicubic */
1755 intensitycomp_single = (pic_param->mv_fields.bits.mv_mode == VAMvModeIntensityCompensation);
1757 BEGIN_BCS_BATCH(batch, 7);
1758 OUT_BCS_BATCH(batch, MFX_VC1_PRED_PIPE_STATE | (7 - 2));
1759 OUT_BCS_BATCH(batch,
1760 0 << 8 | /* FIXME: interlace mode */
1761 pic_param->rounding_control << 4 |
1762 va_to_gen6_vc1_profile[pic_param->sequence_fields.bits.profile] << 2);
1763 OUT_BCS_BATCH(batch,
1764 pic_param->luma_shift << 16 |
1765 pic_param->luma_scale << 0); /* FIXME: Luma Scaling */
1766 OUT_BCS_BATCH(batch, 0);
1767 OUT_BCS_BATCH(batch, 0);
1768 OUT_BCS_BATCH(batch, 0);
1769 OUT_BCS_BATCH(batch,
1770 interpolation_mode << 19 |
1771 pic_param->fast_uvmc_flag << 18 |
1772 0 << 17 | /* FIXME: scale up or down ??? */
1773 pic_param->range_reduction_frame << 16 |
1774 0 << 6 | /* FIXME: double ??? */
1776 intensitycomp_single << 2 |
1777 intensitycomp_single << 0);
1778 ADVANCE_BCS_BATCH(batch);
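/*
 * MFX_VC1_DIRECTMODE_STATE: points the hardware at the direct-mode motion
 * vector buffers, the write buffer attached to the current render target
 * and the read buffer taken from the backward reference picture (normally
 * only present for B pictures).
 */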
1783 gen6_mfd_vc1_directmode_state(VADriverContextP ctx,
1784 struct decode_state *decode_state,
1785 struct gen6_mfd_context *gen6_mfd_context)
1787 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1788 VAPictureParameterBufferVC1 *pic_param;
1789 struct i965_driver_data *i965 = i965_driver_data(ctx);
1790 struct object_surface *obj_surface;
1791 dri_bo *dmv_read_buffer = NULL, *dmv_write_buffer = NULL;
1793 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1794 pic_param = (VAPictureParameterBufferVC1 *)decode_state->pic_param->buffer;
1796 obj_surface = SURFACE(decode_state->current_render_target);
1798 if (obj_surface && obj_surface->private_data) {
1799 dmv_write_buffer = ((struct gen6_vc1_surface *)(obj_surface->private_data))->dmv;
1800 }
1802 obj_surface = SURFACE(pic_param->backward_reference_picture);
1804 if (obj_surface && obj_surface->private_data) {
1805 dmv_read_buffer = ((struct gen6_vc1_surface *)(obj_surface->private_data))->dmv;
1806 }
1808 BEGIN_BCS_BATCH(batch, 3);
1809 OUT_BCS_BATCH(batch, MFX_VC1_DIRECTMODE_STATE | (3 - 2));
1811 if (dmv_write_buffer)
1812 OUT_BCS_RELOC(batch, dmv_write_buffer,
1813 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
1814 0);
1815 else
1816 OUT_BCS_BATCH(batch, 0);
1818 if (dmv_read_buffer)
1819 OUT_BCS_RELOC(batch, dmv_read_buffer,
1820 I915_GEM_DOMAIN_INSTRUCTION, 0,
1821 0);
1822 else
1823 OUT_BCS_BATCH(batch, 0);
1825 ADVANCE_BCS_BATCH(batch);
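/*
 * Map the macroblock bit offset reported in the slice parameters onto the
 * slice data actually stored in the buffer. For the Advanced profile the
 * buffer still contains start-code emulation prevention bytes (00 00 03),
 * so the slice header bytes are rescanned and the offset adjusted for each
 * escape byte that is skipped.
 */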
1829 gen6_mfd_vc1_get_macroblock_bit_offset(uint8_t *buf, int in_slice_data_bit_offset, int profile)
1831 int out_slice_data_bit_offset;
1832 int slice_header_size = in_slice_data_bit_offset / 8;
1833 int i, j;
1835 if (profile != 3) /* not the Advanced profile */
1836 out_slice_data_bit_offset = in_slice_data_bit_offset;
1837 else {
1838 for (i = 0, j = 0; i < slice_header_size; i++, j++) {
1839 if (!buf[j] && !buf[j + 1] && buf[j + 2] == 3 && buf[j + 3] < 4) {
1840 i++, j += 2;
1841 }
1842 }
1844 out_slice_data_bit_offset = 8 * j + in_slice_data_bit_offset % 8;
1845 }
1847 return out_slice_data_bit_offset;
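/*
 * MFD_VC1_BSD_OBJECT: one command per slice, giving the BSD unit the slice
 * data size and byte-aligned start offset, the vertical macroblock range
 * covered by the slice, and the residual bit offset of the first macroblock
 * within its byte.
 */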
1851 gen6_mfd_vc1_bsd_object(VADriverContextP ctx,
1852 VAPictureParameterBufferVC1 *pic_param,
1853 VASliceParameterBufferVC1 *slice_param,
1854 VASliceParameterBufferVC1 *next_slice_param,
1855 dri_bo *slice_data_bo,
1856 struct gen6_mfd_context *gen6_mfd_context)
1858 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1859 int next_slice_start_vert_pos;
1860 int macroblock_offset;
1861 uint8_t *slice_data = NULL;
1863 dri_bo_map(slice_data_bo, 0);
1864 slice_data = (uint8_t *)(slice_data_bo->virtual + slice_param->slice_data_offset);
1865 macroblock_offset = gen6_mfd_vc1_get_macroblock_bit_offset(slice_data,
1866 slice_param->macroblock_offset,
1867 pic_param->sequence_fields.bits.profile);
1868 dri_bo_unmap(slice_data_bo);
1870 if (next_slice_param)
1871 next_slice_start_vert_pos = next_slice_param->slice_vertical_position;
1873 next_slice_start_vert_pos = ALIGN(pic_param->coded_height, 16) / 16;
1875 BEGIN_BCS_BATCH(batch, 4);
1876 OUT_BCS_BATCH(batch, MFD_VC1_BSD_OBJECT | (4 - 2));
1877 OUT_BCS_BATCH(batch,
1878 slice_param->slice_data_size - (macroblock_offset >> 3));
1879 OUT_BCS_BATCH(batch,
1880 slice_param->slice_data_offset + (macroblock_offset >> 3));
1881 OUT_BCS_BATCH(batch,
1882 slice_param->slice_vertical_position << 24 |
1883 next_slice_start_vert_pos << 16 |
1885 (macroblock_offset & 0x7));
1886 ADVANCE_BCS_BATCH(batch);
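/*
 * Top-level VC-1 picture decode: initialize per-picture resources, emit the
 * common MFX pipeline/surface/buffer state, the VC-1 specific picture,
 * prediction-pipe and direct-mode state, then one MFD_VC1_BSD_OBJECT per
 * slice before flushing the batch.
 */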
1890 gen6_mfd_vc1_decode_picture(VADriverContextP ctx,
1891 struct decode_state *decode_state,
1892 struct gen6_mfd_context *gen6_mfd_context)
1894 struct intel_batchbuffer *batch = gen6_mfd_context->base.batch;
1895 VAPictureParameterBufferVC1 *pic_param;
1896 VASliceParameterBufferVC1 *slice_param, *next_slice_param, *next_slice_group_param;
1897 dri_bo *slice_data_bo;
1900 assert(decode_state->pic_param && decode_state->pic_param->buffer);
1901 pic_param = (VAPictureParameterBufferVC1 *)decode_state->pic_param->buffer;
1903 gen6_mfd_vc1_decode_init(ctx, decode_state, gen6_mfd_context);
1904 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
1905 intel_batchbuffer_emit_mi_flush(batch);
1906 gen6_mfd_pipe_mode_select(ctx, decode_state, MFX_FORMAT_VC1, gen6_mfd_context);
1907 gen6_mfd_surface_state(ctx, decode_state, MFX_FORMAT_VC1, gen6_mfd_context);
1908 gen6_mfd_pipe_buf_addr_state(ctx, decode_state, MFX_FORMAT_VC1, gen6_mfd_context);
1909 gen6_mfd_bsp_buf_base_addr_state(ctx, decode_state, MFX_FORMAT_VC1, gen6_mfd_context);
1910 gen6_mfd_vc1_pic_state(ctx, decode_state, gen6_mfd_context);
1911 gen6_mfd_vc1_pred_pipe_state(ctx, decode_state, gen6_mfd_context);
1912 gen6_mfd_vc1_directmode_state(ctx, decode_state, gen6_mfd_context);
1914 for (j = 0; j < decode_state->num_slice_params; j++) {
1915 assert(decode_state->slice_params && decode_state->slice_params[j]->buffer);
1916 slice_param = (VASliceParameterBufferVC1 *)decode_state->slice_params[j]->buffer;
1917 slice_data_bo = decode_state->slice_datas[j]->bo;
1918 gen6_mfd_ind_obj_base_addr_state(ctx, slice_data_bo, MFX_FORMAT_VC1, gen6_mfd_context);
1920 if (j == decode_state->num_slice_params - 1)
1921 next_slice_group_param = NULL;
1922 else
1923 next_slice_group_param = (VASliceParameterBufferVC1 *)decode_state->slice_params[j + 1]->buffer;
1925 for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
1926 assert(slice_param->slice_data_flag == VA_SLICE_DATA_FLAG_ALL);
1928 if (i < decode_state->slice_params[j]->num_elements - 1)
1929 next_slice_param = slice_param + 1;
1930 else
1931 next_slice_param = next_slice_group_param;
1933 gen6_mfd_vc1_bsd_object(ctx, pic_param, slice_param, next_slice_param, slice_data_bo, gen6_mfd_context);
1934 slice_param++;
1935 }
1936 }
1938 intel_batchbuffer_end_atomic(batch);
1939 intel_batchbuffer_flush(batch);
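/*
 * hw_context::run() entry point: dispatch to the MPEG-2, H.264 or VC-1
 * picture decoder according to the VAProfile handed in by the driver core.
 */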
1943 gen6_mfd_decode_picture(VADriverContextP ctx,
1944 VAProfile profile,
1945 union codec_state *codec_state,
1946 struct hw_context *hw_context)
1949 struct gen6_mfd_context *gen6_mfd_context = (struct gen6_mfd_context *)hw_context;
1950 struct decode_state *decode_state = &codec_state->decode;
1952 assert(gen6_mfd_context);
1954 switch (profile) {
1955 case VAProfileMPEG2Simple:
1956 case VAProfileMPEG2Main:
1957 gen6_mfd_mpeg2_decode_picture(ctx, decode_state, gen6_mfd_context);
1958 break;
1960 case VAProfileH264Baseline:
1961 case VAProfileH264Main:
1962 case VAProfileH264High:
1963 gen6_mfd_avc_decode_picture(ctx, decode_state, gen6_mfd_context);
1964 break;
1966 case VAProfileVC1Simple:
1967 case VAProfileVC1Main:
1968 case VAProfileVC1Advanced:
1969 gen6_mfd_vc1_decode_picture(ctx, decode_state, gen6_mfd_context);
1970 break;
1979 gen6_mfd_context_destroy(void *hw_context)
1981 struct gen6_mfd_context *gen6_mfd_context = (struct gen6_mfd_context *)hw_context;
1983 dri_bo_unreference(gen6_mfd_context->post_deblocking_output.bo);
1984 gen6_mfd_context->post_deblocking_output.bo = NULL;
1986 dri_bo_unreference(gen6_mfd_context->pre_deblocking_output.bo);
1987 gen6_mfd_context->pre_deblocking_output.bo = NULL;
1989 dri_bo_unreference(gen6_mfd_context->intra_row_store_scratch_buffer.bo);
1990 gen6_mfd_context->intra_row_store_scratch_buffer.bo = NULL;
1992 dri_bo_unreference(gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo);
1993 gen6_mfd_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
1995 dri_bo_unreference(gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo);
1996 gen6_mfd_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;
1998 dri_bo_unreference(gen6_mfd_context->mpr_row_store_scratch_buffer.bo);
1999 gen6_mfd_context->mpr_row_store_scratch_buffer.bo = NULL;
2001 dri_bo_unreference(gen6_mfd_context->bitplane_read_buffer.bo);
2002 gen6_mfd_context->bitplane_read_buffer.bo = NULL;
2004 intel_batchbuffer_free(gen6_mfd_context->base.batch);
2005 free(gen6_mfd_context);
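/*
 * Create the Gen6 MFD decoding context: install the run/destroy hooks,
 * allocate a batchbuffer and mark every frame store entry as unused.
 */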
2009 gen6_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
2011 struct intel_driver_data *intel = intel_driver_data(ctx);
2012 struct gen6_mfd_context *gen6_mfd_context = calloc(1, sizeof(struct gen6_mfd_context));
2015 gen6_mfd_context->base.destroy = gen6_mfd_context_destroy;
2016 gen6_mfd_context->base.run = gen6_mfd_decode_picture;
2017 gen6_mfd_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);
2019 for (i = 0; i < ARRAY_ELEMS(gen6_mfd_context->reference_surface); i++) {
2020 gen6_mfd_context->reference_surface[i].surface_id = VA_INVALID_ID;
2021 gen6_mfd_context->reference_surface[i].frame_store_id = -1;
2022 }
2024 gen6_mfd_context->wa_mpeg2_slice_vertical_position = -1;
2026 return (struct hw_context *)gen6_mfd_context;