2 * Copyright © 2010-2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Zhou Chang <chang.zhou@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "i965_drv_video.h"
39 #include "i965_encoder.h"
40 #include "i965_encoder_utils.h"
/* Program MFX_PIPE_MODE_SELECT (Gen6, 4 dwords): put the MFX engine into AVC
 * encode mode with post-deblocking output enabled and stream-out disabled.
 * NOTE(review): this extraction appears to be missing physical lines (return
 * type, braces, and the OUT_BCS_BATCH(batch, ... heads for the bit-field
 * dwords below) — the visible tokens are kept byte-identical. */
43 gen6_mfc_pipe_mode_select(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
45 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
47 BEGIN_BCS_BATCH(batch, 4);
49 OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (4 - 2));
/* DW1: pipe configuration — encode mode, AVC, post-deblocked pixels out */
51 (0 << 10) | /* disable Stream-Out , advanced QP/bitrate control need enable it*/
52 (1 << 9) | /* Post Deblocking Output */
53 (0 << 8) | /* Pre Deblocking Output */
54 (0 << 7) | /* disable TLB prefectch */
55 (0 << 5) | /* not in stitch mode */
56 (1 << 4) | /* encoding mode */
57 (2 << 0)); /* Standard Select: AVC */
/* DW2: error-handling / debug controls, all terminate-on-error bits off */
59 (0 << 20) | /* round flag in PB slice */
60 (0 << 19) | /* round flag in Intra8x8 */
61 (0 << 7) | /* expand NOA bus flag */
62 (1 << 6) | /* must be 1 */
63 (0 << 5) | /* disable clock gating for NOA */
64 (0 << 4) | /* terminate if AVC motion and POC table error occurs */
65 (0 << 3) | /* terminate if AVC mbdata error occurs */
66 (0 << 2) | /* terminate if AVC CABAC/CAVLC decode error occurs */
67 (0 << 1) | /* AVC long field motion vector */
68 (0 << 0)); /* always calculate AVC ILDB boundary strength */
69 OUT_BCS_BATCH(batch, 0); /* DW3: reserved */
71 ADVANCE_BCS_BATCH(batch);
/* Program MFX_PIPE_MODE_SELECT (Gen7, 5 dwords): like the Gen6 variant but
 * with explicit long-format/VLD mode bits and a selectable codec standard.
 * NOTE(review): the parameter list is incomplete in this extraction — the
 * assert below reads `standard_select`, which must be a dropped parameter
 * line (expected: int standard_select, MFX_FORMAT_MPEG2 or MFX_FORMAT_AVC). */
75 gen7_mfc_pipe_mode_select(VADriverContextP ctx,
77 struct gen6_encoder_context *gen6_encoder_context)
79 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
81 assert(standard_select == MFX_FORMAT_MPEG2 ||
82 standard_select == MFX_FORMAT_AVC);
84 BEGIN_BCS_BATCH(batch, 5);
85 OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (5 - 2));
/* DW1: encode mode, post-deblocking output, caller-selected standard */
87 (MFX_LONG_MODE << 17) | /* Must be long format for encoder */
88 (MFD_MODE_VLD << 15) | /* VLD mode */
89 (0 << 10) | /* disable Stream-Out */
90 (1 << 9) | /* Post Deblocking Output */
91 (0 << 8) | /* Pre Deblocking Output */
92 (0 << 5) | /* not in stitch mode */
93 (1 << 4) | /* encoding mode */
94 (standard_select << 0)); /* standard select: avc or mpeg2 */
/* DW2: error-handling / clock-gating controls, all disabled */
96 (0 << 7) | /* expand NOA bus flag */
97 (0 << 6) | /* disable slice-level clock gating */
98 (0 << 5) | /* disable clock gating for NOA */
99 (0 << 4) | /* terminate if AVC motion and POC table error occurs */
100 (0 << 3) | /* terminate if AVC mbdata error occurs */
101 (0 << 2) | /* terminate if AVC CABAC/CAVLC decode error occurs */
104 OUT_BCS_BATCH(batch, 0); /* DW3: reserved */
105 OUT_BCS_BATCH(batch, 0); /* DW4: reserved */
107 ADVANCE_BCS_BATCH(batch);
/* Program MFX_SURFACE_STATE (Gen6, 6 dwords): describe the reconstructed /
 * source surface as Y-tiled planar 4:2:0 (NV12-style interleaved U/V), using
 * the width/height/pitch cached in mfc_context->surface_state.
 * NOTE(review): OUT_BCS_BATCH(batch, ... call heads for DW2/DW3/DW4 appear
 * to have been dropped from this extraction. */
111 gen6_mfc_surface_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
113 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
114 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
116 BEGIN_BCS_BATCH(batch, 6);
118 OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
119 OUT_BCS_BATCH(batch, 0);
/* DW2: surface dimensions, minus-one encoded (Gen6 uses shifts 19/6) */
121 ((mfc_context->surface_state.height - 1) << 19) |
122 ((mfc_context->surface_state.width - 1) << 6));
/* DW3: format, tiling and pitch */
124 (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
125 (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
126 (0 << 22) | /* surface object control state, FIXME??? */
127 ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
128 (0 << 2) | /* must be 0 for interleave U/V */
129 (1 << 1) | /* must be y-tiled */
130 (I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
/* DW4: U(cb) plane offset — X must be 0 for interleaved chroma */
132 (0 << 16) | /* must be 0 for interleave U/V */
133 (mfc_context->surface_state.h_pitch)); /* y offset for U(cb) */
134 OUT_BCS_BATCH(batch, 0); /* DW5: V plane offset, unused for NV12 */
135 ADVANCE_BCS_BATCH(batch);
/* Program MFX_SURFACE_STATE (Gen7, 6 dwords). Identical intent to the Gen6
 * version; only the DW2 width/height bit positions differ (18/4 vs 19/6).
 * NOTE(review): OUT_BCS_BATCH(batch, ... call heads for DW2/DW3/DW4 appear
 * to have been dropped from this extraction. */
139 gen7_mfc_surface_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
141 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
142 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
144 BEGIN_BCS_BATCH(batch, 6);
146 OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
147 OUT_BCS_BATCH(batch, 0);
/* DW2: surface dimensions, minus-one encoded (Gen7 uses shifts 18/4) */
149 ((mfc_context->surface_state.height - 1) << 18) |
150 ((mfc_context->surface_state.width - 1) << 4));
/* DW3: format, tiling and pitch */
152 (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
153 (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
154 (0 << 22) | /* surface object control state, FIXME??? */
155 ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
156 (0 << 2) | /* must be 0 for interleave U/V */
157 (1 << 1) | /* must be tiled */
158 (I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
/* DW4: U(cb) plane offset */
160 (0 << 16) | /* must be 0 for interleave U/V */
161 (mfc_context->surface_state.h_pitch)); /* y offset for U(cb) */
162 OUT_BCS_BATCH(batch, 0); /* DW5: V plane offset, unused for NV12 */
163 ADVANCE_BCS_BATCH(batch);
/* Program MFX_PIPE_BUF_ADDR_STATE (24 dwords): hand the MFX engine every
 * surface/scratch BO it needs — deblocking outputs, source picture,
 * stream-out, row-store scratch buffers, the 16 reference-picture slots and
 * the macroblock status buffer. Relocations use the INSTRUCTION domain for
 * both read and write.
 * NOTE(review): several relocation-offset lines and the loop's else branch
 * appear to have been dropped from this extraction; `i` is declared on a
 * missing line. */
167 gen6_mfc_pipe_buf_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
169 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
170 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
173 BEGIN_BCS_BATCH(batch, 24);
175 OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (24 - 2));
177 OUT_BCS_BATCH(batch, 0); /* pre output addr */
179 OUT_BCS_RELOC(batch, mfc_context->post_deblocking_output.bo,
180 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
181 0); /* post output addr */
183 OUT_BCS_RELOC(batch, mfc_context->uncompressed_picture_source.bo,
184 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
185 0); /* uncompressed data */
186 OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
187 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
188 0); /* StreamOut data*/
189 OUT_BCS_RELOC(batch, mfc_context->intra_row_store_scratch_buffer.bo,
190 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
192 OUT_BCS_RELOC(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
193 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
195 /* 7..22 Reference pictures*/
196 for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
197 if ( mfc_context->reference_surfaces[i].bo != NULL) {
198 OUT_BCS_RELOC(batch, mfc_context->reference_surfaces[i].bo,
199 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
/* empty reference slot gets a NULL (zero) address */
202 OUT_BCS_BATCH(batch, 0);
205 OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
206 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
207 0); /* Macroblock status buffer*/
209 ADVANCE_BCS_BATCH(batch);
/* Program MFX_IND_OBJ_BASE_ADDR_STATE (Gen6, 11 dwords): point the indirect
 * MV object at the VME output BO and the indirect PAK-BSE object (compressed
 * bitstream destination) at the mfc_indirect_pak_bse_object BO.
 * NOTE(review): the OUT_BCS_RELOC(batch, call heads and the first reloc's
 * offset line appear to have been dropped from this extraction. */
213 gen6_mfc_ind_obj_base_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
215 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
216 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
217 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
219 BEGIN_BCS_BATCH(batch, 11);
221 OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
222 OUT_BCS_BATCH(batch, 0);
223 OUT_BCS_BATCH(batch, 0);
224 /* MFX Indirect MV Object Base Address */
225 OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
226 OUT_BCS_BATCH(batch, 0);
227 OUT_BCS_BATCH(batch, 0);
228 OUT_BCS_BATCH(batch, 0);
229 OUT_BCS_BATCH(batch, 0);
230 OUT_BCS_BATCH(batch, 0);
231 /*MFC Indirect PAK-BSE Object Base Address for Encoder*/
233 mfc_context->mfc_indirect_pak_bse_object.bo,
234 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
/* upper bound of the PAK-BSE object: end offset within the same BO */
237 mfc_context->mfc_indirect_pak_bse_object.bo,
238 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
239 mfc_context->mfc_indirect_pak_bse_object.end_offset);
241 ADVANCE_BCS_BATCH(batch);
/* Program MFX_IND_OBJ_BASE_ADDR_STATE (Gen7, 11 dwords). Same layout as the
 * Gen6 variant except DW4 carries the documented "must set" 2GB upper-bound
 * marker for the MV object.
 * NOTE(review): as in the Gen6 variant, the OUT_BCS_RELOC(batch, call heads
 * for the PAK-BSE relocs appear to have been dropped from this extraction. */
247 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
248 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
249 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
251 BEGIN_BCS_BATCH(batch, 11);
253 OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
254 OUT_BCS_BATCH(batch, 0);
255 OUT_BCS_BATCH(batch, 0);
256 /* MFX Indirect MV Object Base Address */
257 OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
258 OUT_BCS_BATCH(batch, 0x80000000); /* must set, up to 2G */
259 OUT_BCS_BATCH(batch, 0);
260 OUT_BCS_BATCH(batch, 0);
261 OUT_BCS_BATCH(batch, 0);
262 OUT_BCS_BATCH(batch, 0);
263 /*MFC Indirect PAK-BSE Object Base Address for Encoder*/
265 mfc_context->mfc_indirect_pak_bse_object.bo,
266 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
/* upper bound of the PAK-BSE object: end offset within the same BO */
269 mfc_context->mfc_indirect_pak_bse_object.bo,
270 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
271 mfc_context->mfc_indirect_pak_bse_object.end_offset);
273 ADVANCE_BCS_BATCH(batch);
/* Program MFX_BSP_BUF_BASE_ADDR_STATE (4 dwords): supply the BSD/MPC
 * row-store scratch BO; the remaining two buffer slots are left NULL.
 * NOTE(review): the reloc's trailing offset argument line appears to have
 * been dropped from this extraction. */
277 gen6_mfc_bsp_buf_base_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
279 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
280 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
282 BEGIN_BCS_BATCH(batch, 4);
284 OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (4 - 2));
285 OUT_BCS_RELOC(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
286 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
288 OUT_BCS_BATCH(batch, 0);
289 OUT_BCS_BATCH(batch, 0);
291 ADVANCE_BCS_BATCH(batch);
/* Program MFX_AVC_IMG_STATE (Gen6, 13 dwords): frame-level AVC encode
 * parameters — MB counts, QP offsets, CABAC mode, rate-control masks and the
 * fixed QP-delta / frame-size-limit tables used for bitrate control.
 * NOTE(review): the OUT_BCS_BATCH(batch, call heads for several bit-field
 * dwords appear to have been dropped from this extraction. */
295 gen6_mfc_avc_img_state(VADriverContextP ctx,struct encode_state *encode_state,
296 struct gen6_encoder_context *gen6_encoder_context)
298 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
299 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
300 VAEncSequenceParameterBufferH264Ext *pSequenceParameter = (VAEncSequenceParameterBufferH264Ext *)encode_state->seq_param_ext->buffer;
/* frame dimensions rounded up to whole macroblocks */
301 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
302 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
304 BEGIN_BCS_BATCH(batch, 13);
305 OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (13 - 2));
307 ((width_in_mbs * height_in_mbs) & 0xFFFF));
309 (height_in_mbs << 16) |
310 (width_in_mbs << 0));
/* DW3: QP offsets and image structure (frame) */
312 (0 << 24) | /*Second Chroma QP Offset*/
313 (0 << 16) | /*Chroma QP Offset*/
314 (0 << 14) | /*Max-bit conformance Intra flag*/
315 (0 << 13) | /*Max Macroblock size conformance Inter flag*/
316 (1 << 12) | /*Should always be written as "1" */
317 (0 << 10) | /*QM Preset FLag */
318 (0 << 8) | /*Image Structure*/
319 (0 << 0) ); /*Current Decoed Image Frame Store ID, reserved in Encode mode*/
/* DW4: entropy coding and frame-structure flags (CABAC, frame MBs only) */
321 (400 << 16) | /*Mininum Frame size*/
322 (0 << 15) | /*Disable reading of Macroblock Status Buffer*/
323 (0 << 14) | /*Load BitStream Pointer only once, 1 slic 1 frame*/
324 (0 << 13) | /*CABAC 0 word insertion test enable*/
325 (1 << 12) | /*MVUnpackedEnable,compliant to DXVA*/
326 (1 << 10) | /*Chroma Format IDC, 4:2:0*/
327 (1 << 7) | /*0:CAVLC encoding mode,1:CABAC*/
328 (0 << 6) | /*Only valid for VLD decoding mode*/
329 (0 << 5) | /*Constrained Intra Predition Flag, from PPS*/
330 (pSequenceParameter->direct_8x8_inference_flag << 4) | /*Direct 8x8 inference flag*/
331 (0 << 3) | /*Only 8x8 IDCT Transform Mode Flag*/
332 (1 << 2) | /*Frame MB only flag*/
333 (0 << 1) | /*MBAFF mode is in active*/
334 (0 << 0) ); /*Field picture flag*/
/* DW5: rate-control enables and report masks */
336 (1<<16) | /*Frame Size Rate Control Flag*/
338 (1<<9) | /*MB level Rate Control Enabling Flag*/
339 (1 << 3) | /*FrameBitRateMinReportMask*/
340 (1 << 2) | /*FrameBitRateMaxReportMask*/
341 (1 << 1) | /*InterMBMaxSizeReportMask*/
342 (1 << 0) ); /*IntraMBMaxSizeReportMask*/
343 OUT_BCS_BATCH(batch, /*Inter and Intra Conformance Max size limit*/
344 (0x0600 << 16) | /*InterMbMaxSz 192 Byte*/
345 (0x0800) ); /*IntraMbMaxSz 256 Byte*/
346 OUT_BCS_BATCH(batch, 0x00000000); /*Reserved : MBZReserved*/
347 OUT_BCS_BATCH(batch, 0x01020304); /*Slice QP Delta for bitrate control*/
348 OUT_BCS_BATCH(batch, 0xFEFDFCFB);
349 OUT_BCS_BATCH(batch, 0x80601004); /*MAX = 128KB, MIN = 64KB*/
350 OUT_BCS_BATCH(batch, 0x00800001);
351 OUT_BCS_BATCH(batch, 0);
353 ADVANCE_BCS_BATCH(batch);
/* Program MFX_AVC_IMG_STATE (Gen7, 16 dwords). Unlike the Gen6 variant this
 * one does not read the sequence parameter buffer: direct_8x8_inference is
 * hard-coded to 0 and the rate-control dword is left zero.
 * NOTE(review): Gen7 encodes DW2 MB counts minus-one, unlike Gen6; the
 * OUT_BCS_BATCH(batch, call heads for the bit-field dwords appear to have
 * been dropped from this extraction. */
357 gen7_mfc_avc_img_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
359 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
360 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
362 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
363 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
365 BEGIN_BCS_BATCH(batch, 16);
366 OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (16 - 2));
368 ((width_in_mbs * height_in_mbs) & 0xFFFF));
370 ((height_in_mbs - 1) << 16) |
371 ((width_in_mbs - 1) << 0));
/* DW3: QP offsets, weighted-prediction fields currently zeroed */
373 (0 << 24) | /* Second Chroma QP Offset */
374 (0 << 16) | /* Chroma QP Offset */
375 (0 << 14) | /* Max-bit conformance Intra flag */
376 (0 << 13) | /* Max Macroblock size conformance Inter flag */
377 (0 << 12) | /* FIXME: Weighted_Pred_Flag */
378 (0 << 10) | /* FIXME: Weighted_BiPred_Idc */
379 (0 << 8) | /* FIXME: Image Structure */
380 (0 << 0) ); /* Current Decoed Image Frame Store ID, reserved in Encode mode */
/* DW4: entropy coding and frame-structure flags (CABAC, frame MBs only) */
382 (0 << 16) | /* Mininum Frame size */
383 (0 << 15) | /* Disable reading of Macroblock Status Buffer */
384 (0 << 14) | /* Load BitStream Pointer only once, 1 slic 1 frame */
385 (0 << 13) | /* CABAC 0 word insertion test enable */
386 (1 << 12) | /* MVUnpackedEnable,compliant to DXVA */
387 (1 << 10) | /* Chroma Format IDC, 4:2:0 */
388 (0 << 9) | /* FIXME: MbMvFormatFlag */
389 (1 << 7) | /* 0:CAVLC encoding mode,1:CABAC */
390 (0 << 6) | /* Only valid for VLD decoding mode */
391 (0 << 5) | /* Constrained Intra Predition Flag, from PPS */
392 (0 << 4) | /* Direct 8x8 inference flag */
393 (0 << 3) | /* Only 8x8 IDCT Transform Mode Flag */
394 (1 << 2) | /* Frame MB only flag */
395 (0 << 1) | /* MBAFF mode is in active */
396 (0 << 0)); /* Field picture flag */
397 OUT_BCS_BATCH(batch, 0); /* Mainly about MB rate control and debug, just ignoring */
398 OUT_BCS_BATCH(batch, /* Inter and Intra Conformance Max size limit */
399 (0xBB8 << 16) | /* InterMbMaxSz */
400 (0xEE8) ); /* IntraMbMaxSz */
401 OUT_BCS_BATCH(batch, 0); /* Reserved */
402 OUT_BCS_BATCH(batch, 0); /* Slice QP Delta for bitrate control */
403 OUT_BCS_BATCH(batch, 0); /* Slice QP Delta for bitrate control */
404 OUT_BCS_BATCH(batch, 0x8C000000);
405 OUT_BCS_BATCH(batch, 0x00010000);
406 OUT_BCS_BATCH(batch, 0);
407 OUT_BCS_BATCH(batch, 0);
408 OUT_BCS_BATCH(batch, 0);
409 OUT_BCS_BATCH(batch, 0);
411 ADVANCE_BCS_BATCH(batch);
/* Program MFX_AVC_DIRECTMODE_STATE (69 dwords): direct-MV buffer addresses
 * for reference and current frames, followed by the 32-entry POC list
 * (filled here with a simple i/2 pattern).
 * NOTE(review): reloc offset lines, the loop's else branch and the closing
 * braces appear to have been dropped from this extraction; `i` is declared
 * on a missing line. */
414 static void gen6_mfc_avc_directmode_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
416 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
417 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
421 BEGIN_BCS_BATCH(batch, 69);
423 OUT_BCS_BATCH(batch, MFX_AVC_DIRECTMODE_STATE | (69 - 2));
425 /* Reference frames and Current frames */
426 for(i = 0; i < NUM_MFC_DMV_BUFFERS; i++) {
427 if ( mfc_context->direct_mv_buffers[i].bo != NULL) {
428 OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[i].bo,
429 I915_GEM_DOMAIN_INSTRUCTION, 0,
/* empty DMV slot gets a NULL (zero) address */
432 OUT_BCS_BATCH(batch, 0);
/* POC List — 32 entries */
437 for(i = 0; i < 32; i++) {
438 OUT_BCS_BATCH(batch, i/2);
440 OUT_BCS_BATCH(batch, 0);
441 OUT_BCS_BATCH(batch, 0);
443 ADVANCE_BCS_BATCH(batch);
/* Program MFX_AVC_SLICE_STATE (11 dwords) for the single slice covering the
 * whole frame: slice type, reference count, QP, rate-control enables and the
 * bitstream output offset. Only one slice per frame is supported (see TODO).
 * NOTE(review): the signature is incomplete in this extraction — the body
 * reads `slice_type` and `qp`, which must be dropped parameter lines; a few
 * dword-tail lines also appear missing. */
446 static void gen6_mfc_avc_slice_state(VADriverContextP ctx,
448 struct encode_state *encode_state,
449 struct gen6_encoder_context *gen6_encoder_context,
450 int rate_control_enable,
453 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
454 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
455 VAEncSliceParameterBufferH264Ext *pSliceParameter = (VAEncSliceParameterBufferH264Ext *)encode_state->slice_params_ext[0]->buffer; /* TODO: multi slices support */
/* NOTE(review): stray double semicolon below — harmless but should be cleaned up */
457 BEGIN_BCS_BATCH(batch, 11);;
459 OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2) );
461 OUT_BCS_BATCH(batch, slice_type); /*Slice Type: I:P:B Slice*/
463 if ( slice_type == SLICE_TYPE_I )
464 OUT_BCS_BATCH(batch, 0); /*no reference frames and pred_weight_table*/
466 OUT_BCS_BATCH(batch, 0x00010000); /*1 reference frame*/
/* DW3: direct prediction type, deblocking control and slice QP */
469 (pSliceParameter->direct_spatial_mv_pred_flag<<29) | /*Direct Prediction Type*/
470 (0<<24) | /*Enable deblocking operation*/
471 (qp<<16) | /*Slice Quantization Parameter*/
473 OUT_BCS_BATCH(batch, 0); /*First MB X&Y , the postion of current slice*/
474 OUT_BCS_BATCH(batch, ( ((mfc_context->surface_state.height+15)/16) << 16) );
/* DW6: rate-control behaviour and slice framing flags */
477 (rate_control_enable<<31) | /*in CBR mode RateControlCounterEnable = enable*/
478 (1<<30) | /*ResetRateControlCounter*/
479 (0<<28) | /*RC Triggle Mode = Always Rate Control*/
480 (8<<24) | /*RC Stable Tolerance, middle level*/
481 (rate_control_enable<<23) | /*RC Panic Enable*/
482 (0<<22) | /*QP mode, don't modfiy CBP*/
483 (0<<21) | /*MB Type Direct Conversion Disable*/
484 (0<<20) | /*MB Type Skip Conversion Disable*/
485 (1<<19) | /*IsLastSlice*/
486 (0<<18) | /*BitstreamOutputFlag Compressed BitStream Output Disable Flag 0:enable 1:disable*/
487 (1<<17) | /*HeaderPresentFlag*/
488 (1<<16) | /*SliceData PresentFlag*/
489 (1<<15) | /*TailPresentFlag*/
490 (1<<13) | /*RBSP NAL TYPE*/
491 (0<<12) ); /*CabacZeroWordInsertionEnable*/
/* DW7: where in the PAK-BSE object the compressed slice starts */
493 OUT_BCS_BATCH(batch, mfc_context->mfc_indirect_pak_bse_object.offset);
495 OUT_BCS_BATCH(batch, (24<<24) | /*Target QP - 24 is lowest QP*/
496 (20<<16) | /*Target QP + 20 is highest QP*/
501 OUT_BCS_BATCH(batch, 0x08888888);
502 OUT_BCS_BATCH(batch, 0);
504 ADVANCE_BCS_BATCH(batch);
/* Program MFX_AVC_QM_STATE (Gen6, 58 dwords): flat quantization matrices —
 * every entry 0x10 (16), i.e. the H.264 default "flat" scaling lists.
 * NOTE(review): `i` is declared on a line missing from this extraction. */
506 static void gen6_mfc_avc_qm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
508 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
511 BEGIN_BCS_BATCH(batch, 58);
513 OUT_BCS_BATCH(batch, MFX_AVC_QM_STATE | 56);
514 OUT_BCS_BATCH(batch, 0xFF ) ;
515 for( i = 0; i < 56; i++) {
516 OUT_BCS_BATCH(batch, 0x10101010);
519 ADVANCE_BCS_BATCH(batch);
/* Program MFC_AVC_FQM_STATE (Gen6, 113 dwords): forward quantization
 * matrices, flat 0x1000 (4096) entries matching the flat QM above.
 * NOTE(review): `i` is declared on a line missing from this extraction. */
522 static void gen6_mfc_avc_fqm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
524 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
527 BEGIN_BCS_BATCH(batch, 113);
528 OUT_BCS_BATCH(batch, MFC_AVC_FQM_STATE | (113 - 2));
530 for(i = 0; i < 112;i++) {
531 OUT_BCS_BATCH(batch, 0x10001000);
534 ADVANCE_BCS_BATCH(batch);
/* Program one MFX_QM_STATE command (Gen7, 18 dwords) for the matrix selected
 * by qm_type, copying qm_length dwords of caller data into the payload.
 * NOTE(review): the `qm_type` / `qm` / `qm_length` parameter lines are
 * missing from this extraction. Also note: when qm_length < 16 the tail of
 * qm_buffer is emitted uninitialized (stack garbage) — callers pass 12 for
 * 4x4 matrices, so consider zero-initializing qm_buffer. */
538 gen7_mfc_qm_state(VADriverContextP ctx,
542 struct gen6_encoder_context *gen6_encoder_context)
544 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
545 unsigned int qm_buffer[16];
547 assert(qm_length <= 16);
548 assert(sizeof(*qm) == 4);
549 memcpy(qm_buffer, qm, qm_length * 4);
551 BEGIN_BCS_BATCH(batch, 18);
552 OUT_BCS_BATCH(batch, MFX_QM_STATE | (18 - 2));
553 OUT_BCS_BATCH(batch, qm_type << 0);
554 intel_batchbuffer_data(batch, qm_buffer, 16 * 4);
555 ADVANCE_BCS_BATCH(batch);
/* Gen7 AVC QM setup: emit flat (all-16) scaling lists for the four AVC
 * matrix types via gen7_mfc_qm_state (12 dwords for 4x4, 16 for 8x8). */
558 static void gen7_mfc_avc_qm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
560 unsigned int qm[16] = {
561 0x10101010, 0x10101010, 0x10101010, 0x10101010,
562 0x10101010, 0x10101010, 0x10101010, 0x10101010,
563 0x10101010, 0x10101010, 0x10101010, 0x10101010,
564 0x10101010, 0x10101010, 0x10101010, 0x10101010
567 gen7_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 12, gen6_encoder_context);
568 gen7_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 12, gen6_encoder_context);
569 gen7_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 16, gen6_encoder_context);
570 gen7_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 16, gen6_encoder_context);
/* Program one MFX_FQM_STATE command (Gen7, 34 dwords) for the matrix
 * selected by fqm_type, copying fqm_length dwords of caller data.
 * NOTE(review): the `fqm_type` / `fqm` / `fqm_length` parameter lines are
 * missing from this extraction. As with gen7_mfc_qm_state, when
 * fqm_length < 32 the tail of fqm_buffer is emitted uninitialized —
 * callers pass 24 for 4x4 matrices; consider zero-initializing. */
574 gen7_mfc_fqm_state(VADriverContextP ctx,
578 struct gen6_encoder_context *gen6_encoder_context)
580 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
581 unsigned int fqm_buffer[32];
583 assert(fqm_length <= 32);
584 assert(sizeof(*fqm) == 4);
585 memcpy(fqm_buffer, fqm, fqm_length * 4);
587 BEGIN_BCS_BATCH(batch, 34);
588 OUT_BCS_BATCH(batch, MFX_FQM_STATE | (34 - 2));
589 OUT_BCS_BATCH(batch, fqm_type << 0);
590 intel_batchbuffer_data(batch, fqm_buffer, 32 * 4);
591 ADVANCE_BCS_BATCH(batch);
/* Gen7 AVC FQM setup: emit flat (all-0x1000) forward quantization matrices
 * for the four AVC matrix types via gen7_mfc_fqm_state (24 dwords for 4x4,
 * 32 for 8x8). */
594 static void gen7_mfc_avc_fqm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
596 unsigned int qm[32] = {
597 0x10001000, 0x10001000, 0x10001000, 0x10001000,
598 0x10001000, 0x10001000, 0x10001000, 0x10001000,
599 0x10001000, 0x10001000, 0x10001000, 0x10001000,
600 0x10001000, 0x10001000, 0x10001000, 0x10001000,
601 0x10001000, 0x10001000, 0x10001000, 0x10001000,
602 0x10001000, 0x10001000, 0x10001000, 0x10001000,
603 0x10001000, 0x10001000, 0x10001000, 0x10001000,
604 0x10001000, 0x10001000, 0x10001000, 0x10001000
607 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 24, gen6_encoder_context);
608 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 24, gen6_encoder_context);
609 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 32, gen6_encoder_context);
610 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 32, gen6_encoder_context);
/* Program MFX_AVC_REF_IDX_STATE twice (10 dwords each): once for reference
 * list L0 and once for L1, each with a single active reference; remaining
 * slots are the 0x80 "invalid" marker.
 * NOTE(review): `i` is declared on a line missing from this extraction. */
613 static void gen6_mfc_avc_ref_idx_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
615 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
618 BEGIN_BCS_BATCH(batch, 10);
619 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
620 OUT_BCS_BATCH(batch, 0); //Select L0
621 OUT_BCS_BATCH(batch, 0x80808020); //Only 1 reference
622 for(i = 0; i < 7; i++) {
623 OUT_BCS_BATCH(batch, 0x80808080);
625 ADVANCE_BCS_BATCH(batch);
627 BEGIN_BCS_BATCH(batch, 10);
628 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
629 OUT_BCS_BATCH(batch, 1); //Select L1
630 OUT_BCS_BATCH(batch, 0x80808022); //Only 1 reference
631 for(i = 0; i < 7; i++) {
632 OUT_BCS_BATCH(batch, 0x80808080);
634 ADVANCE_BCS_BATCH(batch);
/* Emit MFC_AVC_INSERT_OBJECT: inject raw bits (headers / tail data) into the
 * output bitstream. `lenght_in_dws` [sic — typo preserved, it is part of the
 * signature] is the payload length in dwords; data_bits_in_last_dw says how
 * many bits of the final dword are valid; skip_emul_byte_count bytes are
 * exempt from emulation-prevention-byte insertion. */
638 gen6_mfc_avc_insert_object(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context,
639 unsigned int *insert_data, int lenght_in_dws, int data_bits_in_last_dw,
640 int skip_emul_byte_count, int is_last_header, int is_end_of_slice)
642 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
644 BEGIN_BCS_BATCH(batch, lenght_in_dws + 2);
646 OUT_BCS_BATCH(batch, MFC_AVC_INSERT_OBJECT | (lenght_in_dws + 2 - 2));
/* DW1: payload framing — offsets, bit count, emulation skip, flags */
648 (0 << 16) | /* always start at offset 0 */
649 (data_bits_in_last_dw << 8) |
650 (skip_emul_byte_count << 4) |
651 (1 << 3) | /* FIXME: ??? */
652 ((!!is_last_header) << 2) |
653 ((!!is_end_of_slice) << 1) |
654 (0 << 0)); /* FIXME: ??? */
656 intel_batchbuffer_data(batch, insert_data, lenght_in_dws * 4);
657 ADVANCE_BCS_BATCH(batch);
/* Emit one MFC_AVC_PAK_OBJECT for an intra macroblock at (x, y), taking the
 * Intra16x16 mode data from the VME output record `msg`. Returns the command
 * length in dwords (11) so the caller can track batch space.
 * target_mb_size/max_mb_size feed the per-MB rate-control size budget
 * (words); max is clamped to the 8-bit field limit of 255. */
661 gen6_mfc_avc_pak_object_intra(VADriverContextP ctx, int x, int y, int end_mb, int qp,unsigned int *msg,
662 struct gen6_encoder_context *gen6_encoder_context,
663 int intra_mb_size_in_bits)
665 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
666 int len_in_dwords = 11;
667 unsigned char target_mb_size = intra_mb_size_in_bits / 16; //In Words
668 unsigned char max_mb_size = target_mb_size * 2 > 255? 255: target_mb_size * 2 ;
670 BEGIN_BCS_BATCH(batch, len_in_dwords);
672 OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
673 OUT_BCS_BATCH(batch, 0);
674 OUT_BCS_BATCH(batch, 0);
/* DW3: MB control — no MVs, DC coded-block-pattern bits set */
676 (0 << 24) | /* PackedMvNum, Debug*/
677 (0 << 20) | /* No motion vector */
678 (1 << 19) | /* CbpDcY */
679 (1 << 18) | /* CbpDcU */
680 (1 << 17) | /* CbpDcV */
683 OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x); /* Code Block Pattern for Y*/
684 OUT_BCS_BATCH(batch, 0x000F000F); /* Code Block Pattern */
685 OUT_BCS_BATCH(batch, (0 << 27) | (end_mb << 26) | qp); /* Last MB */
687 /*Stuff for Intra MB*/
688 OUT_BCS_BATCH(batch, msg[1]); /* We using Intra16x16 no 4x4 predmode*/
689 OUT_BCS_BATCH(batch, msg[2]);
690 OUT_BCS_BATCH(batch, msg[3]&0xFC);
692 /*MaxSizeInWord and TargetSzieInWord*/
693 OUT_BCS_BATCH(batch, (max_mb_size << 24) |
694 (target_mb_size << 16) );
696 ADVANCE_BCS_BATCH(batch);
698 return len_in_dwords;
/* Emit one MFC_AVC_PAK_OBJECT for an inter macroblock at (x, y), pulling 32
 * motion vectors indirectly from the VME output at `offset`. B slices get
 * the extra 0xF<<28 bits in the Last-MB dword. Returns the command length in
 * dwords (11).
 * NOTE(review): some bit-field lines of the MB-control dword appear to be
 * missing from this extraction (gap between shifts 7 and 4). */
701 static int gen6_mfc_avc_pak_object_inter(VADriverContextP ctx, int x, int y, int end_mb, int qp, unsigned int offset,
702 struct gen6_encoder_context *gen6_encoder_context,
703 int inter_mb_size_in_bits, int slice_type)
705 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
706 int len_in_dwords = 11;
707 unsigned char target_mb_size = inter_mb_size_in_bits / 16; //In Words
708 unsigned char max_mb_size = target_mb_size * 16 > 255? 255: target_mb_size * 16 ;
710 BEGIN_BCS_BATCH(batch, len_in_dwords);
712 OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
714 OUT_BCS_BATCH(batch, 32); /* 32 MV*/
715 OUT_BCS_BATCH(batch, offset); /* indirect MV data offset in VME output */
/* DW3: MB control — P_L0_16x16, frame MB, DC coded-block-pattern bits set */
718 (1 << 24) | /* PackedMvNum, Debug*/
719 (4 << 20) | /* 8 MV, SNB don't use it*/
720 (1 << 19) | /* CbpDcY */
721 (1 << 18) | /* CbpDcU */
722 (1 << 17) | /* CbpDcV */
723 (0 << 15) | /* Transform8x8Flag = 0*/
724 (0 << 14) | /* Frame based*/
725 (0 << 13) | /* Inter MB */
726 (1 << 8) | /* MbType = P_L0_16x16 */
727 (0 << 7) | /* MBZ for frame */
729 (2 << 4) | /* MBZ for inter*/
731 (0 << 2) | /* SkipMbFlag */
732 (0 << 0)); /* InterMbMode */
734 OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x); /* Code Block Pattern for Y*/
735 OUT_BCS_BATCH(batch, 0x000F000F); /* Code Block Pattern */
736 if ( slice_type == SLICE_TYPE_B) {
737 OUT_BCS_BATCH(batch, (0xF<<28) | (end_mb << 26) | qp); /* Last MB */
739 OUT_BCS_BATCH(batch, (end_mb << 26) | qp); /* Last MB */
743 /*Stuff for Inter MB*/
744 OUT_BCS_BATCH(batch, 0x0);
745 OUT_BCS_BATCH(batch, 0x0);
746 OUT_BCS_BATCH(batch, 0x0);
748 /*MaxSizeInWord and TargetSzieInWord*/
749 OUT_BCS_BATCH(batch, (max_mb_size << 24) |
750 (target_mb_size << 16) );
752 ADVANCE_BCS_BATCH(batch);
754 return len_in_dwords;
/* Per-frame MFC (re)initialization: drop all previously referenced BOs
 * (outputs, source, bitstream object, DMV buffers, reference surfaces) and
 * allocate fresh scratch buffers (intra row store, MB status, deblocking
 * filter row store, BSD/MPC row store).
 * NOTE(review): the dri_bo_alloc argument lines (name/size/alignment) and
 * assert(bo) checks appear to be missing from this extraction; `i` and `bo`
 * are declared on missing lines. */
757 static void gen6_mfc_init(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
759 struct i965_driver_data *i965 = i965_driver_data(ctx);
760 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
764 /*Encode common setup for MFC*/
765 dri_bo_unreference(mfc_context->post_deblocking_output.bo);
766 mfc_context->post_deblocking_output.bo = NULL;
768 dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
769 mfc_context->pre_deblocking_output.bo = NULL;
771 dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
772 mfc_context->uncompressed_picture_source.bo = NULL;
774 dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
775 mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
777 for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
/* BUG(review): stray ';' terminates this if — the NULL check is a no-op and
 * the unreference on the next line always runs.  Remove the semicolon. */
778 if ( mfc_context->direct_mv_buffers[i].bo != NULL);
779 dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
780 mfc_context->direct_mv_buffers[i].bo = NULL;
783 for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
784 if (mfc_context->reference_surfaces[i].bo != NULL)
785 dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
786 mfc_context->reference_surfaces[i].bo = NULL;
789 dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
790 bo = dri_bo_alloc(i965->intel.bufmgr,
795 mfc_context->intra_row_store_scratch_buffer.bo = bo;
797 dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
798 bo = dri_bo_alloc(i965->intel.bufmgr,
803 mfc_context->macroblock_status_buffer.bo = bo;
805 dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
806 bo = dri_bo_alloc(i965->intel.bufmgr,
808 49152, /* 6 * 128 * 64 */
811 mfc_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
813 dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
814 bo = dri_bo_alloc(i965->intel.bufmgr,
816 12288, /* 1.5 * 128 * 64 */
819 mfc_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
/* Main PAK programming loop: build the per-frame batch — state commands
 * (pipe mode, surface, buffer addresses, image/QM/FQM/slice state, slice
 * header insertion) once, then one PAK object per macroblock, finally the
 * tail insert object. Maps the VME output BO to read per-MB records for
 * intra MBs. Gen7 uses its own state-command variants where they differ.
 * NOTE(review): several lines are missing from this extraction (loop-body
 * braces, the is_intra branch around the pak-object calls, `x`/`y`
 * declarations, emit_new_state reset, batch-restart logic). */
822 void gen6_mfc_avc_pipeline_programing(VADriverContextP ctx,
823 struct encode_state *encode_state,
824 struct gen6_encoder_context *gen6_encoder_context)
826 struct i965_driver_data *i965 = i965_driver_data(ctx);
827 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
828 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
829 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
830 VAEncSequenceParameterBufferH264Ext *pSequenceParameter = (VAEncSequenceParameterBufferH264Ext *)encode_state->seq_param_ext->buffer;
831 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
832 VAEncSliceParameterBufferH264Ext *pSliceParameter = (VAEncSliceParameterBufferH264Ext *)encode_state->slice_params_ext[0]->buffer; /* FIXME: multi slices */
833 VAEncH264DecRefPicMarkingBuffer *pDecRefPicMarking = NULL;
834 unsigned int *msg = NULL, offset = 0;
835 int emit_new_state = 1, object_len_in_bytes;
836 int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
837 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
838 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
/* derive per-MB bit budgets from the requested bitrate and frame rate */
840 int rate_control_mode = pSequenceParameter->rate_control_method;
841 float fps = pSequenceParameter->time_scale * 0.5 / pSequenceParameter->num_units_in_tick ;
842 int inter_mb_size = pSequenceParameter->bits_per_second * 1.0 / fps / width_in_mbs / height_in_mbs;
843 int intra_mb_size = inter_mb_size * 5.0;
844 int qp = pPicParameter->pic_init_qp;
845 unsigned char *slice_header = NULL;
846 int slice_header_length_in_bits = 0;
847 unsigned int tail_data[] = { 0x0 };
849 if (encode_state->dec_ref_pic_marking)
850 pDecRefPicMarking = (VAEncH264DecRefPicMarkingBuffer *)encode_state->dec_ref_pic_marking->buffer;
852 slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, pDecRefPicMarking, &slice_header);
854 if ( rate_control_mode != 2) {
856 if ( intra_mb_size > 384*8) //ONE MB raw data is 384 bytes
857 intra_mb_size = 384*8;
/* BUG(review): copy-paste defect — the condition tests inter_mb_size but
 * the assignment clamps intra_mb_size; should be `inter_mb_size = 256*8;` */
858 if ( inter_mb_size > 256*8)
859 intra_mb_size = 256*8;
862 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
/* map VME output so intra PAK objects can read per-MB mode records */
865 dri_bo_map(vme_context->vme_output.bo , 1);
866 msg = (unsigned int *)vme_context->vme_output.bo->virtual;
869 for (y = 0; y < height_in_mbs; y++) {
870 for (x = 0; x < width_in_mbs; x++) {
871 int last_mb = (y == (height_in_mbs-1)) && ( x == (width_in_mbs-1) );
873 if (emit_new_state) {
874 intel_batchbuffer_emit_mi_flush(batch);
876 if (IS_GEN7(i965->intel.device_id)) {
877 gen7_mfc_pipe_mode_select(ctx, MFX_FORMAT_AVC, gen6_encoder_context);
878 gen7_mfc_surface_state(ctx, gen6_encoder_context);
879 gen7_mfc_ind_obj_base_addr_state(ctx, gen6_encoder_context);
881 gen6_mfc_pipe_mode_select(ctx, gen6_encoder_context);
882 gen6_mfc_surface_state(ctx, gen6_encoder_context);
883 gen6_mfc_ind_obj_base_addr_state(ctx, gen6_encoder_context);
886 gen6_mfc_pipe_buf_addr_state(ctx, gen6_encoder_context);
887 gen6_mfc_bsp_buf_base_addr_state(ctx, gen6_encoder_context);
889 if (IS_GEN7(i965->intel.device_id)) {
890 gen7_mfc_avc_img_state(ctx, gen6_encoder_context);
891 gen7_mfc_avc_qm_state(ctx, gen6_encoder_context);
892 gen7_mfc_avc_fqm_state(ctx, gen6_encoder_context);
894 gen6_mfc_avc_img_state(ctx, encode_state,gen6_encoder_context);
895 gen6_mfc_avc_qm_state(ctx, gen6_encoder_context);
896 gen6_mfc_avc_fqm_state(ctx, gen6_encoder_context);
899 gen6_mfc_avc_directmode_state(ctx, gen6_encoder_context);
900 gen6_mfc_avc_ref_idx_state(ctx, gen6_encoder_context);
901 gen6_mfc_avc_slice_state(ctx, pSliceParameter->slice_type,
902 encode_state, gen6_encoder_context,
903 rate_control_mode == 0, qp);
904 gen6_mfc_avc_insert_object(ctx, gen6_encoder_context,
905 (unsigned int *)slice_header, ALIGN(slice_header_length_in_bits, 32) >> 5, slice_header_length_in_bits & 0x1f,
906 5, 1, 0); /* first 5 bytes are start code + nal unit type */
912 object_len_in_bytes = gen6_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, gen6_encoder_context, intra_mb_size);
915 object_len_in_bytes = gen6_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp, offset, gen6_encoder_context, inter_mb_size, pSliceParameter->slice_type);
/* if the batch cannot fit the next PAK object, flush and restart, which
 * forces the state commands to be re-emitted */
919 if (intel_batchbuffer_check_free_space(batch, object_len_in_bytes) == 0) {
921 intel_batchbuffer_end_atomic(batch);
922 intel_batchbuffer_flush(batch);
924 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
/* tail insert: flush the bitstream with an end-of-slice marker */
929 gen6_mfc_avc_insert_object(ctx, gen6_encoder_context,
930 tail_data, sizeof(tail_data) >> 2, 32,
931 sizeof(tail_data), 1, 1);
934 dri_bo_unmap(vme_context->vme_output.bo);
938 intel_batchbuffer_end_atomic(batch);
/* Destructor for the per-surface AVC aux data (direct-MV BOs) stored in
 * obj_surface->private_data; installed as free_private_data by
 * gen6_mfc_avc_prepare().  The NULL guard and the free of the aux struct
 * itself are on lines elided from this chunk. */
942 gen6_mfc_free_avc_surface(void **data)
944 struct gen6_mfc_avc_surface_aux *avc_surface = *data;
/* Drop the top/bottom direct-MV buffer references and NULL the pointers
 * so a stale aux struct cannot be double-unreferenced. */
949 dri_bo_unreference(avc_surface->dmv_top);
950 avc_surface->dmv_top = NULL;
951 dri_bo_unreference(avc_surface->dmv_bottom);
952 avc_surface->dmv_bottom = NULL;
/* Bind all MFC input/output buffers for one AVC frame, then program the
 * BCS pipeline:
 *  - current reconstructed surface + its two direct-MV BOs (lazily created
 *    and cached in obj_surface->private_data),
 *  - reference surfaces and their direct-MV BOs,
 *  - raw source surface,
 *  - coded (PAK/BSE output) buffer.
 * Returns vaStatus (always VA_STATUS_SUCCESS on the visible paths). */
958 static VAStatus gen6_mfc_avc_prepare(VADriverContextP ctx,
959 struct encode_state *encode_state,
960 struct gen6_encoder_context *gen6_encoder_context)
962 struct i965_driver_data *i965 = i965_driver_data(ctx);
963 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
964 struct object_surface *obj_surface;
965 struct object_buffer *obj_buffer;
966 struct gen6_mfc_avc_surface_aux* gen6_avc_surface;
968 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
969 VAStatus vaStatus = VA_STATUS_SUCCESS;
972 /*Setup all the input&output object*/
974 /* Setup current frame and current direct mv buffer*/
975 obj_surface = SURFACE(pPicParameter->CurrPic.picture_id);
/* Ensure the reconstructed surface has an NV12 BO backing it. */
977 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'));
978 if ( obj_surface->private_data == NULL) {
/* NOTE(review): calloc result is dereferenced without a NULL check (here and
 * in the reference loop below); also the (size, 1) argument order is the
 * reverse of the conventional calloc(nmemb, size). */
979 gen6_avc_surface = calloc(sizeof(struct gen6_mfc_avc_surface_aux), 1);
/* Allocation size/name arguments of dri_bo_alloc are on elided lines. */
980 gen6_avc_surface->dmv_top =
981 dri_bo_alloc(i965->intel.bufmgr,
985 gen6_avc_surface->dmv_bottom =
986 dri_bo_alloc(i965->intel.bufmgr,
990 assert(gen6_avc_surface->dmv_top);
991 assert(gen6_avc_surface->dmv_bottom);
/* Cache the aux struct on the surface; freed via gen6_mfc_free_avc_surface. */
992 obj_surface->private_data = (void *)gen6_avc_surface;
993 obj_surface->free_private_data = (void *)gen6_mfc_free_avc_surface;
995 gen6_avc_surface = (struct gen6_mfc_avc_surface_aux*) obj_surface->private_data;
/* The last two DMV slots are reserved for the current frame's top/bottom. */
996 mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 2].bo = gen6_avc_surface->dmv_top;
997 mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 1].bo = gen6_avc_surface->dmv_bottom;
998 dri_bo_reference(gen6_avc_surface->dmv_top);
999 dri_bo_reference(gen6_avc_surface->dmv_bottom);
/* Reconstructed output goes through the post-deblocking path (matches the
 * Post Deblocking Output bit set in MFX_PIPE_MODE_SELECT). */
1001 mfc_context->post_deblocking_output.bo = obj_surface->bo;
1002 dri_bo_reference(mfc_context->post_deblocking_output.bo);
/* orig_* = requested dimensions; width/height = padded pitch dimensions. */
1004 mfc_context->surface_state.width = obj_surface->orig_width;
1005 mfc_context->surface_state.height = obj_surface->orig_height;
1006 mfc_context->surface_state.w_pitch = obj_surface->width;
1007 mfc_context->surface_state.h_pitch = obj_surface->height;
1009 /* Setup reference frames and direct mv buffers*/
1010 for(i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++) {
1011 if ( pPicParameter->ReferenceFrames[i].picture_id != VA_INVALID_ID ) {
1012 obj_surface = SURFACE(pPicParameter->ReferenceFrames[i].picture_id);
1013 assert(obj_surface);
1014 if (obj_surface->bo != NULL) {
1015 mfc_context->reference_surfaces[i].bo = obj_surface->bo;
1016 dri_bo_reference(obj_surface->bo);
1018 /* Check DMV buffer */
1019 if ( obj_surface->private_data == NULL) {
/* Same lazy aux-struct creation as for the current frame (same unchecked
 * calloc caveat applies). */
1021 gen6_avc_surface = calloc(sizeof(struct gen6_mfc_avc_surface_aux), 1);
1022 gen6_avc_surface->dmv_top =
1023 dri_bo_alloc(i965->intel.bufmgr,
1027 gen6_avc_surface->dmv_bottom =
1028 dri_bo_alloc(i965->intel.bufmgr,
1032 assert(gen6_avc_surface->dmv_top);
1033 assert(gen6_avc_surface->dmv_bottom);
1034 obj_surface->private_data = gen6_avc_surface;
1035 obj_surface->free_private_data = gen6_mfc_free_avc_surface;
1038 gen6_avc_surface = (struct gen6_mfc_avc_surface_aux*) obj_surface->private_data;
1039 /* Setup DMV buffer */
/* Reference i occupies DMV slots 2i (top) and 2i+1 (bottom). */
1040 mfc_context->direct_mv_buffers[i*2].bo = gen6_avc_surface->dmv_top;
1041 mfc_context->direct_mv_buffers[i*2+1].bo = gen6_avc_surface->dmv_bottom;
1042 dri_bo_reference(gen6_avc_surface->dmv_top);
1043 dri_bo_reference(gen6_avc_surface->dmv_bottom);
/* Raw input picture (the surface handed to the encoder this frame). */
1049 obj_surface = SURFACE(encode_state->current_render_target);
1050 assert(obj_surface && obj_surface->bo);
1051 mfc_context->uncompressed_picture_source.bo = obj_surface->bo;
1052 dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
1054 obj_buffer = BUFFER (pPicParameter->CodedBuf); /* FIXME: fix this later */
1055 bo = obj_buffer->buffer_store->bo;
/* Coded-buffer window for PAK/BSE output: start past the (64-byte aligned)
 * VACodedBufferSegment header; end 0x1000 short of the buffer, page aligned. */
1057 mfc_context->mfc_indirect_pak_bse_object.bo = bo;
1058 mfc_context->mfc_indirect_pak_bse_object.offset = ALIGN(sizeof(VACodedBufferSegment), 64);
1059 mfc_context->mfc_indirect_pak_bse_object.end_offset = ALIGN (obj_buffer->size_element - 0x1000, 0x1000);
1060 dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
1062 /*Programing bcs pipeline*/
1063 gen6_mfc_avc_pipeline_programing(ctx, encode_state, gen6_encoder_context); //filling the pipeline
/* Submit the BCS batch built during prepare to the hardware.
 * encode_state is unused here; kept for the common pipeline signature. */
1068 static VAStatus gen6_mfc_run(VADriverContextP ctx,
1069 struct encode_state *encode_state,
1070 struct gen6_encoder_context *gen6_encoder_context)
1072 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
1074 intel_batchbuffer_flush(batch); //run the pipeline
1076 return VA_STATUS_SUCCESS;
/* Post-run hook.  Currently only debug scaffolding: dumps the reconstructed
 * surface via my_debug(); the commented alternatives show other surfaces
 * that were inspected during bring-up.  No synchronization or status
 * readback is performed on the visible lines. */
1079 static VAStatus gen6_mfc_stop(VADriverContextP ctx,
1080 struct encode_state *encode_state,
1081 struct gen6_encoder_context *gen6_encoder_context)
1084 struct i965_driver_data *i965 = i965_driver_data(ctx);
1085 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
1087 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
1089 struct object_surface *obj_surface = SURFACE(pPicParameter->reconstructed_picture);
1090 //struct object_surface *obj_surface = SURFACE(pPicParameter->reference_picture[0]);
1091 //struct object_surface *obj_surface = SURFACE(encode_state->current_render_target);
1092 my_debug(obj_surface);
1096 return VA_STATUS_SUCCESS;
/* Encode one AVC picture: init -> prepare (bind buffers, build batch) ->
 * run (submit) -> stop (debug readback).
 * NOTE(review): the VAStatus results of prepare/run/stop are discarded and
 * VA_STATUS_SUCCESS is returned unconditionally — consider propagating
 * failures to the caller. */
1100 gen6_mfc_avc_encode_picture(VADriverContextP ctx,
1101 struct encode_state *encode_state,
1102 struct gen6_encoder_context *gen6_encoder_context)
1104 gen6_mfc_init(ctx, gen6_encoder_context);
1105 gen6_mfc_avc_prepare(ctx, encode_state, gen6_encoder_context);
1106 gen6_mfc_run(ctx, encode_state, gen6_encoder_context);
1107 gen6_mfc_stop(ctx, encode_state, gen6_encoder_context);
1109 return VA_STATUS_SUCCESS;
/* MFC pipeline entry point: dispatch on the VA profile (the switch head is
 * on an elided line).  Only H.264 Baseline is wired up; every other profile
 * falls through to VA_STATUS_ERROR_UNSUPPORTED_PROFILE. */
1113 gen6_mfc_pipeline(VADriverContextP ctx,
1115 struct encode_state *encode_state,
1116 struct gen6_encoder_context *gen6_encoder_context)
1121 case VAProfileH264Baseline:
1122 vaStatus = gen6_mfc_avc_encode_picture(ctx, encode_state, gen6_encoder_context);
1125 /* FIXME: add for other profile */
1127 vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
/* Context-creation hook for the MFC encoder context; the body is elided in
 * this chunk (no buffer setup is visible here — presumably it just returns
 * True, with real allocation deferred to gen6_mfc_init; TODO confirm). */
1134 Bool gen6_mfc_context_init(VADriverContextP ctx, struct gen6_mfc_context *mfc_context)
/* Tear down the MFC context: drop every BO reference acquired during
 * prepare/init and NULL each pointer so the context can be destroyed
 * repeatedly without double-unreferencing.  dri_bo_unreference(NULL) is a
 * no-op, so unconditional calls are safe.  (The function continues past the
 * end of this chunk.) */
1139 Bool gen6_mfc_context_destroy(struct gen6_mfc_context *mfc_context)
1143 dri_bo_unreference(mfc_context->post_deblocking_output.bo);
1144 mfc_context->post_deblocking_output.bo = NULL;
1146 dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
1147 mfc_context->pre_deblocking_output.bo = NULL;
1149 dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
1150 mfc_context->uncompressed_picture_source.bo = NULL;
1152 dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
1153 mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
/* Direct-MV slots: 2 per reference surface plus 2 for the current frame. */
1155 for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
1156 dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
1157 mfc_context->direct_mv_buffers[i].bo = NULL;
/* Scratch/row-store buffers used by the MFC hardware pipeline. */
1160 dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
1161 mfc_context->intra_row_store_scratch_buffer.bo = NULL;
1163 dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
1164 mfc_context->macroblock_status_buffer.bo = NULL;
1166 dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
1167 mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
1169 dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
1170 mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;