1 /*
2  * Copyright © 2010-2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Zhou Chang <chang.zhou@intel.com>
26  *
27  */
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <assert.h>
33 #include <math.h>
34
35 #include "intel_batchbuffer.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "i965_drv_video.h"
39 #include "i965_encoder.h"
40 #include "i965_encoder_utils.h"
41 #include "gen6_mfc.h"
42 #include "gen6_vme.h"
43 #include "intel_media.h"
44
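/*
 * Media kernel binaries (compiled .g6b shaders) referenced by
 * gen6_mfc_kernels below; the hardware batchbuffer path uses them to
 * generate the per-MB MFC_AVC_PAK_OBJECT commands on the GPU instead of
 * building them on the CPU.
 */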
45 static const uint32_t gen6_mfc_batchbuffer_avc_intra[][4] = {
46 #include "shaders/utils/mfc_batchbuffer_avc_intra.g6b"
47 };
48
49 static const uint32_t gen6_mfc_batchbuffer_avc_inter[][4] = {
50 #include "shaders/utils/mfc_batchbuffer_avc_inter.g6b"
51 };
52
53 static struct i965_kernel gen6_mfc_kernels[] = {
54     {
55         "MFC AVC INTRA BATCHBUFFER ",
56         MFC_BATCHBUFFER_AVC_INTRA,
57         gen6_mfc_batchbuffer_avc_intra,
58         sizeof(gen6_mfc_batchbuffer_avc_intra),
59         NULL
60     },
61
62     {
63         "MFC AVC INTER BATCHBUFFER ",
64         MFC_BATCHBUFFER_AVC_INTER,
65         gen6_mfc_batchbuffer_avc_inter,
66         sizeof(gen6_mfc_batchbuffer_avc_inter),
67         NULL
68     },
69 };
70
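/*
 * Program MFX_PIPE_MODE_SELECT for AVC encoding: stream-out disabled,
 * pre-/post-deblocking output enabled according to which buffer object
 * has been set up in the MFC context.
 */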
71 static void
72 gen6_mfc_pipe_mode_select(VADriverContextP ctx,
73                           int standard_select,
74                           struct intel_encoder_context *encoder_context)
75 {
76     struct intel_batchbuffer *batch = encoder_context->base.batch;
77     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
78
79     assert(standard_select == MFX_FORMAT_AVC);
80
81     BEGIN_BCS_BATCH(batch, 4);
82
83     OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (4 - 2));
84     OUT_BCS_BATCH(batch,
85                   (1 << 10) | /* disable Stream-Out; advanced QP/bitrate control needs it enabled */
86                   ((!!mfc_context->post_deblocking_output.bo) << 9)  | /* Post Deblocking Output */
87                   ((!!mfc_context->pre_deblocking_output.bo) << 8)  | /* Pre Deblocking Output */
88                   (0 << 7)  | /* disable TLB prefetch */
89                   (0 << 5)  | /* not in stitch mode */
90                   (1 << 4)  | /* encoding mode */
91                   (2 << 0));  /* Standard Select: AVC */
92     OUT_BCS_BATCH(batch,
93                   (0 << 20) | /* round flag in PB slice */
94                   (0 << 19) | /* round flag in Intra8x8 */
95                   (0 << 7)  | /* expand NOA bus flag */
96                   (1 << 6)  | /* must be 1 */
97                   (0 << 5)  | /* disable clock gating for NOA */
98                   (0 << 4)  | /* terminate if AVC motion and POC table error occurs */
99                   (0 << 3)  | /* terminate if AVC mbdata error occurs */
100                   (0 << 2)  | /* terminate if AVC CABAC/CAVLC decode error occurs */
101                   (0 << 1)  | /* AVC long field motion vector */
102                   (0 << 0));  /* always calculate AVC ILDB boundary strength */
103     OUT_BCS_BATCH(batch, 0);
104
105     ADVANCE_BCS_BATCH(batch);
106 }
107
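/*
 * Program MFX_SURFACE_STATE: a Y-tiled, planar 4:2:0 surface with
 * interleaved U/V, using the width/height and pitches stored in
 * mfc_context->surface_state.
 */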
108 static void
109 gen6_mfc_surface_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
110 {
111     struct intel_batchbuffer *batch = encoder_context->base.batch;
112     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
113
114     BEGIN_BCS_BATCH(batch, 6);
115
116     OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
117     OUT_BCS_BATCH(batch, 0);
118     OUT_BCS_BATCH(batch,
119                   ((mfc_context->surface_state.height - 1) << 19) |
120                   ((mfc_context->surface_state.width - 1) << 6));
121     OUT_BCS_BATCH(batch,
122                   (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
123                   (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
124                   (0 << 22) | /* surface object control state, FIXME??? */
125                   ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
126                   (0 << 2)  | /* must be 0 for interleave U/V */
127                   (1 << 1)  | /* must be y-tiled */
128                   (I965_TILEWALK_YMAJOR << 0));                         /* tile walk, TILEWALK_YMAJOR */
129     OUT_BCS_BATCH(batch,
130                   (0 << 16) |                                                           /* must be 0 for interleave U/V */
131                   (mfc_context->surface_state.h_pitch));                /* y offset for U(cb) */
132     OUT_BCS_BATCH(batch, 0);
133     ADVANCE_BCS_BATCH(batch);
134 }
135
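/*
 * Program MFX_PIPE_BUF_ADDR_STATE: pre-/post-deblocking outputs, the
 * uncompressed source picture, the macroblock status (stream-out) buffer,
 * the intra and deblocking-filter row-store scratch buffers, and the 16
 * reference picture entries.
 */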
136 void
137 gen6_mfc_pipe_buf_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
138 {
139     struct intel_batchbuffer *batch = encoder_context->base.batch;
140     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
141     int i;
142
143     BEGIN_BCS_BATCH(batch, 24);
144
145     OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (24 - 2));
146
147     if (mfc_context->pre_deblocking_output.bo)
148         OUT_BCS_RELOC(batch, mfc_context->pre_deblocking_output.bo,
149                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
150                       0);
151     else
152         OUT_BCS_BATCH(batch, 0);                                                                                        /* pre output addr   */
153
154     if (mfc_context->post_deblocking_output.bo)
155         OUT_BCS_RELOC(batch, mfc_context->post_deblocking_output.bo,
156                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
157                       0);                                                                                       /* post output addr  */ 
158     else
159         OUT_BCS_BATCH(batch, 0);
160
161     OUT_BCS_RELOC(batch, mfc_context->uncompressed_picture_source.bo,
162                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
163                   0);                                                                                   /* uncompressed data */
164     OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
165                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
166                   0);                                                                                   /* StreamOut data*/
167     OUT_BCS_RELOC(batch, mfc_context->intra_row_store_scratch_buffer.bo,
168                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
169                   0);   
170     OUT_BCS_RELOC(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
171                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
172                   0);
173     /* 7..22 Reference pictures*/
174     for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
175         if ( mfc_context->reference_surfaces[i].bo != NULL) {
176             OUT_BCS_RELOC(batch, mfc_context->reference_surfaces[i].bo,
177                           I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
178                           0);                   
179         } else {
180             OUT_BCS_BATCH(batch, 0);
181         }
182     }
183     OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
184                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
185                   0);                                                                                   /* Macroblock status buffer*/
186
187     ADVANCE_BCS_BATCH(batch);
188 }
189
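/*
 * Program MFX_IND_OBJ_BASE_ADDR_STATE: the indirect MV object points at
 * the VME output buffer, and the indirect PAK-BSE object points at
 * mfc_indirect_pak_bse_object, where the compressed bitstream is written.
 */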
190 static void
191 gen6_mfc_ind_obj_base_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
192 {
193     struct intel_batchbuffer *batch = encoder_context->base.batch;
194     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
195     struct gen6_vme_context *vme_context = encoder_context->vme_context;
196
197     BEGIN_BCS_BATCH(batch, 11);
198
199     OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
200     OUT_BCS_BATCH(batch, 0);
201     OUT_BCS_BATCH(batch, 0);
202     /* MFX Indirect MV Object Base Address */
203     OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
204     OUT_BCS_BATCH(batch, 0);    
205     OUT_BCS_BATCH(batch, 0);
206     OUT_BCS_BATCH(batch, 0);
207     OUT_BCS_BATCH(batch, 0);
208     OUT_BCS_BATCH(batch, 0);
209     /*MFC Indirect PAK-BSE Object Base Address for Encoder*/    
210     OUT_BCS_RELOC(batch,
211                   mfc_context->mfc_indirect_pak_bse_object.bo,
212                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
213                   0);
214     OUT_BCS_RELOC(batch,
215                   mfc_context->mfc_indirect_pak_bse_object.bo,
216                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
217                   mfc_context->mfc_indirect_pak_bse_object.end_offset);
218
219     ADVANCE_BCS_BATCH(batch);
220 }
221
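/*
 * Program MFX_BSP_BUF_BASE_ADDR_STATE with the BSD/MPC row-store scratch
 * buffer; the remaining address fields are left zero.
 */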
222 void
223 gen6_mfc_bsp_buf_base_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
224 {
225     struct intel_batchbuffer *batch = encoder_context->base.batch;
226     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
227
228     BEGIN_BCS_BATCH(batch, 4);
229
230     OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (4 - 2));
231     OUT_BCS_RELOC(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
232                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
233                   0);
234     OUT_BCS_BATCH(batch, 0);
235     OUT_BCS_BATCH(batch, 0);
236
237     ADVANCE_BCS_BATCH(batch);
238 }
239
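/*
 * Program MFX_AVC_IMG_STATE with frame-level parameters taken from the
 * sequence/picture parameter buffers: MB counts, entropy coding mode,
 * transform flags, and the rate-control masks and MB size limits.
 */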
240 static void
241 gen6_mfc_avc_img_state(VADriverContextP ctx,struct encode_state *encode_state,
242                        struct intel_encoder_context *encoder_context)
243 {
244     struct intel_batchbuffer *batch = encoder_context->base.batch;
245     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
246     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
247     VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
248     int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
249     int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
250
251     BEGIN_BCS_BATCH(batch, 13);
252     OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (13 - 2));
253     OUT_BCS_BATCH(batch, 
254                   ((width_in_mbs * height_in_mbs) & 0xFFFF));
255     OUT_BCS_BATCH(batch, 
256                   (height_in_mbs << 16) | 
257                   (width_in_mbs << 0));
258     OUT_BCS_BATCH(batch, 
259                   (0 << 24) |     /*Second Chroma QP Offset*/
260                   (0 << 16) |     /*Chroma QP Offset*/
261                   (0 << 14) |   /*Max-bit conformance Intra flag*/
262                   (0 << 13) |   /*Max Macroblock size conformance Inter flag*/
263                   (1 << 12) |   /*Should always be written as "1" */
264                   (0 << 10) |   /*QM Preset Flag */
265                   (0 << 8)  |   /*Image Structure*/
266                   (0 << 0) );   /*Current Decoded Image Frame Store ID, reserved in Encode mode*/
267     OUT_BCS_BATCH(batch,
268                   (400 << 16) |   /*Minimum Frame Size*/
269                   (0 << 15) |   /*Disable reading of Macroblock Status Buffer*/
270                   (0 << 14) |   /*Load BitStream Pointer only once, one slice per frame*/
271                   (0 << 13) |   /*CABAC 0 word insertion test enable*/
272                   (1 << 12) |   /*MVUnpackedEnable, compliant with DXVA*/
273                   (1 << 10) |   /*Chroma Format IDC, 4:2:0*/
274                   (pPicParameter->pic_fields.bits.entropy_coding_mode_flag << 7)  |   /*0: CAVLC encoding mode, 1: CABAC*/
275                   (0 << 6)  |   /*Only valid for VLD decoding mode*/
276                   (0 << 5)  |   /*Constrained Intra Prediction Flag, from PPS*/
277                   (pSequenceParameter->seq_fields.bits.direct_8x8_inference_flag << 4)  |   /*Direct 8x8 inference flag*/
278                   (pPicParameter->pic_fields.bits.transform_8x8_mode_flag << 3)  |   /*8x8 or 4x4 IDCT Transform Mode Flag*/
279                   (1 << 2)  |   /*Frame MB only flag*/
280                   (0 << 1)  |   /*MBAFF mode is inactive*/
281                   (0 << 0) );   /*Field picture flag*/
282     OUT_BCS_BATCH(batch, 
283                   (1<<16)   |   /*Frame Size Rate Control Flag*/  
284                   (1<<12)   |   
285                   (1<<9)    |   /*MB level Rate Control Enabling Flag*/
286                   (1 << 3)  |   /*FrameBitRateMinReportMask*/
287                   (1 << 2)  |   /*FrameBitRateMaxReportMask*/
288                   (1 << 1)  |   /*InterMBMaxSizeReportMask*/
289                   (1 << 0) );   /*IntraMBMaxSizeReportMask*/
290     OUT_BCS_BATCH(batch,                        /*Inter and Intra Conformance Max size limit*/
291                   (0x0600 << 16) |              /*InterMbMaxSz 192 Byte*/
292                   (0x0800) );                   /*IntraMbMaxSz 256 Byte*/
293     OUT_BCS_BATCH(batch, 0x00000000);   /*Reserved : MBZReserved*/
294     OUT_BCS_BATCH(batch, 0x01020304);   /*Slice QP Delta for bitrate control*/                  
295     OUT_BCS_BATCH(batch, 0xFEFDFCFB);           
296     OUT_BCS_BATCH(batch, 0x80601004);   /*MAX = 128KB, MIN = 64KB*/
297     OUT_BCS_BATCH(batch, 0x00800001);   
298     OUT_BCS_BATCH(batch, 0);
299
300     ADVANCE_BCS_BATCH(batch);
301 }
302
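/*
 * Program MFX_AVC_DIRECTMODE_STATE: direct MV buffers for the reference
 * and current frames, followed by the POC list entries.
 */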
303 static void
304 gen6_mfc_avc_directmode_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
305 {
306     struct intel_batchbuffer *batch = encoder_context->base.batch;
307     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
308
309     int i;
310
311     BEGIN_BCS_BATCH(batch, 69);
312
313     OUT_BCS_BATCH(batch, MFX_AVC_DIRECTMODE_STATE | (69 - 2));
314
315     /* Reference frames and Current frames */
316     for(i = 0; i < NUM_MFC_DMV_BUFFERS; i++) {
317         if ( mfc_context->direct_mv_buffers[i].bo != NULL) { 
318             OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[i].bo,
319                           I915_GEM_DOMAIN_INSTRUCTION, 0,
320                           0);
321         } else {
322             OUT_BCS_BATCH(batch, 0);
323         }
324     }
325
326     /* POC list */
327     for (i = 0; i < 32; i++) {
328         OUT_BCS_BATCH(batch, i/2);
329     }
330     OUT_BCS_BATCH(batch, 0);
331     OUT_BCS_BATCH(batch, 0);
332
333     ADVANCE_BCS_BATCH(batch);
334 }
335
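/*
 * Program MFX_AVC_SLICE_STATE for one slice: slice type, active reference
 * counts, weighted-prediction log2 denominators, QP and deblocking
 * parameters, the slice's MB range, and the bit-rate-control grow/shrink
 * and correction values kept per slice type in the MFC context.
 */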
336 static void
337 gen6_mfc_avc_slice_state(VADriverContextP ctx,
338                          VAEncPictureParameterBufferH264 *pic_param,
339                          VAEncSliceParameterBufferH264 *slice_param,
340                          struct encode_state *encode_state,
341                          struct intel_encoder_context *encoder_context,
342                          int rate_control_enable,
343                          int qp,
344                          struct intel_batchbuffer *batch)
345 {
346     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
347     int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
348     int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
349     int beginmb = slice_param->macroblock_address;
350     int endmb = beginmb + slice_param->num_macroblocks;
351     int beginx = beginmb % width_in_mbs;
352     int beginy = beginmb / width_in_mbs;
353     int nextx =  endmb % width_in_mbs;
354     int nexty = endmb / width_in_mbs;
355     int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
356     int last_slice = (endmb == (width_in_mbs * height_in_mbs));
357     int maxQpN, maxQpP;
358     unsigned char correct[6], grow, shrink;
359     int i;
360     int weighted_pred_idc = 0;
361     unsigned int luma_log2_weight_denom = slice_param->luma_log2_weight_denom;
362     unsigned int chroma_log2_weight_denom = slice_param->chroma_log2_weight_denom;
363     int num_ref_l0 = 0, num_ref_l1 = 0;
364
365     if (batch == NULL)
366         batch = encoder_context->base.batch;
367
368     if (slice_type == SLICE_TYPE_I) {
369         luma_log2_weight_denom = 0;
370         chroma_log2_weight_denom = 0;
371     } else if (slice_type == SLICE_TYPE_P) {
372         weighted_pred_idc = pic_param->pic_fields.bits.weighted_pred_flag;
373         num_ref_l0 = pic_param->num_ref_idx_l0_active_minus1 + 1;
374
375         if (slice_param->num_ref_idx_active_override_flag)
376             num_ref_l0 = slice_param->num_ref_idx_l0_active_minus1 + 1;
377     } else if (slice_type == SLICE_TYPE_B) {
378         weighted_pred_idc = pic_param->pic_fields.bits.weighted_bipred_idc;
379         num_ref_l0 = pic_param->num_ref_idx_l0_active_minus1 + 1;
380         num_ref_l1 = pic_param->num_ref_idx_l1_active_minus1 + 1;
381
382         if (slice_param->num_ref_idx_active_override_flag) {
383             num_ref_l0 = slice_param->num_ref_idx_l0_active_minus1 + 1;
384             num_ref_l1 = slice_param->num_ref_idx_l1_active_minus1 + 1;
385         }
386
387         if (weighted_pred_idc == 2) {
388             /* 8.4.3 - Derivation process for prediction weights (8-279) */
389             luma_log2_weight_denom = 5;
390             chroma_log2_weight_denom = 5;
391         }
392     }
393
394     maxQpN = mfc_context->bit_rate_control_context[slice_type].MaxQpNegModifier;
395     maxQpP = mfc_context->bit_rate_control_context[slice_type].MaxQpPosModifier;
396
397     for (i = 0; i < 6; i++)
398         correct[i] = mfc_context->bit_rate_control_context[slice_type].Correct[i];
399
400     grow = mfc_context->bit_rate_control_context[slice_type].GrowInit + 
401         (mfc_context->bit_rate_control_context[slice_type].GrowResistance << 4);
402     shrink = mfc_context->bit_rate_control_context[slice_type].ShrinkInit + 
403         (mfc_context->bit_rate_control_context[slice_type].ShrinkResistance << 4);
404
405     BEGIN_BCS_BATCH(batch, 11);
406
407     OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2) );
408     OUT_BCS_BATCH(batch, slice_type);                   /*Slice Type: I:P:B Slice*/
409
410     OUT_BCS_BATCH(batch,
411                   (num_ref_l0 << 16) |
412                   (num_ref_l1 << 24) |
413                   (chroma_log2_weight_denom << 8) |
414                   (luma_log2_weight_denom << 0));
415
416     OUT_BCS_BATCH(batch, 
417                   (weighted_pred_idc << 30) |
418                   (slice_param->direct_spatial_mv_pred_flag<<29) |             /*Direct Prediction Type*/
419                   (slice_param->disable_deblocking_filter_idc << 27) |
420                   (slice_param->cabac_init_idc << 24) |
421                   (qp<<16) |                    /*Slice Quantization Parameter*/
422                   ((slice_param->slice_beta_offset_div2 & 0xf) << 8) |
423                   ((slice_param->slice_alpha_c0_offset_div2 & 0xf) << 0));
424     OUT_BCS_BATCH(batch,
425                   (beginy << 24) |                      /*First MB X&Y, the beginning position of the current slice*/
426                   (beginx << 16) |
427                   slice_param->macroblock_address );
428     OUT_BCS_BATCH(batch, (nexty << 16) | nextx);                       /*Next slice first MB X&Y*/
429     OUT_BCS_BATCH(batch, 
430                   (0/*rate_control_enable*/ << 31) |            /*in CBR mode RateControlCounterEnable = enable*/
431                   (1 << 30) |           /*ResetRateControlCounter*/
432                   (0 << 28) |           /*RC Trigger Mode = Always Rate Control*/
433                   (4 << 24) |     /*RC Stable Tolerance, middle level*/
434                   (0/*rate_control_enable*/ << 23) |     /*RC Panic Enable*/
435                   (0 << 22) |     /*QP mode, don't modify CBP*/
436                   (0 << 21) |     /*MB Type Direct Conversion Enabled*/ 
437                   (0 << 20) |     /*MB Type Skip Conversion Enabled*/ 
438                   (last_slice << 19) |     /*IsLastSlice*/
439                   (0 << 18) |   /*BitstreamOutputFlag Compressed BitStream Output Disable Flag 0:enable 1:disable*/
440                   (1 << 17) |       /*HeaderPresentFlag*/       
441                   (1 << 16) |       /*SliceData PresentFlag*/
442                   (1 << 15) |       /*TailPresentFlag*/
443                   (1 << 13) |       /*RBSP NAL TYPE*/   
444                   (0 << 12) );    /*CabacZeroWordInsertionEnable*/
445     OUT_BCS_BATCH(batch, mfc_context->mfc_indirect_pak_bse_object.offset);
446     OUT_BCS_BATCH(batch,
447                   (maxQpN << 24) |     /*Target QP - 24 is lowest QP*/ 
448                   (maxQpP << 16) |     /*Target QP + 20 is highest QP*/
449                   (shrink << 8)  |
450                   (grow << 0));   
451     OUT_BCS_BATCH(batch,
452                   (correct[5] << 20) |
453                   (correct[4] << 16) |
454                   (correct[3] << 12) |
455                   (correct[2] << 8) |
456                   (correct[1] << 4) |
457                   (correct[0] << 0));
458     OUT_BCS_BATCH(batch, 0);
459
460     ADVANCE_BCS_BATCH(batch);
461 }
462
463 static void gen6_mfc_avc_qm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
464 {
465     struct intel_batchbuffer *batch = encoder_context->base.batch;
466     int i;
467
468     BEGIN_BCS_BATCH(batch, 58);
469
470     OUT_BCS_BATCH(batch, MFX_AVC_QM_STATE | (58 - 2));
471     OUT_BCS_BATCH(batch, 0xFF);
472     for (i = 0; i < 56; i++) {
473         OUT_BCS_BATCH(batch, 0x10101010);
474     }
475
476     ADVANCE_BCS_BATCH(batch);
477 }
478
479 static void gen6_mfc_avc_fqm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
480 {
481     struct intel_batchbuffer *batch = encoder_context->base.batch;
482     int i;
483
484     BEGIN_BCS_BATCH(batch, 113);
485     OUT_BCS_BATCH(batch, MFC_AVC_FQM_STATE | (113 - 2));
486
487     for (i = 0; i < 112; i++) {
488         OUT_BCS_BATCH(batch, 0x10001000);
489     }   
490
491     ADVANCE_BCS_BATCH(batch);   
492 }
493
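/*
 * Emit an MFC_AVC_INSERT_OBJECT command to copy raw bits (packed headers
 * or trailing data) into the output bitstream, with flags controlling
 * emulation-byte handling, last-header and end-of-slice behaviour.
 */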
494 static void
495 gen6_mfc_avc_insert_object(VADriverContextP ctx, struct intel_encoder_context *encoder_context,
496                            unsigned int *insert_data, int length_in_dws, int data_bits_in_last_dw,
497                            int skip_emul_byte_count, int is_last_header, int is_end_of_slice, int emulation_flag,
498                            struct intel_batchbuffer *batch)
499 {
500     if (batch == NULL)
501         batch = encoder_context->base.batch;
502
503     BEGIN_BCS_BATCH(batch, length_in_dws + 2);
504
505     OUT_BCS_BATCH(batch, MFC_AVC_INSERT_OBJECT | (length_in_dws + 2 - 2));
506
507     OUT_BCS_BATCH(batch,
508                   (0 << 16) |   /* always start at offset 0 */
509                   (data_bits_in_last_dw << 8) |
510                   (skip_emul_byte_count << 4) |
511                   (!!emulation_flag << 3) |
512                   ((!!is_last_header) << 2) |
513                   ((!!is_end_of_slice) << 1) |
514                   (0 << 0));    /* FIXME: ??? */
515
516     intel_batchbuffer_data(batch, insert_data, length_in_dws * 4);
517     ADVANCE_BCS_BATCH(batch);
518 }
519
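/*
 * (Re)allocate the per-frame MFC resources: release the previous output,
 * reference and direct-MV buffers, allocate the row-store and macroblock
 * status scratch buffers sized from the frame dimensions in the sequence
 * parameters, and create the aux batchbuffer used for slice headers/data.
 */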
520 void 
521 gen6_mfc_init(VADriverContextP ctx, 
522               struct encode_state *encode_state,
523               struct intel_encoder_context *encoder_context)
524 {
525     struct i965_driver_data *i965 = i965_driver_data(ctx);
526     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
527     dri_bo *bo;
528     int i;
529     int width_in_mbs = 0;
530     int height_in_mbs = 0;
531
532     if (encoder_context->codec == CODEC_H264) {
533         VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
534         width_in_mbs = pSequenceParameter->picture_width_in_mbs;
535         height_in_mbs = pSequenceParameter->picture_height_in_mbs;
536     } else {
537         VAEncSequenceParameterBufferMPEG2 *pSequenceParameter = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
538
539         assert(encoder_context->codec == CODEC_MPEG2);
540
541         width_in_mbs = ALIGN(pSequenceParameter->picture_width, 16) / 16;
542         height_in_mbs = ALIGN(pSequenceParameter->picture_height, 16) / 16;
543     }
544
545     /*Encode common setup for MFC*/
546     dri_bo_unreference(mfc_context->post_deblocking_output.bo);
547     mfc_context->post_deblocking_output.bo = NULL;
548
549     dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
550     mfc_context->pre_deblocking_output.bo = NULL;
551
552     dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
553     mfc_context->uncompressed_picture_source.bo = NULL;
554
555     dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo); 
556     mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
557
558     for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
559         if (mfc_context->direct_mv_buffers[i].bo != NULL)
560             dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
561         mfc_context->direct_mv_buffers[i].bo = NULL;
562     }
563
564     for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
565         if (mfc_context->reference_surfaces[i].bo != NULL)
566             dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
567         mfc_context->reference_surfaces[i].bo = NULL;  
568     }
569
570     dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
571     bo = dri_bo_alloc(i965->intel.bufmgr,
572                       "Buffer",
573                       width_in_mbs * 64,
574                       64);
575     assert(bo);
576     mfc_context->intra_row_store_scratch_buffer.bo = bo;
577
578     dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
579     bo = dri_bo_alloc(i965->intel.bufmgr,
580                       "Buffer",
581                       width_in_mbs * height_in_mbs * 16,
582                       64);
583     assert(bo);
584     mfc_context->macroblock_status_buffer.bo = bo;
585
586     dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
587     bo = dri_bo_alloc(i965->intel.bufmgr,
588                       "Buffer",
589                       4 * width_in_mbs * 64,  /* 4 * width_in_mbs * 64 */
590                       64);
591     assert(bo);
592     mfc_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
593
594     dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
595     bo = dri_bo_alloc(i965->intel.bufmgr,
596                       "Buffer",
597                       128 * width_in_mbs, /* 2 * width_in_mbs * 64 */
598                       0x1000);
599     assert(bo);
600     mfc_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
601
602     dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
603     mfc_context->mfc_batchbuffer_surface.bo = NULL;
604
605     dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
606     mfc_context->aux_batchbuffer_surface.bo = NULL;
607
608     if (mfc_context->aux_batchbuffer)
609         intel_batchbuffer_free(mfc_context->aux_batchbuffer);
610
611     mfc_context->aux_batchbuffer = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD, 0);
612     mfc_context->aux_batchbuffer_surface.bo = mfc_context->aux_batchbuffer->buffer;
613     dri_bo_reference(mfc_context->aux_batchbuffer_surface.bo);
614     mfc_context->aux_batchbuffer_surface.pitch = 16;
615     mfc_context->aux_batchbuffer_surface.num_blocks = mfc_context->aux_batchbuffer->size / 16;
616     mfc_context->aux_batchbuffer_surface.size_block = 16;
617
618     i965_gpe_context_init(ctx, &mfc_context->gpe_context);
619 }
620
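/*
 * Frame-level programming for AVC: emit the pipe mode, surface, indirect
 * object, buffer address, image, QM/FQM, direct-mode and reference index
 * states through the hooks stored in the MFC context.
 */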
621 static void gen6_mfc_avc_pipeline_picture_programing( VADriverContextP ctx,
622                                                       struct encode_state *encode_state,
623                                                       struct intel_encoder_context *encoder_context)
624 {
625     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
626
627     mfc_context->pipe_mode_select(ctx, MFX_FORMAT_AVC, encoder_context);
628     mfc_context->set_surface_state(ctx, encoder_context);
629     mfc_context->ind_obj_base_addr_state(ctx, encoder_context);
630     gen6_mfc_pipe_buf_addr_state(ctx, encoder_context);
631     gen6_mfc_bsp_buf_base_addr_state(ctx, encoder_context);
632     mfc_context->avc_img_state(ctx, encode_state, encoder_context);
633     mfc_context->avc_qm_state(ctx, encoder_context);
634     mfc_context->avc_fqm_state(ctx, encoder_context);
635     gen6_mfc_avc_directmode_state(ctx, encoder_context); 
636     intel_mfc_avc_ref_idx_state(ctx, encode_state, encoder_context);
637 }
638
639
640 VAStatus
641 gen6_mfc_run(VADriverContextP ctx, 
642              struct encode_state *encode_state,
643              struct intel_encoder_context *encoder_context)
644 {
645     struct intel_batchbuffer *batch = encoder_context->base.batch;
646
647     intel_batchbuffer_flush(batch);             //run the pipeline
648
649     return VA_STATUS_SUCCESS;
650 }
651
652 VAStatus
653 gen6_mfc_stop(VADriverContextP ctx, 
654               struct encode_state *encode_state,
655               struct intel_encoder_context *encoder_context,
656               int *encoded_bits_size)
657 {
658     VAStatus vaStatus = VA_STATUS_ERROR_UNKNOWN;
659     VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
660     VACodedBufferSegment *coded_buffer_segment;
661     
662     vaStatus = i965_MapBuffer(ctx, pPicParameter->coded_buf, (void **)&coded_buffer_segment);
663     assert(vaStatus == VA_STATUS_SUCCESS);
664     *encoded_bits_size = coded_buffer_segment->size * 8;
665     i965_UnmapBuffer(ctx, pPicParameter->coded_buf);
666
667     return VA_STATUS_SUCCESS;
668 }
669
670 #if __SOFTWARE__
671
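/*
 * Software (CPU) path, compiled in when __SOFTWARE__ is non-zero: emit one
 * MFC_AVC_PAK_OBJECT command for an intra macroblock, taking the MB mode
 * and prediction modes from the VME output message.
 */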
672 static int
673 gen6_mfc_avc_pak_object_intra(VADriverContextP ctx, int x, int y, int end_mb, int qp,unsigned int *msg,
674                               struct intel_encoder_context *encoder_context,
675                               unsigned char target_mb_size, unsigned char max_mb_size,
676                               struct intel_batchbuffer *batch)
677 {
678     int len_in_dwords = 11;
679
680     if (batch == NULL)
681         batch = encoder_context->base.batch;
682
683     BEGIN_BCS_BATCH(batch, len_in_dwords);
684
685     OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
686     OUT_BCS_BATCH(batch, 0);
687     OUT_BCS_BATCH(batch, 0);
688     OUT_BCS_BATCH(batch, 
689                   (0 << 24) |           /* PackedMvNum, Debug*/
690                   (0 << 20) |           /* No motion vector */
691                   (1 << 19) |           /* CbpDcY */
692                   (1 << 18) |           /* CbpDcU */
693                   (1 << 17) |           /* CbpDcV */
694                   (msg[0] & 0xFFFF) );
695
696     OUT_BCS_BATCH(batch, (0xFFFF << 16) | (y << 8) | x);                /* Code Block Pattern for Y*/
697     OUT_BCS_BATCH(batch, 0x000F000F);                                                   /* Code Block Pattern */                
698     OUT_BCS_BATCH(batch, (0 << 27) | (end_mb << 26) | qp);      /* Last MB */
699
700     /*Stuff for Intra MB*/
701     OUT_BCS_BATCH(batch, msg[1]);                       /* We use Intra16x16, not the 4x4 pred mode*/
702     OUT_BCS_BATCH(batch, msg[2]);       
703     OUT_BCS_BATCH(batch, msg[3]&0xFC);          
704     
705     /*MaxSizeInWord and TargetSizeInWord*/
706     OUT_BCS_BATCH(batch, (max_mb_size << 24) |
707                   (target_mb_size << 16) );
708
709     ADVANCE_BCS_BATCH(batch);
710
711     return len_in_dwords;
712 }
713
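/*
 * Emit one MFC_AVC_PAK_OBJECT command for an inter macroblock; the motion
 * vectors are fetched indirectly from the VME output buffer at 'offset'.
 */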
714 static int
715 gen6_mfc_avc_pak_object_inter(VADriverContextP ctx, int x, int y, int end_mb, int qp,
716                               unsigned int *msg, unsigned int offset,
717                               struct intel_encoder_context *encoder_context,
718                               unsigned char target_mb_size,unsigned char max_mb_size, int slice_type,
719                               struct intel_batchbuffer *batch)
720 {
721     struct gen6_vme_context *vme_context = encoder_context->vme_context;
722     int len_in_dwords = 11;
723
724     if (batch == NULL)
725         batch = encoder_context->base.batch;
726
727     BEGIN_BCS_BATCH(batch, len_in_dwords);
728
729     OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
730
731     OUT_BCS_BATCH(batch, msg[2]);         /* 32 MV*/
732     OUT_BCS_BATCH(batch, offset);
733
734     OUT_BCS_BATCH(batch, msg[0]);
735
736     OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x);        /* Code Block Pattern for Y*/
737     OUT_BCS_BATCH(batch, 0x000F000F);                         /* Code Block Pattern */  
738 #if 0 
739     if ( slice_type == SLICE_TYPE_B) {
740         OUT_BCS_BATCH(batch, (0xF<<28) | (end_mb << 26) | qp);  /* Last MB */
741     } else {
742         OUT_BCS_BATCH(batch, (end_mb << 26) | qp);      /* Last MB */
743     }
744 #else
745     OUT_BCS_BATCH(batch, (end_mb << 26) | qp);  /* Last MB */
746 #endif
747
748
749     /*Stuff for Inter MB*/
750     OUT_BCS_BATCH(batch, msg[1]);        
751     OUT_BCS_BATCH(batch, vme_context->ref_index_in_mb[0]);
752     OUT_BCS_BATCH(batch, vme_context->ref_index_in_mb[1]);
753
754     /*MaxSizeInWord and TargetSizeInWord*/
755     OUT_BCS_BATCH(batch, (max_mb_size << 24) |
756                   (target_mb_size << 16) );
757
758     ADVANCE_BCS_BATCH(batch);
759
760     return len_in_dwords;
761 }
762
763 static void 
764 gen6_mfc_avc_pipeline_slice_programing(VADriverContextP ctx,
765                                        struct encode_state *encode_state,
766                                        struct intel_encoder_context *encoder_context,
767                                        int slice_index,
768                                        struct intel_batchbuffer *slice_batch)
769 {
770     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
771     struct gen6_vme_context *vme_context = encoder_context->vme_context;
772     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
773     VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
774     VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[slice_index]->buffer; 
775     unsigned int *msg = NULL, offset = 0;
776     int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
777     int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
778     int last_slice = (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks) == (width_in_mbs * height_in_mbs);
779     int i,x,y;
780     int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
781     unsigned int rate_control_mode = encoder_context->rate_control_mode;
782     unsigned char *slice_header = NULL;
783     int slice_header_length_in_bits = 0;
784     unsigned int tail_data[] = { 0x0, 0x0 };
785     int slice_type = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
786     int is_intra = slice_type == SLICE_TYPE_I;
787
788     if (rate_control_mode == VA_RC_CBR) {
789         qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
790         pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
791     }
792
793     /* only support for 8-bit pixel bit-depth */
794     assert(pSequenceParameter->bit_depth_luma_minus8 == 0);
795     assert(pSequenceParameter->bit_depth_chroma_minus8 == 0);
796     assert(pPicParameter->pic_init_qp >= 0 && pPicParameter->pic_init_qp < 52);
797     assert(qp >= 0 && qp < 52);
798
799     gen6_mfc_avc_slice_state(ctx, 
800                              pPicParameter,
801                              pSliceParameter,
802                              encode_state, encoder_context,
803                              (rate_control_mode == VA_RC_CBR), qp, slice_batch);
804
805     if ( slice_index == 0) 
806         intel_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
807
808     slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
809
810     // slice header
811     mfc_context->insert_object(ctx, encoder_context,
812                                (unsigned int *)slice_header, ALIGN(slice_header_length_in_bits, 32) >> 5, slice_header_length_in_bits & 0x1f,
813                                5,  /* first 5 bytes are start code + nal unit type */
814                                1, 0, 1, slice_batch);
815
816     dri_bo_map(vme_context->vme_output.bo , 1);
817     msg = (unsigned int *)vme_context->vme_output.bo->virtual;
818
819     if (is_intra) {
820         msg += pSliceParameter->macroblock_address * INTRA_VME_OUTPUT_IN_DWS;
821     } else {
822         msg += pSliceParameter->macroblock_address * INTER_VME_OUTPUT_IN_DWS;
823         msg += 32; /* the first 32 DWs are MVs */
824         offset = pSliceParameter->macroblock_address * INTER_VME_OUTPUT_IN_BYTES;
825     }
826    
827     for (i = pSliceParameter->macroblock_address; 
828          i < pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks; i++) {
829         int last_mb = (i == (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks - 1) );
830         x = i % width_in_mbs;
831         y = i / width_in_mbs;
832
833         if (is_intra) {
834             assert(msg);
835             gen6_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
836             msg += INTRA_VME_OUTPUT_IN_DWS;
837         } else {
838             if (msg[0] & INTRA_MB_FLAG_MASK) {
839                 gen6_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
840             } else {
841                 gen6_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp, msg, offset, encoder_context, 0, 0, slice_type, slice_batch);
842             }
843
844             msg += INTER_VME_OUTPUT_IN_DWS;
845             offset += INTER_VME_OUTPUT_IN_BYTES;
846         }
847     }
848    
849     dri_bo_unmap(vme_context->vme_output.bo);
850
851     if ( last_slice ) {    
852         mfc_context->insert_object(ctx, encoder_context,
853                                    tail_data, 2, 8,
854                                    2, 1, 1, 0, slice_batch);
855     } else {
856         mfc_context->insert_object(ctx, encoder_context,
857                                    tail_data, 1, 8,
858                                    1, 1, 1, 0, slice_batch);
859     }
860
861     free(slice_header);
862
863 }
864
865 static dri_bo *
866 gen6_mfc_avc_software_batchbuffer(VADriverContextP ctx,
867                                   struct encode_state *encode_state,
868                                   struct intel_encoder_context *encoder_context)
869 {
870     struct i965_driver_data *i965 = i965_driver_data(ctx);
871     struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD, 0);
872     dri_bo *batch_bo = batch->buffer;
873     int i;
874
875     for (i = 0; i < encode_state->num_slice_params_ext; i++) {
876         gen6_mfc_avc_pipeline_slice_programing(ctx, encode_state, encoder_context, i, batch);
877     }
878
879     intel_batchbuffer_align(batch, 8);
880     
881     BEGIN_BCS_BATCH(batch, 2);
882     OUT_BCS_BATCH(batch, 0);
883     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_END);
884     ADVANCE_BCS_BATCH(batch);
885
886     dri_bo_reference(batch_bo);
887     intel_batchbuffer_free(batch);
888
889     return batch_bo;
890 }
891
892 #else
893
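/*
 * Hardware (GPU) path: the following functions set up the binding-table
 * surfaces, the interface descriptor table and the MEDIA_OBJECT commands
 * so that the MFC batchbuffer kernels build the PAK commands on the GPU.
 */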
894 static void
895 gen6_mfc_batchbuffer_surfaces_input(VADriverContextP ctx,
896                                     struct encode_state *encode_state,
897                                     struct intel_encoder_context *encoder_context)
898
899 {
900     struct gen6_vme_context *vme_context = encoder_context->vme_context;
901     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
902
903     assert(vme_context->vme_output.bo);
904     mfc_context->buffer_suface_setup(ctx,
905                                      &mfc_context->gpe_context,
906                                      &vme_context->vme_output,
907                                      BINDING_TABLE_OFFSET(BIND_IDX_VME_OUTPUT),
908                                      SURFACE_STATE_OFFSET(BIND_IDX_VME_OUTPUT));
909     assert(mfc_context->aux_batchbuffer_surface.bo);
910     mfc_context->buffer_suface_setup(ctx,
911                                      &mfc_context->gpe_context,
912                                      &mfc_context->aux_batchbuffer_surface,
913                                      BINDING_TABLE_OFFSET(BIND_IDX_MFC_SLICE_HEADER),
914                                      SURFACE_STATE_OFFSET(BIND_IDX_MFC_SLICE_HEADER));
915 }
916
917 static void
918 gen6_mfc_batchbuffer_surfaces_output(VADriverContextP ctx,
919                                      struct encode_state *encode_state,
920                                      struct intel_encoder_context *encoder_context)
921
922 {
923     struct i965_driver_data *i965 = i965_driver_data(ctx);
924     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
925     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
926     int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
927     int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
928     mfc_context->mfc_batchbuffer_surface.num_blocks = width_in_mbs * height_in_mbs + encode_state->num_slice_params_ext * 8 + 1;
929     mfc_context->mfc_batchbuffer_surface.size_block = 16 * CMD_LEN_IN_OWORD; /* 3 OWORDs */
930     mfc_context->mfc_batchbuffer_surface.pitch = 16;
931     mfc_context->mfc_batchbuffer_surface.bo = dri_bo_alloc(i965->intel.bufmgr, 
932                                                            "MFC batchbuffer",
933                                                            mfc_context->mfc_batchbuffer_surface.num_blocks * mfc_context->mfc_batchbuffer_surface.size_block,
934                                                            0x1000);
935     mfc_context->buffer_suface_setup(ctx,
936                                      &mfc_context->gpe_context,
937                                      &mfc_context->mfc_batchbuffer_surface,
938                                      BINDING_TABLE_OFFSET(BIND_IDX_MFC_BATCHBUFFER),
939                                      SURFACE_STATE_OFFSET(BIND_IDX_MFC_BATCHBUFFER));
940 }
941
942 static void
943 gen6_mfc_batchbuffer_surfaces_setup(VADriverContextP ctx, 
944                                     struct encode_state *encode_state,
945                                     struct intel_encoder_context *encoder_context)
946 {
947     gen6_mfc_batchbuffer_surfaces_input(ctx, encode_state, encoder_context);
948     gen6_mfc_batchbuffer_surfaces_output(ctx, encode_state, encoder_context);
949 }
950
951 static void
952 gen6_mfc_batchbuffer_idrt_setup(VADriverContextP ctx, 
953                                 struct encode_state *encode_state,
954                                 struct intel_encoder_context *encoder_context)
955 {
956     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
957     struct gen6_interface_descriptor_data *desc;   
958     int i;
959     dri_bo *bo;
960
961     bo = mfc_context->gpe_context.idrt.bo;
962     dri_bo_map(bo, 1);
963     assert(bo->virtual);
964     desc = bo->virtual;
965
966     for (i = 0; i < mfc_context->gpe_context.num_kernels; i++) {
967         struct i965_kernel *kernel;
968
969         kernel = &mfc_context->gpe_context.kernels[i];
970         assert(sizeof(*desc) == 32);
971
972         /*Setup the descriptor table*/
973         memset(desc, 0, sizeof(*desc));
974         desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
975         desc->desc2.sampler_count = 0;
976         desc->desc2.sampler_state_pointer = 0;
977         desc->desc3.binding_table_entry_count = 2;
978         desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
979         desc->desc4.constant_urb_entry_read_offset = 0;
980         desc->desc4.constant_urb_entry_read_length = 4;
981                 
982         /*kernel start*/
983         dri_bo_emit_reloc(bo,   
984                           I915_GEM_DOMAIN_INSTRUCTION, 0,
985                           0,
986                           i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
987                           kernel->bo);
988         desc++;
989     }
990
991     dri_bo_unmap(bo);
992 }
993
994 static void
995 gen6_mfc_batchbuffer_constant_setup(VADriverContextP ctx, 
996                                     struct encode_state *encode_state,
997                                     struct intel_encoder_context *encoder_context)
998 {
999     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1000     
1001     (void)mfc_context;
1002 }
1003
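/*
 * Emit a MEDIA_OBJECT command whose inline data (head/tail offsets and
 * sizes, MB count, position, QP and reference indices) is consumed by the
 * MFC batchbuffer kernel for one group of macroblocks.
 */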
1004 static void
1005 gen6_mfc_batchbuffer_emit_object_command(struct intel_batchbuffer *batch,
1006                                          int index,
1007                                          int head_offset,
1008                                          int batchbuffer_offset,
1009                                          int head_size,
1010                                          int tail_size,
1011                                          int number_mb_cmds,
1012                                          int first_object,
1013                                          int last_object,
1014                                          int last_slice,
1015                                          int mb_x,
1016                                          int mb_y,
1017                                          int width_in_mbs,
1018                                          int qp,
1019                                          unsigned int ref_index[2])
1020 {
1021     BEGIN_BATCH(batch, 14);
1022     
1023     OUT_BATCH(batch, CMD_MEDIA_OBJECT | (14 - 2));
1024     OUT_BATCH(batch, index);
1025     OUT_BATCH(batch, 0);
1026     OUT_BATCH(batch, 0);
1027     OUT_BATCH(batch, 0);
1028     OUT_BATCH(batch, 0);
1029    
1030     /*inline data */
1031     OUT_BATCH(batch, head_offset);
1032     OUT_BATCH(batch, batchbuffer_offset);
1033     OUT_BATCH(batch, 
1034               head_size << 16 |
1035               tail_size);
1036     OUT_BATCH(batch,
1037               number_mb_cmds << 16 |
1038               first_object << 2 |
1039               last_object << 1 |
1040               last_slice);
1041     OUT_BATCH(batch,
1042               mb_y << 8 |
1043               mb_x);
1044     OUT_BATCH(batch,
1045               qp << 16 |
1046               width_in_mbs);
1047     OUT_BATCH(batch, ref_index[0]);
1048     OUT_BATCH(batch, ref_index[1]);
1049
1050     ADVANCE_BATCH(batch);
1051 }
1052
1053 static void
1054 gen6_mfc_avc_batchbuffer_slice_command(VADriverContextP ctx,
1055                                        struct intel_encoder_context *encoder_context,
1056                                        VAEncSliceParameterBufferH264 *slice_param,
1057                                        int head_offset,
1058                                        unsigned short head_size,
1059                                        unsigned short tail_size,
1060                                        int batchbuffer_offset,
1061                                        int qp,
1062                                        int last_slice)
1063 {
1064     struct intel_batchbuffer *batch = encoder_context->base.batch;
1065     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1066     struct gen6_vme_context *vme_context = encoder_context->vme_context;
1067     int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
1068     int total_mbs = slice_param->num_macroblocks;
1069     int number_mb_cmds = 128;
1070     int starting_mb = 0;
1071     int last_object = 0;
1072     int first_object = 1;
1073     int i;
1074     int mb_x, mb_y;
1075     int index = (slice_param->slice_type == SLICE_TYPE_I) ? MFC_BATCHBUFFER_AVC_INTRA : MFC_BATCHBUFFER_AVC_INTER;
1076
1077     for (i = 0; i < total_mbs / number_mb_cmds; i++) {
1078         last_object = (total_mbs - starting_mb) == number_mb_cmds;
1079         mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
1080         mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
1081         assert(mb_x <= 255 && mb_y <= 255);
1082
1083         starting_mb += number_mb_cmds;
1084
1085         gen6_mfc_batchbuffer_emit_object_command(batch,
1086                                                  index,
1087                                                  head_offset,
1088                                                  batchbuffer_offset,
1089                                                  head_size,
1090                                                  tail_size,
1091                                                  number_mb_cmds,
1092                                                  first_object,
1093                                                  last_object,
1094                                                  last_slice,
1095                                                  mb_x,
1096                                                  mb_y,
1097                                                  width_in_mbs,
1098                                                  qp,
1099                                                  vme_context->ref_index_in_mb);
1100
1101         if (first_object) {
1102             head_offset += head_size;
1103             batchbuffer_offset += head_size;
1104         }
1105
1106         if (last_object) {
1107             head_offset += tail_size;
1108             batchbuffer_offset += tail_size;
1109         }
1110
1111         batchbuffer_offset += number_mb_cmds * CMD_LEN_IN_OWORD;
1112
1113         first_object = 0;
1114     }
1115
1116     if (!last_object) {
1117         last_object = 1;
1118         number_mb_cmds = total_mbs % number_mb_cmds;
1119         mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
1120         mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
1121         assert(mb_x <= 255 && mb_y <= 255);
1122         starting_mb += number_mb_cmds;
1123
1124         gen6_mfc_batchbuffer_emit_object_command(batch,
1125                                                  index,
1126                                                  head_offset,
1127                                                  batchbuffer_offset,
1128                                                  head_size,
1129                                                  tail_size,
1130                                                  number_mb_cmds,
1131                                                  first_object,
1132                                                  last_object,
1133                                                  last_slice,
1134                                                  mb_x,
1135                                                  mb_y,
1136                                                  width_in_mbs,
1137                                                  qp,
1138                                                  vme_context->ref_index_in_mb);
1139     }
1140 }
1141                           
1142 /*
1143  * return size in Owords (16bytes)
1144  */         
1145 static int
1146 gen6_mfc_avc_batchbuffer_slice(VADriverContextP ctx,
1147                                struct encode_state *encode_state,
1148                                struct intel_encoder_context *encoder_context,
1149                                int slice_index,
1150                                int batchbuffer_offset)
1151 {
1152     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1153     struct intel_batchbuffer *slice_batch = mfc_context->aux_batchbuffer;
1154     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
1155     VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
1156     VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[slice_index]->buffer; 
1157     int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
1158     int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
1159     int last_slice = (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks) == (width_in_mbs * height_in_mbs);
1160     int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
1161     unsigned int rate_control_mode = encoder_context->rate_control_mode;
1162     unsigned char *slice_header = NULL;
1163     int slice_header_length_in_bits = 0;
1164     unsigned int tail_data[] = { 0x0, 0x0 };
1165     long head_offset;
1166     int old_used = intel_batchbuffer_used_size(slice_batch), used;
1167     unsigned short head_size, tail_size;
1168     int slice_type = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
1169
1170     if (rate_control_mode == VA_RC_CBR) {
1171         qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
1172         pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
1173     }
1174
1175     /* only support for 8-bit pixel bit-depth */
1176     assert(pSequenceParameter->bit_depth_luma_minus8 == 0);
1177     assert(pSequenceParameter->bit_depth_chroma_minus8 == 0);
1178     assert(pPicParameter->pic_init_qp >= 0 && pPicParameter->pic_init_qp < 52);
1179     assert(qp >= 0 && qp < 52);
1180
1181     head_offset = old_used / 16;
1182     gen6_mfc_avc_slice_state(ctx,
1183                              pPicParameter,
1184                              pSliceParameter,
1185                              encode_state,
1186                              encoder_context,
1187                              (rate_control_mode == VA_RC_CBR),
1188                              qp,
1189                              slice_batch);
1190
1191     if (slice_index == 0)
1192         intel_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
1193
1194     slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
1195
1196     // slice header
1197     mfc_context->insert_object(ctx,
1198                                encoder_context,
1199                                (unsigned int *)slice_header,
1200                                ALIGN(slice_header_length_in_bits, 32) >> 5,
1201                                slice_header_length_in_bits & 0x1f,
1202                                5,  /* first 5 bytes are start code + nal unit type */
1203                                1,
1204                                0,
1205                                1,
1206                                slice_batch);
1207     free(slice_header);
1208
1209     intel_batchbuffer_align(slice_batch, 16); /* align to an OWord boundary */
1210     used = intel_batchbuffer_used_size(slice_batch);
1211     head_size = (used - old_used) / 16;
1212     old_used = used;
1213
1214     /* tail */
1215     if (last_slice) {    
1216         mfc_context->insert_object(ctx,
1217                                    encoder_context,
1218                                    tail_data,
1219                                    2,
1220                                    8,
1221                                    2,
1222                                    1,
1223                                    1,
1224                                    0,
1225                                    slice_batch);
1226     } else {
1227         mfc_context->insert_object(ctx,
1228                                    encoder_context,
1229                                    tail_data,
1230                                    1,
1231                                    8,
1232                                    1,
1233                                    1,
1234                                    1,
1235                                    0,
1236                                    slice_batch);
1237     }
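    /*
     * Judging by the argument layout of the slice-header call above, the last
     * slice flushes two DWords of zero tail data while other slices emit one.
     */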
1238
1239     intel_batchbuffer_align(slice_batch, 16); /* align to an OWord boundary */
1240     used = intel_batchbuffer_used_size(slice_batch);
1241     tail_size = (used - old_used) / 16;
1242
1243    
1244     gen6_mfc_avc_batchbuffer_slice_command(ctx,
1245                                            encoder_context,
1246                                            pSliceParameter,
1247                                            head_offset,
1248                                            head_size,
1249                                            tail_size,
1250                                            batchbuffer_offset,
1251                                            qp,
1252                                            last_slice);
1253
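    /* Each macroblock command in the hardware-built batch occupies
     * CMD_LEN_IN_OWORD OWords, hence the per-MB term in the total below. */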
1254     return head_size + tail_size + pSliceParameter->num_macroblocks * CMD_LEN_IN_OWORD;
1255 }
1256
1257 static void
1258 gen6_mfc_avc_batchbuffer_pipeline(VADriverContextP ctx,
1259                                   struct encode_state *encode_state,
1260                                   struct intel_encoder_context *encoder_context)
1261 {
1262     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1263     struct intel_batchbuffer *batch = encoder_context->base.batch;
1264     int i, size, offset = 0;
1265     intel_batchbuffer_start_atomic(batch, 0x4000); 
1266     gen6_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
1267
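    /*
     * Walk the slices in order; each call returns the slice's footprint in
     * OWords so the next slice's commands start right after it.
     */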
1268     for ( i = 0; i < encode_state->num_slice_params_ext; i++) {
1269         size = gen6_mfc_avc_batchbuffer_slice(ctx, encode_state, encoder_context, i, offset);
1270         offset += size;
1271     }
1272
1273     intel_batchbuffer_end_atomic(batch);
1274     intel_batchbuffer_flush(batch);
1275 }
1276
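/*
 * Build the slice/MB batchbuffer with the media (GPE) pipeline: bind the
 * surfaces, set up the interface descriptor table and the constant buffer,
 * then run the batchbuffer generation kernels slice by slice.
 */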
1277 static void
1278 gen6_mfc_build_avc_batchbuffer(VADriverContextP ctx, 
1279                                struct encode_state *encode_state,
1280                                struct intel_encoder_context *encoder_context)
1281 {
1282     gen6_mfc_batchbuffer_surfaces_setup(ctx, encode_state, encoder_context);
1283     gen6_mfc_batchbuffer_idrt_setup(ctx, encode_state, encoder_context);
1284     gen6_mfc_batchbuffer_constant_setup(ctx, encode_state, encoder_context);
1285     gen6_mfc_avc_batchbuffer_pipeline(ctx, encode_state, encoder_context);
1286 }
1287
1288 static dri_bo *
1289 gen6_mfc_avc_hardware_batchbuffer(VADriverContextP ctx,
1290                                   struct encode_state *encode_state,
1291                                   struct intel_encoder_context *encoder_context)
1292 {
1293     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1294
1295     gen6_mfc_build_avc_batchbuffer(ctx, encode_state, encoder_context);
1296     dri_bo_reference(mfc_context->mfc_batchbuffer_surface.bo);
1297
1298     return mfc_context->mfc_batchbuffer_surface.bo;
1299 }
1300
1301 #endif
1302
1303
1304 static void
1305 gen6_mfc_avc_pipeline_programing(VADriverContextP ctx,
1306                                  struct encode_state *encode_state,
1307                                  struct intel_encoder_context *encoder_context)
1308 {
1309     struct intel_batchbuffer *batch = encoder_context->base.batch;
1310     dri_bo *slice_batch_bo;
1311
1312     if ( intel_mfc_interlace_check(ctx, encode_state, encoder_context) ) {
1313         fprintf(stderr, "The current VA driver doesn't support interlaced mode!\n");
1314         assert(0);
1315         return; 
1316     }
1317
1318 #if __SOFTWARE__
1319     slice_batch_bo = gen6_mfc_avc_software_batchbuffer(ctx, encode_state, encoder_context);
1320 #else
1321     slice_batch_bo = gen6_mfc_avc_hardware_batchbuffer(ctx, encode_state, encoder_context);
1322 #endif
1323
1324     // begin programming
1325     intel_batchbuffer_start_atomic_bcs(batch, 0x4000); 
1326     intel_batchbuffer_emit_mi_flush(batch);
1327     
1328     // picture-level programming
1329     gen6_mfc_avc_pipeline_picture_programing(ctx, encode_state, encoder_context);
1330
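    /* Chain to the slice batchbuffer built above; the per-slice and per-MB
     * commands live in slice_batch_bo, not in the main BCS batch. */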
1331     BEGIN_BCS_BATCH(batch, 2);
1332     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8));
1333     OUT_BCS_RELOC(batch,
1334                   slice_batch_bo,
1335                   I915_GEM_DOMAIN_COMMAND, 0, 
1336                   0);
1337     ADVANCE_BCS_BATCH(batch);
1338
1339     // end programming
1340     intel_batchbuffer_end_atomic(batch);
1341
1342     dri_bo_unreference(slice_batch_bo);
1343 }
1344
1345 VAStatus
1346 gen6_mfc_avc_encode_picture(VADriverContextP ctx, 
1347                             struct encode_state *encode_state,
1348                             struct intel_encoder_context *encoder_context)
1349 {
1350     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1351     unsigned int rate_control_mode = encoder_context->rate_control_mode;
1352     int current_frame_bits_size;
1353     int sts;
1354  
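    /*
     * Under CBR the frame is re-encoded with an adjusted QP until the BRC
     * reports no HRD violation, or gives up once min/max QP has been reached.
     */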
1355     for (;;) {
1356         gen6_mfc_init(ctx, encode_state, encoder_context);
1357         intel_mfc_avc_prepare(ctx, encode_state, encoder_context);
1358         /* Program the BCS pipeline */
1359         gen6_mfc_avc_pipeline_programing(ctx, encode_state, encoder_context);   // fill the pipeline
1360         gen6_mfc_run(ctx, encode_state, encoder_context);
1361         if (rate_control_mode == VA_RC_CBR /*|| rate_control_mode == VA_RC_VBR*/) {
1362             gen6_mfc_stop(ctx, encode_state, encoder_context, &current_frame_bits_size);
1363             sts = intel_mfc_brc_postpack(encode_state, mfc_context, current_frame_bits_size);
1364             if (sts == BRC_NO_HRD_VIOLATION) {
1365                 intel_mfc_hrd_context_update(encode_state, mfc_context);
1366                 break;
1367             }
1368             else if (sts == BRC_OVERFLOW_WITH_MIN_QP || sts == BRC_UNDERFLOW_WITH_MAX_QP) {
1369                 if (!mfc_context->hrd.violation_noted) {
1370                     fprintf(stderr, "Unrepairable %s!\n", (sts == BRC_OVERFLOW_WITH_MIN_QP)? "overflow": "underflow");
1371                     mfc_context->hrd.violation_noted = 1;
1372                 }
1373                 return VA_STATUS_SUCCESS;
1374             }
1375         } else {
1376             break;
1377         }
1378     }
1379
1380     return VA_STATUS_SUCCESS;
1381 }
1382
1383 VAStatus
1384 gen6_mfc_pipeline(VADriverContextP ctx,
1385                   VAProfile profile,
1386                   struct encode_state *encode_state,
1387                   struct intel_encoder_context *encoder_context)
1388 {
1389     VAStatus vaStatus;
1390
1391     switch (profile) {
1392     case VAProfileH264Baseline:
1393     case VAProfileH264Main:
1394     case VAProfileH264High:
1395         vaStatus = gen6_mfc_avc_encode_picture(ctx, encode_state, encoder_context);
1396         break;
1397
1398         /* FIXME: add support for other profiles */
1399     default:
1400         vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
1401         break;
1402     }
1403
1404     return vaStatus;
1405 }
1406
1407 void
1408 gen6_mfc_context_destroy(void *context)
1409 {
1410     struct gen6_mfc_context *mfc_context = context;
1411     int i;
1412
1413     dri_bo_unreference(mfc_context->post_deblocking_output.bo);
1414     mfc_context->post_deblocking_output.bo = NULL;
1415
1416     dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
1417     mfc_context->pre_deblocking_output.bo = NULL;
1418
1419     dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
1420     mfc_context->uncompressed_picture_source.bo = NULL;
1421
1422     dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo); 
1423     mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
1424
1425     for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
1426         dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
1427         mfc_context->direct_mv_buffers[i].bo = NULL;
1428     }
1429
1430     dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
1431     mfc_context->intra_row_store_scratch_buffer.bo = NULL;
1432
1433     dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
1434     mfc_context->macroblock_status_buffer.bo = NULL;
1435
1436     dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
1437     mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
1438
1439     dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
1440     mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;
1441
1442
1443     for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
1444         dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
1445         mfc_context->reference_surfaces[i].bo = NULL;  
1446     }
1447
1448     i965_gpe_context_destroy(&mfc_context->gpe_context);
1449
1450     dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
1451     mfc_context->mfc_batchbuffer_surface.bo = NULL;
1452
1453     dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
1454     mfc_context->aux_batchbuffer_surface.bo = NULL;
1455
1456     if (mfc_context->aux_batchbuffer)
1457         intel_batchbuffer_free(mfc_context->aux_batchbuffer);
1458
1459     mfc_context->aux_batchbuffer = NULL;
1460
1461     free(mfc_context);
1462 }
1463
1464 Bool gen6_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
1465 {
1466     struct gen6_mfc_context *mfc_context = calloc(1, sizeof(struct gen6_mfc_context));
1467
1468     mfc_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
1469
1470     mfc_context->gpe_context.idrt.max_entries = MAX_GPE_KERNELS;
1471     mfc_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);
1472
1473     mfc_context->gpe_context.curbe.length = 32 * 4;
1474
1475     mfc_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
1476     mfc_context->gpe_context.vfe_state.num_urb_entries = 16;
1477     mfc_context->gpe_context.vfe_state.gpgpu_mode = 0;
1478     mfc_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
1479     mfc_context->gpe_context.vfe_state.curbe_allocation_size = 37 - 1;
1480
1481     i965_gpe_load_kernels(ctx,
1482                           &mfc_context->gpe_context,
1483                           gen6_mfc_kernels,
1484                           NUM_MFC_KERNEL);
1485
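    /* Hook up the Gen6 MFC vtable consumed by the common encoder code. */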
1486     mfc_context->pipe_mode_select = gen6_mfc_pipe_mode_select;
1487     mfc_context->set_surface_state = gen6_mfc_surface_state;
1488     mfc_context->ind_obj_base_addr_state = gen6_mfc_ind_obj_base_addr_state;
1489     mfc_context->avc_img_state = gen6_mfc_avc_img_state;
1490     mfc_context->avc_qm_state = gen6_mfc_avc_qm_state;
1491     mfc_context->avc_fqm_state = gen6_mfc_avc_fqm_state;
1492     mfc_context->insert_object = gen6_mfc_avc_insert_object;
1493     mfc_context->buffer_suface_setup = i965_gpe_buffer_suface_setup;
1494
1495     encoder_context->mfc_context = mfc_context;
1496     encoder_context->mfc_context_destroy = gen6_mfc_context_destroy;
1497     encoder_context->mfc_pipeline = gen6_mfc_pipeline;
1498     encoder_context->mfc_brc_prepare = intel_mfc_brc_prepare;
1499
1500     return True;
1501 }