Add separate files for the media encoder on Haswell
author Zhao Yakui <yakui.zhao@intel.com>
Fri, 20 Jul 2012 01:15:56 +0000 (21:15 -0400)
committer Xiang, Haihao <haihao.xiang@intel.com>
Tue, 23 Oct 2012 05:50:28 +0000 (13:50 +0800)
There are many differences in the media encoder between Haswell and
Ivybridge, for example the VME programming and the corresponding
general media commands. To keep things simple, separate files are
added for Haswell; otherwise the shared code would have to handle
complex backward compatibility.
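
As a rough sketch (not part of this patch), the common encoder setup in
i965_encoder.c might then select the Haswell hooks instead of the gen6/gen7
ones. The device check and the gen6 entry points below are assumed for
illustration only; gen75_vme_context_init()/gen75_mfc_context_init() are the
functions declared by this patch:

    /* Sketch only: pick the Haswell VME/MFC paths when the encoder context
     * is created; names other than the gen75 entry points are illustrative. */
    static void encoder_pick_hw_hooks(VADriverContextP ctx,
                                      struct intel_encoder_context *encoder_context)
    {
        struct i965_driver_data *i965 = i965_driver_data(ctx);

        if (IS_HASWELL(i965->intel.device_id)) {
            /* Haswell: VME programming and media commands differ */
            gen75_vme_context_init(ctx, encoder_context);
            gen75_mfc_context_init(ctx, encoder_context);
        } else {
            /* Ivybridge and earlier keep the existing paths */
            gen6_vme_context_init(ctx, encoder_context);
            gen6_mfc_context_init(ctx, encoder_context);
        }
    }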

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
src/Makefile.am
src/gen6_mfc.c
src/gen6_mfc.h
src/gen6_vme.h
src/gen75_mfc.c [new file with mode: 0644]
src/gen75_vme.c [new file with mode: 0644]
src/i965_drv_video.c
src/i965_drv_video.h
src/i965_encoder.c
src/i965_encoder.h

index 146e3af..8fe8124 100755 (executable)
@@ -52,6 +52,8 @@ source_c = \
        gen6_vme.c              \
        gen7_mfc.c              \
        gen7_mfd.c              \
+       gen75_vme.c             \
+       gen75_mfc.c             \
        i965_avc_bsd.c          \
        i965_avc_hw_scoreboard.c\
        i965_avc_ildb.c         \
index 65d224b..deeb381 100644 (file)
 #include "gen6_mfc.h"
 #include "gen6_vme.h"
 
-#define CMD_LEN_IN_OWORD        4
-
-#define BRC_CLIP(x, min, max) \
-{ \
-    x = ((x > (max)) ? (max) : ((x < (min)) ? (min) : x)); \
-}
-
-#define BRC_P_B_QP_DIFF 4
-#define BRC_I_P_QP_DIFF 2
-#define BRC_I_B_QP_DIFF (BRC_I_P_QP_DIFF + BRC_P_B_QP_DIFF)
-
-#define BRC_PWEIGHT 0.6  /* weight if P slice with comparison to I slice */
-#define BRC_BWEIGHT 0.25 /* weight if B slice with comparison to I slice */
-
-#define BRC_QP_MAX_CHANGE 5 /* maximum qp modification */
-#define BRC_CY 0.1 /* weight for */
-#define BRC_CX_UNDERFLOW 5.
-#define BRC_CX_OVERFLOW -4.
-
-#define BRC_PI_0_5 1.5707963267948966192313216916398
-
-typedef enum _gen6_brc_status
-{
-    BRC_NO_HRD_VIOLATION = 0,
-    BRC_UNDERFLOW = 1,
-    BRC_OVERFLOW = 2,
-    BRC_UNDERFLOW_WITH_MAX_QP = 3,
-    BRC_OVERFLOW_WITH_MIN_QP = 4,
-} gen6_brc_status;
 
 static const uint32_t gen6_mfc_batchbuffer_avc_intra[][4] = {
 #include "shaders/utils/mfc_batchbuffer_avc_intra.g6b"
@@ -1197,14 +1168,6 @@ static VAStatus gen6_mfc_run(VADriverContextP ctx,
     return VA_STATUS_SUCCESS;
 }
 
-extern VAStatus 
-i965_MapBuffer(VADriverContextP ctx, 
-               VABufferID buf_id,       /* in */
-               void **pbuf);            /* out */
-extern VAStatus
-i965_UnmapBuffer(VADriverContextP ctx,
-                 VABufferID buf_id);
-
 static VAStatus
 gen6_mfc_stop(VADriverContextP ctx, 
               struct encode_state *encode_state,
index 43f7082..ba72a9a 100644 (file)
@@ -64,6 +64,36 @@ struct encode_state;
 #define BIND_IDX_MFC_SLICE_HEADER       1
 #define BIND_IDX_MFC_BATCHBUFFER        2
 
+#define CMD_LEN_IN_OWORD        4
+
+#define BRC_CLIP(x, min, max) \
+{ \
+    x = ((x > (max)) ? (max) : ((x < (min)) ? (min) : x)); \
+}
+
+#define BRC_P_B_QP_DIFF 4
+#define BRC_I_P_QP_DIFF 2
+#define BRC_I_B_QP_DIFF (BRC_I_P_QP_DIFF + BRC_P_B_QP_DIFF)
+
+#define BRC_PWEIGHT 0.6  /* weight if P slice with comparison to I slice */
+#define BRC_BWEIGHT 0.25 /* weight if B slice with comparison to I slice */
+
+#define BRC_QP_MAX_CHANGE 5 /* maximum qp modification */
+#define BRC_CY 0.1 /* weight for */
+#define BRC_CX_UNDERFLOW 5.
+#define BRC_CX_OVERFLOW -4.
+
+#define BRC_PI_0_5 1.5707963267948966192313216916398
+
+typedef enum _gen6_brc_status
+{
+    BRC_NO_HRD_VIOLATION = 0,
+    BRC_UNDERFLOW = 1,
+    BRC_OVERFLOW = 2,
+    BRC_UNDERFLOW_WITH_MAX_QP = 3,
+    BRC_OVERFLOW_WITH_MIN_QP = 4,
+} gen6_brc_status;
+
 struct gen6_mfc_avc_surface_aux
 {
     dri_bo *dmv_top;
@@ -219,5 +249,6 @@ void gen6_mfc_context_destroy(void *context);
 void gen6_mfc_brc_prepare(struct encode_state *encode_state,
                           struct intel_encoder_context *encoder_context);
 
-
+extern
+Bool gen75_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context);
 #endif /* _GEN6_MFC_BCS_H_ */
index ad302da..c79770d 100644 (file)
@@ -78,4 +78,5 @@ struct gen6_vme_context
                                     unsigned long surface_state_offset);
 };
 
+Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context);
 #endif /* _GEN6_VME_H_ */
diff --git a/src/gen75_mfc.c b/src/gen75_mfc.c
new file mode 100644 (file)
index 0000000..0748584
--- /dev/null
@@ -0,0 +1,2111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zhao Yakui <yakui.zhao@intel.com>
+ *    Xiang Haihao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <assert.h>
+
+#include "intel_batchbuffer.h"
+#include "i965_defines.h"
+#include "i965_structs.h"
+#include "i965_drv_video.h"
+#include "i965_encoder.h"
+#include "i965_encoder_utils.h"
+#include "gen6_mfc.h"
+#include "gen6_vme.h"
+
+static const uint32_t gen75_mfc_batchbuffer_avc_intra[][4] = {
+#include "shaders/utils/mfc_batchbuffer_avc_intra.g7b"
+};
+
+static const uint32_t gen75_mfc_batchbuffer_avc_inter[][4] = {
+#include "shaders/utils/mfc_batchbuffer_avc_inter.g7b"
+};
+
+static struct i965_kernel gen75_mfc_kernels[] = {
+    {
+        "MFC AVC INTRA BATCHBUFFER ",
+        MFC_BATCHBUFFER_AVC_INTRA,
+        gen75_mfc_batchbuffer_avc_intra,
+        sizeof(gen75_mfc_batchbuffer_avc_intra),
+        NULL
+    },
+
+    {
+        "MFC AVC INTER BATCHBUFFER ",
+        MFC_BATCHBUFFER_AVC_INTER,
+        gen75_mfc_batchbuffer_avc_inter,
+        sizeof(gen75_mfc_batchbuffer_avc_inter),
+        NULL
+    },
+};
+
+static void
+gen75_mfc_pipe_mode_select(VADriverContextP ctx,
+                          int standard_select,
+                          struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    assert(standard_select == MFX_FORMAT_MPEG2 ||
+           standard_select == MFX_FORMAT_AVC);
+
+    BEGIN_BCS_BATCH(batch, 5);
+
+    OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (5 - 2));
+    OUT_BCS_BATCH(batch,
+                  (MFX_LONG_MODE << 17) | /* Must be long format for encoder */
+                  (MFD_MODE_VLD << 15) | /* VLD mode */
+                  (1 << 10) | /* Stream-Out Enable */
+                  ((!!mfc_context->post_deblocking_output.bo) << 9)  | /* Post Deblocking Output */
+                  ((!!mfc_context->pre_deblocking_output.bo) << 8)  | /* Pre Deblocking Output */
+                  (0 << 5)  | /* not in stitch mode */
+                  (1 << 4)  | /* encoding mode */
+                  (standard_select << 0));  /* standard select: avc or mpeg2 */
+    OUT_BCS_BATCH(batch,
+                  (0 << 7)  | /* expand NOA bus flag */
+                  (0 << 6)  | /* disable slice-level clock gating */
+                  (0 << 5)  | /* disable clock gating for NOA */
+                  (0 << 4)  | /* terminate if AVC motion and POC table error occurs */
+                  (0 << 3)  | /* terminate if AVC mbdata error occurs */
+                  (0 << 2)  | /* terminate if AVC CABAC/CAVLC decode error occurs */
+                  (0 << 1)  |
+                  (0 << 0));
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_surface_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    BEGIN_BCS_BATCH(batch, 6);
+
+    OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch,
+                  ((mfc_context->surface_state.height - 1) << 18) |
+                  ((mfc_context->surface_state.width - 1) << 4));
+    OUT_BCS_BATCH(batch,
+                  (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
+                  (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
+                  (0 << 22) | /* surface object control state, FIXME??? */
+                  ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
+                  (0 << 2)  | /* must be 0 for interleave U/V */
+                  (1 << 1)  | /* must be tiled */
+                  (I965_TILEWALK_YMAJOR << 0));  /* tile walk, TILEWALK_YMAJOR */
+    OUT_BCS_BATCH(batch,
+                  (0 << 16) |                                                          /* must be 0 for interleave U/V */
+                  (mfc_context->surface_state.h_pitch));               /* y offset for U(cb) */
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_ind_obj_base_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+
+    BEGIN_BCS_BATCH(batch, 11);
+
+    OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    /* MFX Indirect MV Object Base Address */
+    OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+    OUT_BCS_BATCH(batch, 0x80000000); /* must set, up to 2G */
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    /*MFC Indirect PAK-BSE Object Base Address for Encoder*/   
+    OUT_BCS_RELOC(batch,
+                  mfc_context->mfc_indirect_pak_bse_object.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);
+    OUT_BCS_RELOC(batch,
+                  mfc_context->mfc_indirect_pak_bse_object.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  mfc_context->mfc_indirect_pak_bse_object.end_offset);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_avc_img_state(VADriverContextP ctx, struct encode_state *encode_state,  
+                       struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
+
+    BEGIN_BCS_BATCH(batch, 16);
+
+    OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (16 - 2));
+    OUT_BCS_BATCH(batch,
+                  ((width_in_mbs * height_in_mbs) & 0xFFFF));
+    OUT_BCS_BATCH(batch, 
+                  ((height_in_mbs - 1) << 16) | 
+                  ((width_in_mbs - 1) << 0));
+    OUT_BCS_BATCH(batch, 
+                  (0 << 24) |  /* Second Chroma QP Offset */
+                  (0 << 16) |  /* Chroma QP Offset */
+                  (0 << 14) |   /* Max-bit conformance Intra flag */
+                  (0 << 13) |   /* Max Macroblock size conformance Inter flag */
+                  (pPicParameter->pic_fields.bits.weighted_pred_flag << 12) |   /*Weighted_Pred_Flag */
+                  (pPicParameter->pic_fields.bits.weighted_bipred_idc << 10) |  /* Weighted_BiPred_Idc */
+                  (0 << 8)  |   /* FIXME: Image Structure */
+                  (0 << 0) );   /* Current Decoded Image Frame Store ID, reserved in Encode mode */
+    OUT_BCS_BATCH(batch,
+                  (0 << 16) |   /* Minimum Frame size */
+                  (0 << 15) |   /* Disable reading of Macroblock Status Buffer */
+                  (0 << 14) |   /* Load BitStream Pointer only once, 1 slice 1 frame */
+                  (0 << 13) |   /* CABAC 0 word insertion test enable */
+                  (1 << 12) |   /* MVUnpackedEnable,compliant to DXVA */
+                  (1 << 10) |   /* Chroma Format IDC, 4:2:0 */
+                  (0 << 9)  |   /* FIXME: MbMvFormatFlag */
+                  (pPicParameter->pic_fields.bits.entropy_coding_mode_flag << 7)  |   /*0:CAVLC encoding mode,1:CABAC*/
+                  (0 << 6)  |   /* Only valid for VLD decoding mode */
+                  (0 << 5)  |   /* Constrained Intra Prediction Flag, from PPS */
+                  (0 << 4)  |   /* Direct 8x8 inference flag */
+                  (pPicParameter->pic_fields.bits.transform_8x8_mode_flag << 3)  |   /*8x8 or 4x4 IDCT Transform Mode Flag*/
+                  (1 << 2)  |   /* Frame MB only flag */
+                  (0 << 1)  |   /* MBAFF mode is not active */
+                  (0 << 0));    /* Field picture flag */
+    OUT_BCS_BATCH(batch, 0);    /* Mainly about MB rate control and debug, just ignoring */
+    OUT_BCS_BATCH(batch,        /* Inter and Intra Conformance Max size limit */
+                  (0xBB8 << 16) |       /* InterMbMaxSz */
+                  (0xEE8) );            /* IntraMbMaxSz */
+    OUT_BCS_BATCH(batch, 0);            /* Reserved */
+    OUT_BCS_BATCH(batch, 0);            /* Slice QP Delta for bitrate control */
+    OUT_BCS_BATCH(batch, 0);            /* Slice QP Delta for bitrate control */       
+    OUT_BCS_BATCH(batch, 0x8C000000);
+    OUT_BCS_BATCH(batch, 0x00010000);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_qm_state(VADriverContextP ctx,
+                  int qm_type,
+                  unsigned int *qm,
+                  int qm_length,
+                  struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    unsigned int qm_buffer[16];
+
+    assert(qm_length <= 16);
+    assert(sizeof(*qm) == 4);
+    memcpy(qm_buffer, qm, qm_length * 4);
+
+    BEGIN_BCS_BATCH(batch, 18);
+    OUT_BCS_BATCH(batch, MFX_QM_STATE | (18 - 2));
+    OUT_BCS_BATCH(batch, qm_type << 0);
+    intel_batchbuffer_data(batch, qm_buffer, 16 * 4);
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_avc_qm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
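+    /* Flat AVC scaling lists: every matrix entry is 16 (four 0x10 bytes per dword). */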
+    unsigned int qm[16] = {
+        0x10101010, 0x10101010, 0x10101010, 0x10101010,
+        0x10101010, 0x10101010, 0x10101010, 0x10101010,
+        0x10101010, 0x10101010, 0x10101010, 0x10101010,
+        0x10101010, 0x10101010, 0x10101010, 0x10101010
+    };
+
+    gen75_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 12, encoder_context);
+    gen75_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 12, encoder_context);
+    gen75_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 16, encoder_context);
+    gen75_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 16, encoder_context);
+}
+
+static void
+gen75_mfc_fqm_state(VADriverContextP ctx,
+                   int fqm_type,
+                   unsigned int *fqm,
+                   int fqm_length,
+                   struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    unsigned int fqm_buffer[32];
+
+    assert(fqm_length <= 32);
+    assert(sizeof(*fqm) == 4);
+    memcpy(fqm_buffer, fqm, fqm_length * 4);
+
+    BEGIN_BCS_BATCH(batch, 34);
+    OUT_BCS_BATCH(batch, MFX_FQM_STATE | (34 - 2));
+    OUT_BCS_BATCH(batch, fqm_type << 0);
+    intel_batchbuffer_data(batch, fqm_buffer, 32 * 4);
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_avc_fqm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
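+    /* Matching forward-quantizer matrices: every 16-bit entry is 0x1000. */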
+    unsigned int qm[32] = {
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000,
+        0x10001000, 0x10001000, 0x10001000, 0x10001000
+    };
+
+    gen75_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 24, encoder_context);
+    gen75_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 24, encoder_context);
+    gen75_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 32, encoder_context);
+    gen75_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 32, encoder_context);
+}
+
+static void
+gen75_mfc_avc_insert_object(VADriverContextP ctx, struct intel_encoder_context *encoder_context,
+                           unsigned int *insert_data, int length_in_dws, int data_bits_in_last_dw,
+                           int skip_emul_byte_count, int is_last_header, int is_end_of_slice, int emulation_flag,
+                           struct intel_batchbuffer *batch)
+{
+    if (batch == NULL)
+        batch = encoder_context->base.batch;
+
+    BEGIN_BCS_BATCH(batch, length_in_dws + 2);
+
+    OUT_BCS_BATCH(batch, MFX_INSERT_OBJECT | (length_in_dws + 2 - 2));
+    OUT_BCS_BATCH(batch,
+                  (0 << 16) |   /* always start at offset 0 */
+                  (data_bits_in_last_dw << 8) |
+                  (skip_emul_byte_count << 4) |
+                  (!!emulation_flag << 3) |
+                  ((!!is_last_header) << 2) |
+                  ((!!is_end_of_slice) << 1) |
+                  (0 << 0));    /* FIXME: ??? */
+    intel_batchbuffer_data(batch, insert_data, length_in_dws * 4);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+
+static void
+gen75_mfc_bit_rate_control_context_init(struct encode_state *encode_state, 
+                                       struct gen6_mfc_context *mfc_context)
+{
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
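+    /* Rough per-macroblock bit budget: spread the target bitrate over
+     * (fps + 4) frames' worth of macroblocks; intra MBs get about 5x
+     * the inter budget. */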
+    float fps =  pSequenceParameter->time_scale * 0.5 / pSequenceParameter->num_units_in_tick ;
+    int inter_mb_size = pSequenceParameter->bits_per_second * 1.0 / (fps+4.0) / width_in_mbs / height_in_mbs;
+    int intra_mb_size = inter_mb_size * 5.0;
+    int i;
+
+    mfc_context->bit_rate_control_context[SLICE_TYPE_I].target_mb_size = intra_mb_size;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_I].target_frame_size = intra_mb_size * width_in_mbs * height_in_mbs;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_P].target_mb_size = inter_mb_size;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_P].target_frame_size = inter_mb_size * width_in_mbs * height_in_mbs;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_B].target_mb_size = inter_mb_size;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_B].target_frame_size = inter_mb_size * width_in_mbs * height_in_mbs;
+
+    for(i = 0 ; i < 3; i++) {
+        mfc_context->bit_rate_control_context[i].QpPrimeY = 26;
+        mfc_context->bit_rate_control_context[i].MaxQpNegModifier = 6;
+        mfc_context->bit_rate_control_context[i].MaxQpPosModifier = 6;
+        mfc_context->bit_rate_control_context[i].GrowInit = 6;
+        mfc_context->bit_rate_control_context[i].GrowResistance = 4;
+        mfc_context->bit_rate_control_context[i].ShrinkInit = 6;
+        mfc_context->bit_rate_control_context[i].ShrinkResistance = 4;
+        
+        mfc_context->bit_rate_control_context[i].Correct[0] = 8;
+        mfc_context->bit_rate_control_context[i].Correct[1] = 4;
+        mfc_context->bit_rate_control_context[i].Correct[2] = 2;
+        mfc_context->bit_rate_control_context[i].Correct[3] = 2;
+        mfc_context->bit_rate_control_context[i].Correct[4] = 4;
+        mfc_context->bit_rate_control_context[i].Correct[5] = 8;
+    }
+    
+    mfc_context->bit_rate_control_context[SLICE_TYPE_I].TargetSizeInWord = (intra_mb_size + 16)/ 16;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_P].TargetSizeInWord = (inter_mb_size + 16)/ 16;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_B].TargetSizeInWord = (inter_mb_size + 16)/ 16;
+
+    mfc_context->bit_rate_control_context[SLICE_TYPE_I].MaxSizeInWord = mfc_context->bit_rate_control_context[SLICE_TYPE_I].TargetSizeInWord * 1.5;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_P].MaxSizeInWord = mfc_context->bit_rate_control_context[SLICE_TYPE_P].TargetSizeInWord * 1.5;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_B].MaxSizeInWord = mfc_context->bit_rate_control_context[SLICE_TYPE_B].TargetSizeInWord * 1.5;
+}
+
+static void
+gen75_mfc_brc_init(struct encode_state *encode_state,
+                  struct intel_encoder_context* encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    VAEncMiscParameterBuffer* pMiscParamHRD = (VAEncMiscParameterBuffer*)encode_state->misc_param[VAEncMiscParameterTypeHRD]->buffer;
+    VAEncMiscParameterHRD* pParameterHRD = (VAEncMiscParameterHRD*)pMiscParamHRD->data;
+    double bitrate = pSequenceParameter->bits_per_second;
+    double framerate = (double)pSequenceParameter->time_scale /(2 * (double)pSequenceParameter->num_units_in_tick);
+    int inum = 1, pnum = 0, bnum = 0; /* Gop structure: number of I, P, B frames in the Gop. */
+    int intra_period = pSequenceParameter->intra_period;
+    int ip_period = pSequenceParameter->ip_period;
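+    /* Crude frame-size estimates at the QP extremes, as fractions of the raw
+     * 4:2:0 frame size (8 * 3/2 bits per pixel): ~10% at QP 1, ~0.1% at QP 51.
+     * They are used below to pick an initial QP by linear interpolation. */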
+    double qp1_size = 0.1 * 8 * 3 * (pSequenceParameter->picture_width_in_mbs<<4) * (pSequenceParameter->picture_height_in_mbs<<4)/2;
+    double qp51_size = 0.001 * 8 * 3 * (pSequenceParameter->picture_width_in_mbs<<4) * (pSequenceParameter->picture_height_in_mbs<<4)/2;
+    double bpf;
+
+    if (pSequenceParameter->ip_period) {
+        pnum = (intra_period + ip_period - 1)/ip_period - 1;
+        bnum = intra_period - inum - pnum;
+    }
+
+    mfc_context->brc.mode = encoder_context->rate_control_mode;
+
+    mfc_context->brc.target_frame_size[SLICE_TYPE_I] = (int)((double)((bitrate * intra_period)/framerate) /
+                                                             (double)(inum + BRC_PWEIGHT * pnum + BRC_BWEIGHT * bnum));
+    mfc_context->brc.target_frame_size[SLICE_TYPE_P] = BRC_PWEIGHT * mfc_context->brc.target_frame_size[SLICE_TYPE_I];
+    mfc_context->brc.target_frame_size[SLICE_TYPE_B] = BRC_BWEIGHT * mfc_context->brc.target_frame_size[SLICE_TYPE_I];
+
+    mfc_context->brc.gop_nums[SLICE_TYPE_I] = inum;
+    mfc_context->brc.gop_nums[SLICE_TYPE_P] = pnum;
+    mfc_context->brc.gop_nums[SLICE_TYPE_B] = bnum;
+
+    bpf = mfc_context->brc.bits_per_frame = bitrate/framerate;
+
+    mfc_context->hrd.buffer_size = (double)pParameterHRD->buffer_size;
+    mfc_context->hrd.current_buffer_fullness =
+        (pParameterHRD->initial_buffer_fullness < mfc_context->hrd.buffer_size) ?
+            (double)pParameterHRD->initial_buffer_fullness : mfc_context->hrd.buffer_size/2.;
+    mfc_context->hrd.target_buffer_fullness = (double)mfc_context->hrd.buffer_size/2.;
+    mfc_context->hrd.buffer_capacity = (double)mfc_context->hrd.buffer_size/qp1_size;
+    mfc_context->hrd.violation_noted = 0;
+
+    if ((bpf > qp51_size) && (bpf < qp1_size)) {
+        mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY = 51 - 50*(bpf - qp51_size)/(qp1_size - qp51_size);
+    }
+    else if (bpf >= qp1_size)
+        mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY = 1;
+    else if (bpf <= qp51_size)
+        mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY = 51;
+
+    mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY = mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY;
+    mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY = mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY;
+
+    BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY, 1, 51);
+    BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY, 1, 51);
+    BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY, 1, 51);
+}
+
+static void 
+gen75_mfc_hrd_context_init(struct encode_state *encode_state,
+                          struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    unsigned int rate_control_mode = encoder_context->rate_control_mode;
+    int target_bit_rate = pSequenceParameter->bits_per_second;
+    
+    // currently we only support CBR mode.
+    if (rate_control_mode == VA_RC_CBR) {
+        mfc_context->vui_hrd.i_bit_rate_value = target_bit_rate >> 10;
+        mfc_context->vui_hrd.i_cpb_size_value = (target_bit_rate * 8) >> 10;
+        mfc_context->vui_hrd.i_initial_cpb_removal_delay = mfc_context->vui_hrd.i_cpb_size_value * 0.5 * 1024 / target_bit_rate * 90000;
+        mfc_context->vui_hrd.i_cpb_removal_delay = 2;
+        mfc_context->vui_hrd.i_frame_number = 0;
+
+        mfc_context->vui_hrd.i_initial_cpb_removal_delay_length = 24; 
+        mfc_context->vui_hrd.i_cpb_removal_delay_length = 24;
+        mfc_context->vui_hrd.i_dpb_output_delay_length = 24;
+    }
+
+}
+
+static void 
+gen75_mfc_free_avc_surface(void **data)
+{
+    struct gen6_mfc_avc_surface_aux *avc_surface = *data;
+
+    if (!avc_surface)
+        return;
+
+    dri_bo_unreference(avc_surface->dmv_top);
+    avc_surface->dmv_top = NULL;
+    dri_bo_unreference(avc_surface->dmv_bottom);
+    avc_surface->dmv_bottom = NULL;
+
+    free(avc_surface);
+    *data = NULL;
+}
+
+static void gen75_mfc_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    dri_bo *bo;
+    int i;
+
+    /*Encode common setup for MFC*/
+    dri_bo_unreference(mfc_context->post_deblocking_output.bo);
+    mfc_context->post_deblocking_output.bo = NULL;
+
+    dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
+    mfc_context->pre_deblocking_output.bo = NULL;
+
+    dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
+    mfc_context->uncompressed_picture_source.bo = NULL;
+
+    dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo); 
+    mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
+
+    for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
+        if (mfc_context->direct_mv_buffers[i].bo != NULL)
+            dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
+        mfc_context->direct_mv_buffers[i].bo = NULL;
+    }
+
+    for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
+        if (mfc_context->reference_surfaces[i].bo != NULL)
+            dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
+        mfc_context->reference_surfaces[i].bo = NULL;  
+    }
+
+    dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
+    bo = dri_bo_alloc(i965->intel.bufmgr,
+                      "Buffer",
+                      128 * 64,
+                      64);
+    assert(bo);
+    mfc_context->intra_row_store_scratch_buffer.bo = bo;
+
+    dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
+    bo = dri_bo_alloc(i965->intel.bufmgr,
+                      "Buffer",
+                      128*128*16,
+                      64);
+    assert(bo);
+    mfc_context->macroblock_status_buffer.bo = bo;
+
+    dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
+    bo = dri_bo_alloc(i965->intel.bufmgr,
+                      "Buffer",
+                      49152,  /* 6 * 128 * 64 */
+                      64);
+    assert(bo);
+    mfc_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
+
+    dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
+    bo = dri_bo_alloc(i965->intel.bufmgr,
+                      "Buffer",
+                      12288, /* 1.5 * 128 * 64 */
+                      0x1000);
+    assert(bo);
+    mfc_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
+
+    dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
+    mfc_context->mfc_batchbuffer_surface.bo = NULL;
+
+    dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
+    mfc_context->aux_batchbuffer_surface.bo = NULL;
+
+    if (mfc_context->aux_batchbuffer)
+        intel_batchbuffer_free(mfc_context->aux_batchbuffer);
+
+    mfc_context->aux_batchbuffer = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD);
+    mfc_context->aux_batchbuffer_surface.bo = mfc_context->aux_batchbuffer->buffer;
+    dri_bo_reference(mfc_context->aux_batchbuffer_surface.bo);
+    mfc_context->aux_batchbuffer_surface.pitch = 16;
+    mfc_context->aux_batchbuffer_surface.num_blocks = mfc_context->aux_batchbuffer->size / 16;
+    mfc_context->aux_batchbuffer_surface.size_block = 16;
+
+    i965_gpe_context_init(ctx, &mfc_context->gpe_context);
+}
+
+static void
+gen75_mfc_pipe_buf_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    int i;
+
+    BEGIN_BCS_BATCH(batch, 24);
+
+    OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (24 - 2));
+
+    if (mfc_context->pre_deblocking_output.bo)
+        OUT_BCS_RELOC(batch, mfc_context->pre_deblocking_output.bo,
+                      I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                      0);
+    else
+        OUT_BCS_BATCH(batch, 0);                                                                                       /* pre output addr   */
+
+    if (mfc_context->post_deblocking_output.bo)
+        OUT_BCS_RELOC(batch, mfc_context->post_deblocking_output.bo,
+                      I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                      0);                                                                                      /* post output addr  */ 
+    else
+        OUT_BCS_BATCH(batch, 0);
+
+    OUT_BCS_RELOC(batch, mfc_context->uncompressed_picture_source.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);                                                                                  /* uncompressed data */
+    OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);                                                                                  /* StreamOut data*/
+    OUT_BCS_RELOC(batch, mfc_context->intra_row_store_scratch_buffer.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);  
+    OUT_BCS_RELOC(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);
+    /* 7..22 Reference pictures*/
+    for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
+        if ( mfc_context->reference_surfaces[i].bo != NULL) {
+            OUT_BCS_RELOC(batch, mfc_context->reference_surfaces[i].bo,
+                          I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                          0);                  
+        } else {
+            OUT_BCS_BATCH(batch, 0);
+        }
+    }
+    OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);                                                                                  /* Macroblock status buffer*/
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_avc_directmode_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    int i;
+
+    BEGIN_BCS_BATCH(batch, 69);
+
+    OUT_BCS_BATCH(batch, MFX_AVC_DIRECTMODE_STATE | (69 - 2));
+
+    /* Reference frames and Current frames */
+    for(i = 0; i < NUM_MFC_DMV_BUFFERS; i++) {
+        if ( mfc_context->direct_mv_buffers[i].bo != NULL) { 
+            OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[i].bo,
+                          I915_GEM_DOMAIN_INSTRUCTION, 0,
+                          0);
+        } else {
+            OUT_BCS_BATCH(batch, 0);
+        }
+    }
+
+    /* POC list */
+    for(i = 0; i < 32; i++) {
+        OUT_BCS_BATCH(batch, i/2);
+    }
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_avc_ref_idx_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    int i;
+
+    BEGIN_BCS_BATCH(batch, 10);
+    OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8); 
+    OUT_BCS_BATCH(batch, 0);                  //Select L0
+    OUT_BCS_BATCH(batch, 0x80808020);         //Only 1 reference
+    for(i = 0; i < 7; i++) {
+        OUT_BCS_BATCH(batch, 0x80808080);
+    }   
+    ADVANCE_BCS_BATCH(batch);
+
+    BEGIN_BCS_BATCH(batch, 10);
+    OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8); 
+    OUT_BCS_BATCH(batch, 1);                  //Select L1
+    OUT_BCS_BATCH(batch, 0x80808022);         //Only 1 reference
+    for(i = 0; i < 7; i++) {
+        OUT_BCS_BATCH(batch, 0x80808080);
+    }   
+    ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen75_mfc_bsp_buf_base_addr_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    BEGIN_BCS_BATCH(batch, 4);
+
+    OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (4 - 2));
+    OUT_BCS_RELOC(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
+                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+
+static void gen75_mfc_avc_pipeline_picture_programing( VADriverContextP ctx,
+                                      struct encode_state *encode_state,
+                                      struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    mfc_context->pipe_mode_select(ctx, MFX_FORMAT_AVC, encoder_context);
+    mfc_context->set_surface_state(ctx, encoder_context);
+    mfc_context->ind_obj_base_addr_state(ctx, encoder_context);
+    gen75_mfc_pipe_buf_addr_state(ctx, encoder_context);
+    gen75_mfc_bsp_buf_base_addr_state(ctx, encoder_context);
+    mfc_context->avc_img_state(ctx, encode_state, encoder_context);
+    mfc_context->avc_qm_state(ctx, encoder_context);
+    mfc_context->avc_fqm_state(ctx, encoder_context);
+    gen75_mfc_avc_directmode_state(ctx, encoder_context); 
+    gen75_mfc_avc_ref_idx_state(ctx, encoder_context);
+}
+
+
+static VAStatus gen75_mfc_avc_prepare(VADriverContextP ctx, 
+                                     struct encode_state *encode_state,
+                                     struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct object_surface *obj_surface;        
+    struct object_buffer *obj_buffer;
+    struct gen6_mfc_avc_surface_aux* gen6_avc_surface;
+    dri_bo *bo;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VAStatus vaStatus = VA_STATUS_SUCCESS;
+    int i, j, enable_avc_ildb = 0;
+    VAEncSliceParameterBufferH264 *slice_param;
+    VACodedBufferSegment *coded_buffer_segment;
+    unsigned char *flag = NULL;
+
+    for (j = 0; j < encode_state->num_slice_params_ext && enable_avc_ildb == 0; j++) {
+        assert(encode_state->slice_params_ext && encode_state->slice_params_ext[j]->buffer);
+        slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[j]->buffer;
+
+        for (i = 0; i < encode_state->slice_params_ext[j]->num_elements; i++) {
+            assert((slice_param->slice_type == SLICE_TYPE_I) ||
+                   (slice_param->slice_type == SLICE_TYPE_SI) ||
+                   (slice_param->slice_type == SLICE_TYPE_P) ||
+                   (slice_param->slice_type == SLICE_TYPE_SP) ||
+                   (slice_param->slice_type == SLICE_TYPE_B));
+
+            if (slice_param->disable_deblocking_filter_idc != 1) {
+                enable_avc_ildb = 1;
+                break;
+            }
+
+            slice_param++;
+        }
+    }
+
+    /*Setup all the input&output object*/
+
+    /* Setup current frame and current direct mv buffer*/
+    obj_surface = SURFACE(pPicParameter->CurrPic.picture_id);
+    assert(obj_surface);
+    i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
+
+    if ( obj_surface->private_data == NULL) {
+        gen6_avc_surface = calloc(sizeof(struct gen6_mfc_avc_surface_aux), 1);
+        gen6_avc_surface->dmv_top = 
+            dri_bo_alloc(i965->intel.bufmgr,
+                         "Buffer",
+                         68*8192, 
+                         64);
+        gen6_avc_surface->dmv_bottom = 
+            dri_bo_alloc(i965->intel.bufmgr,
+                         "Buffer",
+                         68*8192, 
+                         64);
+        assert(gen6_avc_surface->dmv_top);
+        assert(gen6_avc_surface->dmv_bottom);
+        obj_surface->private_data = (void *)gen6_avc_surface;
+        obj_surface->free_private_data = (void *)gen75_mfc_free_avc_surface; 
+    }
+    gen6_avc_surface = (struct gen6_mfc_avc_surface_aux*) obj_surface->private_data;
+    mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 2].bo = gen6_avc_surface->dmv_top;
+    mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 1].bo = gen6_avc_surface->dmv_bottom;
+    dri_bo_reference(gen6_avc_surface->dmv_top);
+    dri_bo_reference(gen6_avc_surface->dmv_bottom);
+
+    if (enable_avc_ildb) {
+        mfc_context->post_deblocking_output.bo = obj_surface->bo;
+        dri_bo_reference(mfc_context->post_deblocking_output.bo);
+    } else {
+        mfc_context->pre_deblocking_output.bo = obj_surface->bo;
+        dri_bo_reference(mfc_context->pre_deblocking_output.bo);
+    }
+
+    mfc_context->surface_state.width = obj_surface->orig_width;
+    mfc_context->surface_state.height = obj_surface->orig_height;
+    mfc_context->surface_state.w_pitch = obj_surface->width;
+    mfc_context->surface_state.h_pitch = obj_surface->height;
+    
+    /* Setup reference frames and direct mv buffers*/
+    for(i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++) {
+        if ( pPicParameter->ReferenceFrames[i].picture_id != VA_INVALID_ID ) { 
+            obj_surface = SURFACE(pPicParameter->ReferenceFrames[i].picture_id);
+            assert(obj_surface);
+            if (obj_surface->bo != NULL) {
+                mfc_context->reference_surfaces[i].bo = obj_surface->bo;
+                dri_bo_reference(obj_surface->bo);
+            }
+            /* Check DMV buffer */
+            if ( obj_surface->private_data == NULL) {
+                
+                gen6_avc_surface = calloc(sizeof(struct gen6_mfc_avc_surface_aux), 1);
+                gen6_avc_surface->dmv_top = 
+                    dri_bo_alloc(i965->intel.bufmgr,
+                                 "Buffer",
+                                 68*8192, 
+                                 64);
+                gen6_avc_surface->dmv_bottom = 
+                    dri_bo_alloc(i965->intel.bufmgr,
+                                 "Buffer",
+                                 68*8192, 
+                                 64);
+                assert(gen6_avc_surface->dmv_top);
+                assert(gen6_avc_surface->dmv_bottom);
+                obj_surface->private_data = gen6_avc_surface;
+                obj_surface->free_private_data = gen75_mfc_free_avc_surface; 
+            }
+    
+            gen6_avc_surface = (struct gen6_mfc_avc_surface_aux*) obj_surface->private_data;
+            /* Setup DMV buffer */
+            mfc_context->direct_mv_buffers[i*2].bo = gen6_avc_surface->dmv_top;
+            mfc_context->direct_mv_buffers[i*2+1].bo = gen6_avc_surface->dmv_bottom; 
+            dri_bo_reference(gen6_avc_surface->dmv_top);
+            dri_bo_reference(gen6_avc_surface->dmv_bottom);
+        } else {
+            break;
+        }
+    }
+       
+    obj_surface = SURFACE(encoder_context->input_yuv_surface);
+    assert(obj_surface && obj_surface->bo);
+    mfc_context->uncompressed_picture_source.bo = obj_surface->bo;
+    dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
+
+    obj_buffer = BUFFER (pPicParameter->coded_buf); /* FIXME: fix this later */
+    bo = obj_buffer->buffer_store->bo;
+    assert(bo);
+    mfc_context->mfc_indirect_pak_bse_object.bo = bo;
+    mfc_context->mfc_indirect_pak_bse_object.offset = I965_CODEDBUFFER_SIZE;
+    mfc_context->mfc_indirect_pak_bse_object.end_offset = ALIGN(obj_buffer->size_element - 0x1000, 0x1000);
+    dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
+    
+    dri_bo_map(bo, 1);
+    coded_buffer_segment = (VACodedBufferSegment *)bo->virtual;
+    flag = (unsigned char *)(coded_buffer_segment + 1);
+    *flag = 0;
+    dri_bo_unmap(bo);
+
+    return vaStatus;
+}
+
+
+static VAStatus gen75_mfc_run(VADriverContextP ctx, 
+                             struct encode_state *encode_state,
+                             struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+
+    intel_batchbuffer_flush(batch);            //run the pipeline
+
+    return VA_STATUS_SUCCESS;
+}
+
+
+static VAStatus
+gen75_mfc_stop(VADriverContextP ctx, 
+              struct encode_state *encode_state,
+              struct intel_encoder_context *encoder_context,
+              int *encoded_bits_size)
+{
+    VAStatus vaStatus = VA_STATUS_ERROR_UNKNOWN;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VACodedBufferSegment *coded_buffer_segment;
+    
+    vaStatus = i965_MapBuffer(ctx, pPicParameter->coded_buf, (void **)&coded_buffer_segment);
+    assert(vaStatus == VA_STATUS_SUCCESS);
+    *encoded_bits_size = coded_buffer_segment->size * 8;
+    i965_UnmapBuffer(ctx, pPicParameter->coded_buf);
+
+    return VA_STATUS_SUCCESS;
+}
+
+
+static int gen75_mfc_update_hrd(struct encode_state *encode_state,
+                               struct gen6_mfc_context *mfc_context,
+                               int frame_bits)
+{
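+    /* Simple HRD model: drain the bits just produced for this frame, then add
+     * the per-frame budget; report underflow/overflow against the configured
+     * buffer size and restore the previous fullness on a violation. */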
+    double prev_bf = mfc_context->hrd.current_buffer_fullness;
+
+    mfc_context->hrd.current_buffer_fullness -= frame_bits;
+
+    if (mfc_context->hrd.buffer_size > 0 && mfc_context->hrd.current_buffer_fullness <= 0.) {
+        mfc_context->hrd.current_buffer_fullness = prev_bf;
+        return BRC_UNDERFLOW;
+    }
+    
+    mfc_context->hrd.current_buffer_fullness += mfc_context->brc.bits_per_frame;
+    if (mfc_context->hrd.buffer_size > 0 && mfc_context->hrd.current_buffer_fullness > mfc_context->hrd.buffer_size) {
+        if (mfc_context->brc.mode == VA_RC_VBR)
+            mfc_context->hrd.current_buffer_fullness = mfc_context->hrd.buffer_size;
+        else {
+            mfc_context->hrd.current_buffer_fullness = prev_bf;
+            return BRC_OVERFLOW;
+        }
+    }
+    return BRC_NO_HRD_VIOLATION;
+}
+
+
+static int gen75_mfc_brc_postpack(struct encode_state *encode_state,
+                                 struct gen6_mfc_context *mfc_context,
+                                 int frame_bits)
+{
+    gen6_brc_status sts = BRC_NO_HRD_VIOLATION;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer; 
+    int slicetype = pSliceParameter->slice_type;
+    int qpi = mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY;
+    int qpp = mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY;
+    int qpb = mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY;
+    int qp; // quantizer of previously encoded slice of current type
+    int qpn; // predicted quantizer for next frame of current type in integer format
+    double qpf; // predicted quantizer for next frame of current type in float format
+    double delta_qp; // QP correction
+    int target_frame_size, frame_size_next;
+    /* Notes:
+     *  x - how far we are from the target HRD buffer fullness (normalized)
+     *  y - how far we are from the nearer HRD buffer border
+     */
+    double x, y;
+    double frame_size_alpha;
+
+    if (slicetype == SLICE_TYPE_SP)
+        slicetype = SLICE_TYPE_P;
+    else if (slicetype == SLICE_TYPE_SI)
+        slicetype = SLICE_TYPE_I;
+
+    qp = mfc_context->bit_rate_control_context[slicetype].QpPrimeY;
+
+    target_frame_size = mfc_context->brc.target_frame_size[slicetype];
+    if (mfc_context->hrd.buffer_capacity < 5)
+        frame_size_alpha = 0;
+    else
+        frame_size_alpha = (double)mfc_context->brc.gop_nums[slicetype];
+    if (frame_size_alpha > 30) frame_size_alpha = 30;
+    frame_size_next = target_frame_size + (double)(target_frame_size - frame_bits) /
+                                          (double)(frame_size_alpha + 1.);
+
+    /* frame_size_next: avoiding negative number and too small value */
+    if ((double)frame_size_next < (double)(target_frame_size * 0.25))
+        frame_size_next = (int)((double)target_frame_size * 0.25);
+
+    qpf = (double)qp * target_frame_size / frame_size_next;
+    qpn = (int)(qpf + 0.5);
+
+    if (qpn == qp) {
+        /* rounding qpf to the integer qpn loses precision: accumulate the error and compensate once it exceeds one QP step */
+        mfc_context->brc.qpf_rounding_accumulator += qpf - qpn;
+        if (mfc_context->brc.qpf_rounding_accumulator > 1.0) {
+            qpn++;
+            mfc_context->brc.qpf_rounding_accumulator = 0.;
+        } else if (mfc_context->brc.qpf_rounding_accumulator < -1.0) {
+            qpn--;
+            mfc_context->brc.qpf_rounding_accumulator = 0.;
+        }
+    }
+    /* making sure that QP is not changing too fast */
+    if ((qpn - qp) > BRC_QP_MAX_CHANGE) qpn = qp + BRC_QP_MAX_CHANGE;
+    else if ((qpn - qp) < -BRC_QP_MAX_CHANGE) qpn = qp - BRC_QP_MAX_CHANGE;
+    /* make sure the predicted QP stays within the valid QP range */
+    BRC_CLIP(qpn, 1, 51);
+
+    /* check whether HRD compliance is still met */
+    sts = gen75_mfc_update_hrd(encode_state, mfc_context, frame_bits);
+
+    /* calculate an additional QP correction as a smooth function of the HRD buffer state, bounded by +/-BRC_QP_MAX_CHANGE */
+    x = mfc_context->hrd.target_buffer_fullness - mfc_context->hrd.current_buffer_fullness;
+    if (x > 0) {
+        x /= mfc_context->hrd.target_buffer_fullness;
+        y = mfc_context->hrd.current_buffer_fullness;
+    }
+    else {
+        x /= (mfc_context->hrd.buffer_size - mfc_context->hrd.target_buffer_fullness);
+        y = mfc_context->hrd.buffer_size - mfc_context->hrd.current_buffer_fullness;
+    }
+    if (y < 0.01) y = 0.01;
+    if (x > 1) x = 1;
+    else if (x < -1) x = -1;
+
+    delta_qp = BRC_QP_MAX_CHANGE*exp(-1/y)*sin(BRC_PI_0_5 * x);
+    qpn = (int)(qpn + delta_qp + 0.5);
+
+    /* make sure the corrected QP stays within the valid QP range */
+    BRC_CLIP(qpn, 1, 51);
+
+    if (sts == BRC_NO_HRD_VIOLATION) { // no HRD violation
+        /* correcting QPs of slices of other types */
+        if (slicetype == SLICE_TYPE_P) {
+            if (abs(qpn + BRC_P_B_QP_DIFF - qpb) > 2)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY += (qpn + BRC_P_B_QP_DIFF - qpb) >> 1;
+            if (abs(qpn - BRC_I_P_QP_DIFF - qpi) > 2)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY += (qpn - BRC_I_P_QP_DIFF - qpi) >> 1;
+        } else if (slicetype == SLICE_TYPE_I) {
+            if (abs(qpn + BRC_I_B_QP_DIFF - qpb) > 4)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY += (qpn + BRC_I_B_QP_DIFF - qpb) >> 2;
+            if (abs(qpn + BRC_I_P_QP_DIFF - qpp) > 2)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY += (qpn + BRC_I_P_QP_DIFF - qpp) >> 2;
+        } else { // SLICE_TYPE_B
+            if (abs(qpn - BRC_P_B_QP_DIFF - qpp) > 2)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY += (qpn - BRC_P_B_QP_DIFF - qpp) >> 1;
+            if (abs(qpn - BRC_I_B_QP_DIFF - qpi) > 4)
+                mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY += (qpn - BRC_I_B_QP_DIFF - qpi) >> 2;
+        }
+        BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY, 1, 51);
+        BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_P].QpPrimeY, 1, 51);
+        BRC_CLIP(mfc_context->bit_rate_control_context[SLICE_TYPE_B].QpPrimeY, 1, 51);
+    } else if (sts == BRC_UNDERFLOW) { // underflow
+        if (qpn <= qp) qpn = qp + 1;
+        if (qpn > 51) {
+            qpn = 51;
+            sts = BRC_UNDERFLOW_WITH_MAX_QP; //underflow with maxQP
+        }
+    } else if (sts == BRC_OVERFLOW) {
+        if (qpn >= qp) qpn = qp - 1;
+        if (qpn < 1) { // < 0 (?) overflow with minQP
+            qpn = 1;
+            sts = BRC_OVERFLOW_WITH_MIN_QP; // bit stuffing to be done
+        }
+    }
+
+    mfc_context->bit_rate_control_context[slicetype].QpPrimeY = qpn;
+
+    return sts;
+}
+
+static void 
+gen75_mfc_hrd_context_update(struct encode_state *encode_state, 
+                          struct gen6_mfc_context *mfc_context) 
+{
+    mfc_context->vui_hrd.i_frame_number++;
+}
+
+
+static int interlace_check(VADriverContextP ctx,
+                   struct encode_state *encode_state,
+                   struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncSliceParameterBufferH264 *pSliceParameter;
+    int i;
+    int mbCount = 0;
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
+  
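+    /* Sum the macroblocks covered by all slices; if they do not cover the
+     * whole frame, treat the picture as field/interlaced coding. */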
+    for (i = 0; i < encode_state->num_slice_params_ext; i++) {
+        pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[i]->buffer; 
+        mbCount += pSliceParameter->num_macroblocks; 
+    }
+    
+    if ( mbCount == ( width_in_mbs * height_in_mbs ) )
+        return 0;
+
+    return 1;
+}
+
+static void
+gen75_mfc_avc_slice_state(VADriverContextP ctx,
+                         VAEncPictureParameterBufferH264 *pic_param,
+                         VAEncSliceParameterBufferH264 *slice_param,
+                         struct encode_state *encode_state,
+                         struct intel_encoder_context *encoder_context,
+                         int rate_control_enable,
+                         int qp,
+                         struct intel_batchbuffer *batch)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
+    int beginmb = slice_param->macroblock_address;
+    int endmb = beginmb + slice_param->num_macroblocks;
+    int beginx = beginmb % width_in_mbs;
+    int beginy = beginmb / width_in_mbs;
+    int nextx =  endmb % width_in_mbs;
+    int nexty = endmb / width_in_mbs;
+    int slice_type = slice_param->slice_type;
+    int last_slice = (endmb == (width_in_mbs * height_in_mbs));
+    int bit_rate_control_target, maxQpN, maxQpP;
+    unsigned char correct[6], grow, shrink;
+    int i;
+    int weighted_pred_idc = 0;
+    unsigned int luma_log2_weight_denom = slice_param->luma_log2_weight_denom;
+    unsigned int chroma_log2_weight_denom = slice_param->chroma_log2_weight_denom;
+
+    if (batch == NULL)
+        batch = encoder_context->base.batch;
+
+    bit_rate_control_target = slice_type;
+    if (slice_type == SLICE_TYPE_SP)
+        bit_rate_control_target = SLICE_TYPE_P;
+    else if (slice_type == SLICE_TYPE_SI)
+        bit_rate_control_target = SLICE_TYPE_I;
+
+    if (slice_type == SLICE_TYPE_P) {
+        weighted_pred_idc = pic_param->pic_fields.bits.weighted_pred_flag;
+    } else if (slice_type == SLICE_TYPE_B) {
+        weighted_pred_idc = pic_param->pic_fields.bits.weighted_bipred_idc;
+
+        if (weighted_pred_idc == 2) {
+            /* 8.4.3 - Derivation process for prediction weights (8-279) */
+            luma_log2_weight_denom = 5;
+            chroma_log2_weight_denom = 5;
+        }
+    }
+
+    maxQpN = mfc_context->bit_rate_control_context[bit_rate_control_target].MaxQpNegModifier;
+    maxQpP = mfc_context->bit_rate_control_context[bit_rate_control_target].MaxQpPosModifier;
+
+    for (i = 0; i < 6; i++)
+        correct[i] = mfc_context->bit_rate_control_context[bit_rate_control_target].Correct[i];
+
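+    /* Pack the rate-control grow/shrink parameters: init value in the low
+     * nibble, resistance in the high nibble, consumed by MFX_AVC_SLICE_STATE below. */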
+    grow = mfc_context->bit_rate_control_context[bit_rate_control_target].GrowInit + 
+        (mfc_context->bit_rate_control_context[bit_rate_control_target].GrowResistance << 4);
+    shrink = mfc_context->bit_rate_control_context[bit_rate_control_target].ShrinkInit + 
+        (mfc_context->bit_rate_control_context[bit_rate_control_target].ShrinkResistance << 4);
+
+    BEGIN_BCS_BATCH(batch, 11);
+
+    OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2) );
+    OUT_BCS_BATCH(batch, slice_type);                  /*Slice Type: I:P:B Slice*/
+
+    if (slice_type == SLICE_TYPE_I) {
+        OUT_BCS_BATCH(batch, 0);                       /*no reference frames and pred_weight_table*/
+    } else {
+        OUT_BCS_BATCH(batch,
+                      (1 << 16) |                      /*1 reference frame*/
+                      (chroma_log2_weight_denom << 8) |
+                      (luma_log2_weight_denom << 0));
+    }
+
+    OUT_BCS_BATCH(batch, 
+                  (weighted_pred_idc << 30) |
+                  (slice_param->direct_spatial_mv_pred_flag<<29) |             /*Direct Prediction Type*/
+                  (slice_param->disable_deblocking_filter_idc << 27) |
+                  (slice_param->cabac_init_idc << 24) |
+                  (qp<<16) |                   /*Slice Quantization Parameter*/
+                  ((slice_param->slice_beta_offset_div2 & 0xf) << 8) |
+                  ((slice_param->slice_alpha_c0_offset_div2 & 0xf) << 0));
+    OUT_BCS_BATCH(batch,
+                  (beginy << 24) |                     /*First MB X&Y, the begin position of the current slice*/
+                  (beginx << 16) |
+                  slice_param->macroblock_address );
+    OUT_BCS_BATCH(batch, (nexty << 16) | nextx);                       /*Next slice first MB X&Y*/
+    OUT_BCS_BATCH(batch, 
+                  (0/*rate_control_enable*/ << 31) |           /*in CBR mode RateControlCounterEnable = enable*/
+                  (1 << 30) |          /*ResetRateControlCounter*/
+                  (0 << 28) |          /*RC Trigger Mode = Always Rate Control*/
+                  (4 << 24) |     /*RC Stable Tolerance, middle level*/
+                  (0/*rate_control_enable*/ << 23) |     /*RC Panic Enable*/                 
+                  (0 << 22) |     /*QP mode, don't modfiy CBP*/
+                  (0 << 21) |     /*MB Type Direct Conversion Enabled*/ 
+                  (0 << 20) |     /*MB Type Skip Conversion Enabled*/ 
+                  (last_slice << 19) |     /*IsLastSlice*/
+                  (0 << 18) |  /* BitstreamOutputFlag: compressed bitstream output disable flag (0: enable, 1: disable) */
+                  (1 << 17) |      /*HeaderPresentFlag*/       
+                  (1 << 16) |      /*SliceData PresentFlag*/
+                  (1 << 15) |      /*TailPresentFlag*/
+                  (1 << 13) |      /*RBSP NAL TYPE*/   
+                  (0 << 12) );    /*CabacZeroWordInsertionEnable*/
+    OUT_BCS_BATCH(batch, mfc_context->mfc_indirect_pak_bse_object.offset);
+    OUT_BCS_BATCH(batch,
+                  (maxQpN << 24) |     /* maximum QP negative modifier */
+                  (maxQpP << 16) |     /* maximum QP positive modifier */
+                  (shrink << 8)  |
+                  (grow << 0));   
+    OUT_BCS_BATCH(batch,
+                  (correct[5] << 20) |
+                  (correct[4] << 16) |
+                  (correct[3] << 12) |
+                  (correct[2] << 8) |
+                  (correct[1] << 4) |
+                  (correct[0] << 0));
+    OUT_BCS_BATCH(batch, 0);
+
+    ADVANCE_BCS_BATCH(batch);
+}
+
+
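+/*
+ * Insert the application-supplied packed SPS/PPS/SEI headers (if present)
+ * into the slice batch via insert_object(); the !has_emulation_bytes flag
+ * presumably asks the PAK to add emulation-prevention bytes when the packed
+ * buffer does not already contain them.
+ */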
+static void gen75_mfc_avc_pipeline_header_programing(VADriverContextP ctx,
+                                                    struct encode_state *encode_state,
+                                                    struct intel_encoder_context *encoder_context,
+                                                    struct intel_batchbuffer *slice_batch)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    int idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_SPS);
+
+    if (encode_state->packed_header_data[idx]) {
+        VAEncPackedHeaderParameterBuffer *param = NULL;
+        unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
+        unsigned int length_in_bits;
+
+        assert(encode_state->packed_header_param[idx]);
+        param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
+        length_in_bits = param->bit_length;
+
+        mfc_context->insert_object(ctx,
+                                   encoder_context,
+                                   header_data,
+                                   ALIGN(length_in_bits, 32) >> 5,
+                                   length_in_bits & 0x1f,
+                                   5,   /* FIXME: check it */
+                                   0,
+                                   0,
+                                   !param->has_emulation_bytes,
+                                   slice_batch);
+    }
+
+    idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_PPS);
+
+    if (encode_state->packed_header_data[idx]) {
+        VAEncPackedHeaderParameterBuffer *param = NULL;
+        unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
+        unsigned int length_in_bits;
+
+        assert(encode_state->packed_header_param[idx]);
+        param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
+        length_in_bits = param->bit_length;
+
+        mfc_context->insert_object(ctx,
+                                   encoder_context,
+                                   header_data,
+                                   ALIGN(length_in_bits, 32) >> 5,
+                                   length_in_bits & 0x1f,
+                                   5, /* FIXME: check it */
+                                   0,
+                                   0,
+                                   !param->has_emulation_bytes,
+                                   slice_batch);
+    }
+    
+    idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_SEI);
+
+    if (encode_state->packed_header_data[idx]) {
+        VAEncPackedHeaderParameterBuffer *param = NULL;
+        unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
+        unsigned int length_in_bits;
+
+        assert(encode_state->packed_header_param[idx]);
+        param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
+        length_in_bits = param->bit_length;
+
+        mfc_context->insert_object(ctx,
+                                   encoder_context,
+                                   header_data,
+                                   ALIGN(length_in_bits, 32) >> 5,
+                                   length_in_bits & 0x1f,
+                                   5, /* FIXME: check it */
+                                   0,
+                                   0,
+                                   !param->has_emulation_bytes,
+                                   slice_batch);
+    }
+}
+
+#if __SOFTWARE__
+
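+/*
+ * Emit one MFC_AVC_PAK_OBJECT command for an intra macroblock, filling the
+ * mode/prediction fields from the VME output message (msg[0..3]).
+ * Returns the command length in DWORDs.
+ */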
+static int
+gen75_mfc_avc_pak_object_intra(VADriverContextP ctx, int x, int y, int end_mb,
+                               int qp,unsigned int *msg,
+                              struct intel_encoder_context *encoder_context,
+                              unsigned char target_mb_size, unsigned char max_mb_size,
+                              struct intel_batchbuffer *batch)
+{
+    int len_in_dwords = 11;
+
+    if (batch == NULL)
+        batch = encoder_context->base.batch;
+
+    BEGIN_BCS_BATCH(batch, len_in_dwords);
+
+    OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, 
+                  (0 << 24) |          /* PackedMvNum, Debug*/
+                  (0 << 20) |          /* No motion vector */
+                  (1 << 19) |          /* CbpDcY */
+                  (1 << 18) |          /* CbpDcU */
+                  (1 << 17) |          /* CbpDcV */
+                  (msg[0] & 0xFFFF) );
+
+    OUT_BCS_BATCH(batch, (0xFFFF << 16) | (y << 8) | x);               /* Code Block Pattern for Y*/
+    OUT_BCS_BATCH(batch, 0x000F000F);                                                  /* Code Block Pattern */                
+    OUT_BCS_BATCH(batch, (0 << 27) | (end_mb << 26) | qp);     /* Last MB */
+
+    /*Stuff for Intra MB*/
+    OUT_BCS_BATCH(batch, msg[1]);                      /* Intra16x16 is used, not the 4x4 pred mode */
+    OUT_BCS_BATCH(batch, msg[2]);      
+    OUT_BCS_BATCH(batch, msg[3]&0xFC);         
+    
+    /* MaxSizeInWord and TargetSizeInWord */
+    OUT_BCS_BATCH(batch, (max_mb_size << 24) |
+                  (target_mb_size << 16) );
+
+    ADVANCE_BCS_BATCH(batch);
+
+    return len_in_dwords;
+}
+
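+/*
+ * Emit one MFC_AVC_PAK_OBJECT command for an inter macroblock; the motion
+ * vectors are fetched indirectly from the VME output buffer at 'offset'.
+ * Returns the command length in DWORDs.
+ */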
+static int
+gen75_mfc_avc_pak_object_inter(VADriverContextP ctx, int x, int y, int end_mb, int qp,
+                              unsigned int *msg, unsigned int offset,
+                              struct intel_encoder_context *encoder_context,
+                              unsigned char target_mb_size,unsigned char max_mb_size, int slice_type,
+                              struct intel_batchbuffer *batch)
+{
+    int len_in_dwords = 11;
+
+    if (batch == NULL)
+        batch = encoder_context->base.batch;
+
+    BEGIN_BCS_BATCH(batch, len_in_dwords);
+
+    OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
+
+    OUT_BCS_BATCH(batch, msg[2]);         /* 32 MV*/
+    OUT_BCS_BATCH(batch, offset);
+
+    OUT_BCS_BATCH(batch, msg[0]);
+
+    OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x);        /* Code Block Pattern for Y*/
+    OUT_BCS_BATCH(batch, 0x000F000F);                         /* Code Block Pattern */  
+#if 0 
+    if ( slice_type == SLICE_TYPE_B) {
+        OUT_BCS_BATCH(batch, (0xF<<28) | (end_mb << 26) | qp); /* Last MB */
+    } else {
+        OUT_BCS_BATCH(batch, (end_mb << 26) | qp);     /* Last MB */
+    }
+#else
+    OUT_BCS_BATCH(batch, (end_mb << 26) | qp); /* Last MB */
+#endif
+
+
+    /*Stuff for Inter MB*/
+    OUT_BCS_BATCH(batch, msg[1]);        
+    OUT_BCS_BATCH(batch, 0x0);    
+    OUT_BCS_BATCH(batch, 0x0);        
+
+    /* MaxSizeInWord and TargetSizeInWord */
+    OUT_BCS_BATCH(batch, (max_mb_size << 24) |
+                  (target_mb_size << 16) );
+
+    ADVANCE_BCS_BATCH(batch);
+
+    return len_in_dwords;
+}
+
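+/*
+ * Software slice programming: emit the slice state and (for the first slice)
+ * the packed headers, then walk the mapped VME output and emit one PAK object
+ * per macroblock, followed by the tail data that closes the slice/frame.
+ */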
+static void 
+gen75_mfc_avc_pipeline_slice_programing(VADriverContextP ctx,
+                                       struct encode_state *encode_state,
+                                       struct intel_encoder_context *encoder_context,
+                                       int slice_index,
+                                       struct intel_batchbuffer *slice_batch)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[slice_index]->buffer; 
+    unsigned int *msg = NULL, offset = 0;
+    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
+    int last_slice = (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks) == (width_in_mbs * height_in_mbs);
+    int i,x,y;
+    int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
+    unsigned int rate_control_mode = encoder_context->rate_control_mode;
+    unsigned char *slice_header = NULL;
+    int slice_header_length_in_bits = 0;
+    unsigned int tail_data[] = { 0x0, 0x0 };
+    int slice_type = pSliceParameter->slice_type;
+
+
+    if (rate_control_mode == VA_RC_CBR) {
+        qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
+        pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+    }
+
+    /* only 8-bit pixel bit-depth is supported */
+    assert(pSequenceParameter->bit_depth_luma_minus8 == 0);
+    assert(pSequenceParameter->bit_depth_chroma_minus8 == 0);
+    assert(pPicParameter->pic_init_qp >= 0 && pPicParameter->pic_init_qp < 52);
+    assert(qp >= 0 && qp < 52);
+
+    gen75_mfc_avc_slice_state(ctx, 
+                             pPicParameter,
+                             pSliceParameter,
+                             encode_state, encoder_context,
+                             (rate_control_mode == VA_RC_CBR), qp, slice_batch);
+
+    if ( slice_index == 0) 
+        gen75_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
+
+    slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
+
+    // slice header
+    mfc_context->insert_object(ctx, encoder_context,
+                               (unsigned int *)slice_header, ALIGN(slice_header_length_in_bits, 32) >> 5, slice_header_length_in_bits & 0x1f,
+                               5,  /* first 5 bytes are start code + nal unit type */
+                               1, 0, 1, slice_batch);
+
+    dri_bo_map(vme_context->vme_output.bo , 1);
+    msg = (unsigned int *)vme_context->vme_output.bo->virtual;
+
+    if (is_intra) {
+        msg += pSliceParameter->macroblock_address * INTRA_VME_OUTPUT_IN_DWS;
+    } else {
+        msg += pSliceParameter->macroblock_address * INTER_VME_OUTPUT_IN_DWS;
+        msg += 32; /* the first 32 DWs are MVs */
+        offset = pSliceParameter->macroblock_address * INTER_VME_OUTPUT_IN_BYTES;
+    }
+   
+    for (i = pSliceParameter->macroblock_address; 
+         i < pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks; i++) {
+        int last_mb = (i == (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks - 1) );
+        x = i % width_in_mbs;
+        y = i / width_in_mbs;
+
+        if (is_intra) {
+            assert(msg);
+            gen75_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
+            msg += INTRA_VME_OUTPUT_IN_DWS;
+        } else {
+            if (msg[0] & INTRA_MB_FLAG_MASK) {
+                gen75_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
+            } else {
+                gen75_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp, msg, offset, encoder_context, 0, 0, pSliceParameter->slice_type, slice_batch);
+            }
+
+            msg += INTER_VME_OUTPUT_IN_DWS;
+            offset += INTER_VME_OUTPUT_IN_BYTES;
+        }
+    }
+   
+    dri_bo_unmap(vme_context->vme_output.bo);
+
+    if ( last_slice ) {    
+        mfc_context->insert_object(ctx, encoder_context,
+                                   tail_data, 2, 8,
+                                   2, 1, 1, 0, slice_batch);
+    } else {
+        mfc_context->insert_object(ctx, encoder_context,
+                                   tail_data, 1, 8,
+                                   1, 1, 1, 0, slice_batch);
+    }
+
+    free(slice_header);
+
+}
+
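+/*
+ * Build the whole slice-level PAK command sequence on the CPU in a separate
+ * BSD batch buffer, terminated with MI_BATCH_BUFFER_END; the caller later
+ * chains to it from the primary batch.
+ */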
+static dri_bo *
+gen75_mfc_avc_software_batchbuffer(VADriverContextP ctx,
+                                  struct encode_state *encode_state,
+                                  struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD);
+    dri_bo *batch_bo = batch->buffer;
+    int i;
+
+    for (i = 0; i < encode_state->num_slice_params_ext; i++) {
+        gen75_mfc_avc_pipeline_slice_programing(ctx, encode_state, encoder_context, i, batch);
+    }
+
+    intel_batchbuffer_align(batch, 8);
+    
+    BEGIN_BCS_BATCH(batch, 2);
+    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_END);
+    ADVANCE_BCS_BATCH(batch);
+
+    dri_bo_reference(batch_bo);
+    intel_batchbuffer_free(batch);
+
+    return batch_bo;
+}
+
+#else
+
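+/*
+ * Hardware path: the mfc_batchbuffer_avc_intra/inter media kernels expand the
+ * per-slice head/tail commands and the per-MB VME results into the final PAK
+ * batch buffer on the GPU, replacing the CPU loop above.
+ */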
+static void
+gen75_mfc_batchbuffer_surfaces_input(VADriverContextP ctx,
+                                    struct encode_state *encode_state,
+                                    struct intel_encoder_context *encoder_context)
+
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    assert(vme_context->vme_output.bo);
+    mfc_context->buffer_suface_setup(ctx,
+                                     &mfc_context->gpe_context,
+                                     &vme_context->vme_output,
+                                     BINDING_TABLE_OFFSET(BIND_IDX_VME_OUTPUT),
+                                     SURFACE_STATE_OFFSET(BIND_IDX_VME_OUTPUT));
+    assert(mfc_context->aux_batchbuffer_surface.bo);
+    mfc_context->buffer_suface_setup(ctx,
+                                     &mfc_context->gpe_context,
+                                     &mfc_context->aux_batchbuffer_surface,
+                                     BINDING_TABLE_OFFSET(BIND_IDX_MFC_SLICE_HEADER),
+                                     SURFACE_STATE_OFFSET(BIND_IDX_MFC_SLICE_HEADER));
+}
+
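+/*
+ * Allocate and bind the MFC batch buffer surface the kernel writes into:
+ * one CMD_LEN_IN_OWORD block per macroblock, plus extra room (eight blocks
+ * per slice) for the head/tail commands and, presumably, one block for the
+ * final MI_BATCH_BUFFER_END.
+ */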
+static void
+gen75_mfc_batchbuffer_surfaces_output(VADriverContextP ctx,
+                                     struct encode_state *encode_state,
+                                     struct intel_encoder_context *encoder_context)
+
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
+    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
+    mfc_context->mfc_batchbuffer_surface.num_blocks = width_in_mbs * height_in_mbs + encode_state->num_slice_params_ext * 8 + 1;
+    mfc_context->mfc_batchbuffer_surface.size_block = 16 * CMD_LEN_IN_OWORD; /* CMD_LEN_IN_OWORD (4) OWords per MB command */
+    mfc_context->mfc_batchbuffer_surface.pitch = 16;
+    mfc_context->mfc_batchbuffer_surface.bo = dri_bo_alloc(i965->intel.bufmgr, 
+                                                           "MFC batchbuffer",
+                                                           mfc_context->mfc_batchbuffer_surface.num_blocks * mfc_context->mfc_batchbuffer_surface.size_block,
+                                                           0x1000);
+    mfc_context->buffer_suface_setup(ctx,
+                                     &mfc_context->gpe_context,
+                                     &mfc_context->mfc_batchbuffer_surface,
+                                     BINDING_TABLE_OFFSET(BIND_IDX_MFC_BATCHBUFFER),
+                                     SURFACE_STATE_OFFSET(BIND_IDX_MFC_BATCHBUFFER));
+}
+
+static void
+gen75_mfc_batchbuffer_surfaces_setup(VADriverContextP ctx, 
+                                    struct encode_state *encode_state,
+                                    struct intel_encoder_context *encoder_context)
+{
+    gen75_mfc_batchbuffer_surfaces_input(ctx, encode_state, encoder_context);
+    gen75_mfc_batchbuffer_surfaces_output(ctx, encode_state, encoder_context);
+}
+
+static void
+gen75_mfc_batchbuffer_idrt_setup(VADriverContextP ctx, 
+                                struct encode_state *encode_state,
+                                struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct gen6_interface_descriptor_data *desc;   
+    int i;
+    dri_bo *bo;
+
+    bo = mfc_context->gpe_context.idrt.bo;
+    dri_bo_map(bo, 1);
+    assert(bo->virtual);
+    desc = bo->virtual;
+
+    for (i = 0; i < mfc_context->gpe_context.num_kernels; i++) {
+        struct i965_kernel *kernel;
+
+        kernel = &mfc_context->gpe_context.kernels[i];
+        assert(sizeof(*desc) == 32);
+
+        /* Set up the descriptor table */
+        memset(desc, 0, sizeof(*desc));
+        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
+        desc->desc2.sampler_count = 0;
+        desc->desc2.sampler_state_pointer = 0;
+        desc->desc3.binding_table_entry_count = 2;
+        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
+        desc->desc4.constant_urb_entry_read_offset = 0;
+        desc->desc4.constant_urb_entry_read_length = 4;
+               
+        /*kernel start*/
+        dri_bo_emit_reloc(bo,  
+                          I915_GEM_DOMAIN_INSTRUCTION, 0,
+                          0,
+                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
+                          kernel->bo);
+        desc++;
+    }
+
+    dri_bo_unmap(bo);
+}
+
+static void
+gen75_mfc_batchbuffer_constant_setup(VADriverContextP ctx, 
+                                    struct encode_state *encode_state,
+                                    struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    
+    (void)mfc_context;
+}
+
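+/*
+ * Emit one CMD_MEDIA_OBJECT that dispatches the batch buffer kernel; the
+ * inline data carries the head/tail offsets and sizes, the number of MB
+ * commands to generate, the first/last flags, the starting MB position,
+ * the picture width in MBs and the slice QP.
+ */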
+static void
+gen75_mfc_batchbuffer_emit_object_command(struct intel_batchbuffer *batch,
+                                         int index,
+                                         int head_offset,
+                                         int batchbuffer_offset,
+                                         int head_size,
+                                         int tail_size,
+                                         int number_mb_cmds,
+                                         int first_object,
+                                         int last_object,
+                                         int last_slice,
+                                         int mb_x,
+                                         int mb_y,
+                                         int width_in_mbs,
+                                         int qp)
+{
+    BEGIN_BATCH(batch, 12);
+    
+    OUT_BATCH(batch, CMD_MEDIA_OBJECT | (12 - 2));
+    OUT_BATCH(batch, index);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+   
+    /*inline data */
+    OUT_BATCH(batch, head_offset);
+    OUT_BATCH(batch, batchbuffer_offset);
+    OUT_BATCH(batch, 
+              head_size << 16 |
+              tail_size);
+    OUT_BATCH(batch,
+              number_mb_cmds << 16 |
+              first_object << 2 |
+              last_object << 1 |
+              last_slice);
+    OUT_BATCH(batch,
+              mb_y << 8 |
+              mb_x);
+    OUT_BATCH(batch,
+              qp << 16 |
+              width_in_mbs);
+
+    ADVANCE_BATCH(batch);
+}
+
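+/*
+ * Split the slice into chunks of at most 128 macroblocks and emit one
+ * MEDIA_OBJECT per chunk; the first/last flags and the offset bookkeeping
+ * let the kernel copy the slice head commands before the first chunk and
+ * the tail commands after the last one.
+ */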
+static void
+gen75_mfc_avc_batchbuffer_slice_command(VADriverContextP ctx,
+                                       struct intel_encoder_context *encoder_context,
+                                       VAEncSliceParameterBufferH264 *slice_param,
+                                       int head_offset,
+                                       unsigned short head_size,
+                                       unsigned short tail_size,
+                                       int batchbuffer_offset,
+                                       int qp,
+                                       int last_slice)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int total_mbs = slice_param->num_macroblocks;
+    int number_mb_cmds = 128;
+    int starting_mb = 0;
+    int last_object = 0;
+    int first_object = 1;
+    int i;
+    int mb_x, mb_y;
+    int index = (slice_param->slice_type == SLICE_TYPE_I) ? MFC_BATCHBUFFER_AVC_INTRA : MFC_BATCHBUFFER_AVC_INTER;
+
+    for (i = 0; i < total_mbs / number_mb_cmds; i++) {
+        last_object = (total_mbs - starting_mb) == number_mb_cmds;
+        mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
+        mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
+        assert(mb_x <= 255 && mb_y <= 255);
+
+        starting_mb += number_mb_cmds;
+
+        gen75_mfc_batchbuffer_emit_object_command(batch,
+                                                 index,
+                                                 head_offset,
+                                                 batchbuffer_offset,
+                                                 head_size,
+                                                 tail_size,
+                                                 number_mb_cmds,
+                                                 first_object,
+                                                 last_object,
+                                                 last_slice,
+                                                 mb_x,
+                                                 mb_y,
+                                                 width_in_mbs,
+                                                 qp);
+
+        if (first_object) {
+            head_offset += head_size;
+            batchbuffer_offset += head_size;
+        }
+
+        if (last_object) {
+            head_offset += tail_size;
+            batchbuffer_offset += tail_size;
+        }
+
+        batchbuffer_offset += number_mb_cmds * CMD_LEN_IN_OWORD;
+
+        first_object = 0;
+    }
+
+    if (!last_object) {
+        last_object = 1;
+        number_mb_cmds = total_mbs % number_mb_cmds;
+        mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
+        mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
+        assert(mb_x <= 255 && mb_y <= 255);
+        starting_mb += number_mb_cmds;
+
+        gen75_mfc_batchbuffer_emit_object_command(batch,
+                                                 index,
+                                                 head_offset,
+                                                 batchbuffer_offset,
+                                                 head_size,
+                                                 tail_size,
+                                                 number_mb_cmds,
+                                                 first_object,
+                                                 last_object,
+                                                 last_slice,
+                                                 mb_x,
+                                                 mb_y,
+                                                 width_in_mbs,
+                                                 qp);
+    }
+}
+                          
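+/*
+ * Write the slice head (slice state, packed headers, slice header) and tail
+ * data into the aux batch buffer, then emit the MEDIA_OBJECT commands that
+ * make the kernel expand them, together with the per-MB commands, into the
+ * MFC batch buffer at 'batchbuffer_offset'.
+ */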
+/*
+ * Return the size in OWords (16 bytes each).
+ */
+static int
+gen75_mfc_avc_batchbuffer_slice(VADriverContextP ctx,
+                               struct encode_state *encode_state,
+                               struct intel_encoder_context *encoder_context,
+                               int slice_index,
+                               int batchbuffer_offset)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct intel_batchbuffer *slice_batch = mfc_context->aux_batchbuffer;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[slice_index]->buffer; 
+    int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
+    int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
+    int last_slice = (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks) == (width_in_mbs * height_in_mbs);
+    int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
+    unsigned int rate_control_mode = encoder_context->rate_control_mode;
+    unsigned char *slice_header = NULL;
+    int slice_header_length_in_bits = 0;
+    unsigned int tail_data[] = { 0x0, 0x0 };
+    long head_offset;
+    int old_used = intel_batchbuffer_used_size(slice_batch), used;
+    unsigned short head_size, tail_size;
+    int slice_type = pSliceParameter->slice_type;
+
+    if (rate_control_mode == VA_RC_CBR) {
+        qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
+        pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+    }
+
+    /* only 8-bit pixel bit-depth is supported */
+    assert(pSequenceParameter->bit_depth_luma_minus8 == 0);
+    assert(pSequenceParameter->bit_depth_chroma_minus8 == 0);
+    assert(pPicParameter->pic_init_qp >= 0 && pPicParameter->pic_init_qp < 52);
+    assert(qp >= 0 && qp < 52);
+
+    head_offset = old_used / 16;
+    gen75_mfc_avc_slice_state(ctx,
+                             pPicParameter,
+                             pSliceParameter,
+                             encode_state,
+                             encoder_context,
+                             (rate_control_mode == VA_RC_CBR),
+                             qp,
+                             slice_batch);
+
+    if (slice_index == 0)
+        gen75_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
+
+    slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
+
+    // slice header
+    mfc_context->insert_object(ctx,
+                               encoder_context,
+                               (unsigned int *)slice_header,
+                               ALIGN(slice_header_length_in_bits, 32) >> 5,
+                               slice_header_length_in_bits & 0x1f,
+                               5,  /* first 5 bytes are start code + nal unit type */
+                               1,
+                               0,
+                               1,
+                               slice_batch);
+    free(slice_header);
+
+    intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
+    used = intel_batchbuffer_used_size(slice_batch);
+    head_size = (used - old_used) / 16;
+    old_used = used;
+
+    /* tail */
+    if (last_slice) {    
+        mfc_context->insert_object(ctx,
+                                   encoder_context,
+                                   tail_data,
+                                   2,
+                                   8,
+                                   2,
+                                   1,
+                                   1,
+                                   0,
+                                   slice_batch);
+    } else {
+        mfc_context->insert_object(ctx,
+                                   encoder_context,
+                                   tail_data,
+                                   1,
+                                   8,
+                                   1,
+                                   1,
+                                   1,
+                                   0,
+                                   slice_batch);
+    }
+
+    intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
+    used = intel_batchbuffer_used_size(slice_batch);
+    tail_size = (used - old_used) / 16;
+
+   
+    gen75_mfc_avc_batchbuffer_slice_command(ctx,
+                                           encoder_context,
+                                           pSliceParameter,
+                                           head_offset,
+                                           head_size,
+                                           tail_size,
+                                           batchbuffer_offset,
+                                           qp,
+                                           last_slice);
+
+    return head_size + tail_size + pSliceParameter->num_macroblocks * CMD_LEN_IN_OWORD;
+}
+
+static void
+gen75_mfc_avc_batchbuffer_pipeline(VADriverContextP ctx,
+                                  struct encode_state *encode_state,
+                                  struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    int i, size, offset = 0;
+    intel_batchbuffer_start_atomic(batch, 0x4000); 
+    gen6_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
+
+    for ( i = 0; i < encode_state->num_slice_params_ext; i++) {
+        size = gen75_mfc_avc_batchbuffer_slice(ctx, encode_state, encoder_context, i, offset);
+        offset += size;
+    }
+
+    intel_batchbuffer_end_atomic(batch);
+    intel_batchbuffer_flush(batch);
+}
+
+static void
+gen75_mfc_build_avc_batchbuffer(VADriverContextP ctx, 
+                               struct encode_state *encode_state,
+                               struct intel_encoder_context *encoder_context)
+{
+    gen75_mfc_batchbuffer_surfaces_setup(ctx, encode_state, encoder_context);
+    gen75_mfc_batchbuffer_idrt_setup(ctx, encode_state, encoder_context);
+    gen75_mfc_batchbuffer_constant_setup(ctx, encode_state, encoder_context);
+    gen75_mfc_avc_batchbuffer_pipeline(ctx, encode_state, encoder_context);
+}
+
+static dri_bo *
+gen75_mfc_avc_hardware_batchbuffer(VADriverContextP ctx,
+                                  struct encode_state *encode_state,
+                                  struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    gen75_mfc_build_avc_batchbuffer(ctx, encode_state, encoder_context);
+    dri_bo_reference(mfc_context->mfc_batchbuffer_surface.bo);
+
+    return mfc_context->mfc_batchbuffer_surface.bo;
+}
+
+#endif
+
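+/*
+ * Top-level AVC PAK programming: build the slice-level batch (on the CPU or
+ * via the media kernels, depending on __SOFTWARE__), emit the picture-level
+ * state on the BCS ring and chain to the slice batch with
+ * MI_BATCH_BUFFER_START.
+ */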
+static void
+gen75_mfc_avc_pipeline_programing(VADriverContextP ctx,
+                                 struct encode_state *encode_state,
+                                 struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    dri_bo *slice_batch_bo;
+
+    if ( interlace_check(ctx, encode_state, encoder_context) ) {
+        fprintf(stderr, "Current VA driver don't support interlace mode!\n");
+        assert(0);
+        return; 
+    }
+
+#if __SOFTWARE__
+    slice_batch_bo = gen75_mfc_avc_software_batchbuffer(ctx, encode_state, encoder_context);
+#else
+    slice_batch_bo = gen75_mfc_avc_hardware_batchbuffer(ctx, encode_state, encoder_context);
+#endif
+
+    // begin programming
+    intel_batchbuffer_start_atomic_bcs(batch, 0x4000); 
+    intel_batchbuffer_emit_mi_flush(batch);
+    
+    // picture-level programming
+    gen75_mfc_avc_pipeline_picture_programing(ctx, encode_state, encoder_context);
+
+    BEGIN_BCS_BATCH(batch, 2);
+    OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8));
+    OUT_BCS_RELOC(batch,
+                  slice_batch_bo,
+                  I915_GEM_DOMAIN_COMMAND, 0, 
+                  0);
+    ADVANCE_BCS_BATCH(batch);
+
+    // end programming
+    intel_batchbuffer_end_atomic(batch);
+
+    dri_bo_unreference(slice_batch_bo);
+}
+
+
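+/*
+ * Encode one frame. In CBR mode the frame is re-encoded until the BRC
+ * reports no HRD violation; an overflow/underflow that persists even at the
+ * QP limits is reported once and then accepted.
+ */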
+static VAStatus
+gen75_mfc_avc_encode_picture(VADriverContextP ctx, 
+                            struct encode_state *encode_state,
+                            struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    unsigned int rate_control_mode = encoder_context->rate_control_mode;
+    int current_frame_bits_size;
+    int sts;
+    for (;;) {
+        gen75_mfc_init(ctx, encoder_context);
+        gen75_mfc_avc_prepare(ctx, encode_state, encoder_context);
+        /* Programming the BCS pipeline */
+        gen75_mfc_avc_pipeline_programing(ctx, encode_state, encoder_context); //filling the pipeline
+        gen75_mfc_run(ctx, encode_state, encoder_context);
+        if (rate_control_mode == VA_RC_CBR /*|| rate_control_mode == VA_RC_VBR*/) {
+            gen75_mfc_stop(ctx, encode_state, encoder_context, &current_frame_bits_size);
+            sts = gen75_mfc_brc_postpack(encode_state, mfc_context, current_frame_bits_size);
+            if (sts == BRC_NO_HRD_VIOLATION) {
+                gen75_mfc_hrd_context_update(encode_state, mfc_context);
+                break;
+            }
+            else if (sts == BRC_OVERFLOW_WITH_MIN_QP || sts == BRC_UNDERFLOW_WITH_MAX_QP) {
+                if (!mfc_context->hrd.violation_noted) {
+                    fprintf(stderr, "Unrepairable %s!\n", (sts == BRC_OVERFLOW_WITH_MIN_QP)? "overflow": "underflow");
+                    mfc_context->hrd.violation_noted = 1;
+                }
+                return VA_STATUS_SUCCESS;
+            }
+        } else {
+            break;
+        }
+    }
+
+    return VA_STATUS_SUCCESS;
+}
+
+static void gen75_mfc_brc_prepare(struct encode_state *encode_state,
+                          struct intel_encoder_context *encoder_context)
+{
+    unsigned int rate_control_mode = encoder_context->rate_control_mode;
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+    if (rate_control_mode == VA_RC_CBR) {
+        /* Programming bit rate control */
+        if ( mfc_context->bit_rate_control_context[SLICE_TYPE_I].MaxSizeInWord == 0 ) {
+            gen75_mfc_bit_rate_control_context_init(encode_state, mfc_context);
+            gen75_mfc_brc_init(encode_state, encoder_context);
+        }
+
+        /* Programming HRD control */
+        if ( mfc_context->vui_hrd.i_cpb_size_value == 0 )
+            gen75_mfc_hrd_context_init(encode_state, encoder_context);    
+    }
+}
+
+static void
+gen75_mfc_context_destroy(void *context)
+{
+    struct gen6_mfc_context *mfc_context = context;
+    int i;
+
+    dri_bo_unreference(mfc_context->post_deblocking_output.bo);
+    mfc_context->post_deblocking_output.bo = NULL;
+
+    dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
+    mfc_context->pre_deblocking_output.bo = NULL;
+
+    dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
+    mfc_context->uncompressed_picture_source.bo = NULL;
+
+    dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo); 
+    mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
+
+    for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
+        dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
+        mfc_context->direct_mv_buffers[i].bo = NULL;
+    }
+
+    dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
+    mfc_context->intra_row_store_scratch_buffer.bo = NULL;
+
+    dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
+    mfc_context->macroblock_status_buffer.bo = NULL;
+
+    dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
+    mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
+
+    dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
+    mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;
+
+
+    for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
+        dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
+        mfc_context->reference_surfaces[i].bo = NULL;  
+    }
+
+    i965_gpe_context_destroy(&mfc_context->gpe_context);
+
+    dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
+    mfc_context->mfc_batchbuffer_surface.bo = NULL;
+
+    dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
+    mfc_context->aux_batchbuffer_surface.bo = NULL;
+
+    if (mfc_context->aux_batchbuffer)
+        intel_batchbuffer_free(mfc_context->aux_batchbuffer);
+
+    mfc_context->aux_batchbuffer = NULL;
+
+    free(mfc_context);
+}
+
+static VAStatus gen75_mfc_pipeline(VADriverContextP ctx,
+                  VAProfile profile,
+                  struct encode_state *encode_state,
+                  struct intel_encoder_context *encoder_context)
+{
+    VAStatus vaStatus;
+
+    switch (profile) {
+    case VAProfileH264Baseline:
+    case VAProfileH264Main:
+    case VAProfileH264High:
+        vaStatus = gen75_mfc_avc_encode_picture(ctx, encode_state, encoder_context);
+        break;
+
+        /* FIXME: add support for other profiles */
+    default:
+        vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
+        break;
+    }
+
+    return vaStatus;
+}
+
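+/*
+ * Create the Haswell MFC context: configure the GPE context parameters, load
+ * the gen75 MFC kernels and hook up the Haswell-specific state/pipeline
+ * callbacks.
+ */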
+Bool gen75_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct gen6_mfc_context *mfc_context = calloc(1, sizeof(struct gen6_mfc_context));
+
+    mfc_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
+
+    mfc_context->gpe_context.idrt.max_entries = MAX_GPE_KERNELS;
+    mfc_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);
+
+    mfc_context->gpe_context.curbe.length = 32 * 4;
+
+    mfc_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
+    mfc_context->gpe_context.vfe_state.num_urb_entries = 16;
+    mfc_context->gpe_context.vfe_state.gpgpu_mode = 0;
+    mfc_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
+    mfc_context->gpe_context.vfe_state.curbe_allocation_size = 37 - 1;
+
+    i965_gpe_load_kernels(ctx,
+                          &mfc_context->gpe_context,
+                          gen75_mfc_kernels,
+                          NUM_MFC_KERNEL);
+
+    mfc_context->pipe_mode_select = gen75_mfc_pipe_mode_select;
+    mfc_context->set_surface_state = gen75_mfc_surface_state;
+    mfc_context->ind_obj_base_addr_state = gen75_mfc_ind_obj_base_addr_state;
+    mfc_context->avc_img_state = gen75_mfc_avc_img_state;
+    mfc_context->avc_qm_state = gen75_mfc_avc_qm_state;
+    mfc_context->avc_fqm_state = gen75_mfc_avc_fqm_state;
+    mfc_context->insert_object = gen75_mfc_avc_insert_object;
+    mfc_context->buffer_suface_setup = gen7_gpe_buffer_suface_setup;
+
+    encoder_context->mfc_context = mfc_context;
+    encoder_context->mfc_context_destroy = gen75_mfc_context_destroy;
+    encoder_context->mfc_pipeline = gen75_mfc_pipeline;
+    encoder_context->mfc_brc_prepare = gen75_mfc_brc_prepare;
+
+    return True;
+}
diff --git a/src/gen75_vme.c b/src/gen75_vme.c
new file mode 100644 (file)
index 0000000..0c52677
--- /dev/null
@@ -0,0 +1,628 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zhao Yakui <yakui.zhao@intel.com>
+ *    Xiang Haihao <haihao.xiang@intel.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "intel_batchbuffer.h"
+#include "intel_driver.h"
+
+#include "i965_defines.h"
+#include "i965_drv_video.h"
+#include "i965_encoder.h"
+#include "gen6_vme.h"
+#include "gen6_mfc.h"
+
+#define SURFACE_STATE_PADDED_SIZE_0_GEN7        ALIGN(sizeof(struct gen7_surface_state), 32)
+#define SURFACE_STATE_PADDED_SIZE_1_GEN7        ALIGN(sizeof(struct gen7_surface_state2), 32)
+#define SURFACE_STATE_PADDED_SIZE_GEN7          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN7, SURFACE_STATE_PADDED_SIZE_1_GEN7)
+
+#define SURFACE_STATE_PADDED_SIZE_0_GEN6        ALIGN(sizeof(struct i965_surface_state), 32)
+#define SURFACE_STATE_PADDED_SIZE_1_GEN6        ALIGN(sizeof(struct i965_surface_state2), 32)
+#define SURFACE_STATE_PADDED_SIZE_GEN6          MAX(SURFACE_STATE_PADDED_SIZE_0_GEN6, SURFACE_STATE_PADDED_SIZE_1_GEN6)
+
+#define SURFACE_STATE_PADDED_SIZE               MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
+#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
+#define BINDING_TABLE_OFFSET(index)             (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * index)
+
+#define VME_INTRA_SHADER        0
+#define VME_INTER_SHADER        1
+#define VME_BATCHBUFFER         2
+
+#define CURBE_ALLOCATION_SIZE   37              /* in 256-bit */
+#define CURBE_TOTAL_DATA_LENGTH (4 * 32)        /* in bytes, it should be less than or equal to CURBE_ALLOCATION_SIZE * 32 */
+#define CURBE_URB_ENTRY_LENGTH  4               /* in 256-bit, it should be less than or equal to CURBE_TOTAL_DATA_LENGTH / 32 */
+  
+static const uint32_t gen75_vme_intra_frame[][4] = {
+#include "shaders/vme/intra_frame.g7b"
+};
+
+static const uint32_t gen75_vme_inter_frame[][4] = {
+#include "shaders/vme/inter_frame.g7b"
+};
+
+static const uint32_t gen75_vme_batchbuffer[][4] = {
+#include "shaders/vme/batchbuffer.g7b"
+};
+
+static struct i965_kernel gen75_vme_kernels[] = {
+    {
+        "VME Intra Frame",
+        VME_INTRA_SHADER, /*index*/
+        gen75_vme_intra_frame,                         
+        sizeof(gen75_vme_intra_frame),         
+        NULL
+    },
+    {
+        "VME inter Frame",
+        VME_INTER_SHADER,
+        gen75_vme_inter_frame,
+        sizeof(gen75_vme_inter_frame),
+        NULL
+    },
+    {
+        "VME BATCHBUFFER",
+        VME_BATCHBUFFER,
+        gen75_vme_batchbuffer,
+        sizeof(gen75_vme_batchbuffer),
+        NULL
+    },
+};
+
+/* only used for VME source surface state */
+static void 
+gen75_vme_source_surface_state(VADriverContextP ctx,
+                              int index,
+                              struct object_surface *obj_surface,
+                              struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+
+    vme_context->vme_surface2_setup(ctx,
+                                    &vme_context->gpe_context,
+                                    obj_surface,
+                                    BINDING_TABLE_OFFSET(index),
+                                    SURFACE_STATE_OFFSET(index));
+}
+
+static void
+gen75_vme_media_source_surface_state(VADriverContextP ctx,
+                                    int index,
+                                    struct object_surface *obj_surface,
+                                    struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+
+    vme_context->vme_media_rw_surface_setup(ctx,
+                                            &vme_context->gpe_context,
+                                            obj_surface,
+                                            BINDING_TABLE_OFFSET(index),
+                                            SURFACE_STATE_OFFSET(index));
+}
+
+static void
+gen75_vme_output_buffer_setup(VADriverContextP ctx,
+                             struct encode_state *encode_state,
+                             int index,
+                             struct intel_encoder_context *encoder_context)
+
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
+    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
+    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
+    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
+
+    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
+    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
+
+    if (is_intra)
+        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES;
+    else
+        vme_context->vme_output.size_block = INTER_VME_OUTPUT_IN_BYTES;
+
+    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr, 
+                                              "VME output buffer",
+                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
+                                              0x1000);
+    assert(vme_context->vme_output.bo);
+    vme_context->vme_buffer_suface_setup(ctx,
+                                         &vme_context->gpe_context,
+                                         &vme_context->vme_output,
+                                         BINDING_TABLE_OFFSET(index),
+                                         SURFACE_STATE_OFFSET(index));
+}
+
+static void
+gen75_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
+                                      struct encode_state *encode_state,
+                                      int index,
+                                      struct intel_encoder_context *encoder_context)
+
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
+    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
+
+    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
+    vme_context->vme_batchbuffer.size_block = 32; /* 2 OWORDs */
+    vme_context->vme_batchbuffer.pitch = 16;
+    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr, 
+                                                   "VME batchbuffer",
+                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
+                                                   0x1000);
+    vme_context->vme_buffer_suface_setup(ctx,
+                                         &vme_context->gpe_context,
+                                         &vme_context->vme_batchbuffer,
+                                         BINDING_TABLE_OFFSET(index),
+                                         SURFACE_STATE_OFFSET(index));
+}
+
+static VAStatus
+gen75_vme_surface_setup(VADriverContextP ctx, 
+                       struct encode_state *encode_state,
+                       int is_intra,
+                       struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct object_surface *obj_surface;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+
+    /* Set up the surface states */
+    /* current picture for encoding */
+    obj_surface = SURFACE(encoder_context->input_yuv_surface);
+    assert(obj_surface);
+    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
+    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
+
+    if (!is_intra) {
+        /* reference 0 */
+        obj_surface = SURFACE(pPicParameter->ReferenceFrames[0].picture_id);
+        assert(obj_surface);
+        if ( obj_surface->bo != NULL)
+            gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);
+
+        /* reference 1 */
+        obj_surface = SURFACE(pPicParameter->ReferenceFrames[1].picture_id);
+        assert(obj_surface);
+        if ( obj_surface->bo != NULL ) 
+            gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
+    }
+
+    /* VME output */
+    gen75_vme_output_buffer_setup(ctx, encode_state, 3, encoder_context);
+    gen75_vme_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
+
+    return VA_STATUS_SUCCESS;
+}
+
+static VAStatus gen75_vme_interface_setup(VADriverContextP ctx, 
+                                         struct encode_state *encode_state,
+                                         struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    struct gen6_interface_descriptor_data *desc;   
+    int i;
+    dri_bo *bo;
+
+    bo = vme_context->gpe_context.idrt.bo;
+    dri_bo_map(bo, 1);
+    assert(bo->virtual);
+    desc = bo->virtual;
+
+    for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
+        struct i965_kernel *kernel;
+        kernel = &vme_context->gpe_context.kernels[i];
+        assert(sizeof(*desc) == 32);
+        /* Set up the descriptor table */
+        memset(desc, 0, sizeof(*desc));
+        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
+        desc->desc2.sampler_count = 1; /* FIXME: */
+        desc->desc2.sampler_state_pointer = (vme_context->vme_state.bo->offset >> 5);
+        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
+        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
+        desc->desc4.constant_urb_entry_read_offset = 0;
+        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;
+               
+        /*kernel start*/
+        dri_bo_emit_reloc(bo,  
+                          I915_GEM_DOMAIN_INSTRUCTION, 0,
+                          0,
+                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
+                          kernel->bo);
+        /*Sampler State(VME state pointer)*/
+        dri_bo_emit_reloc(bo,
+                          I915_GEM_DOMAIN_INSTRUCTION, 0,
+                          (1 << 2),                                                                    //
+                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc2),
+                          vme_context->vme_state.bo);
+        desc++;
+    }
+    dri_bo_unmap(bo);
+
+    return VA_STATUS_SUCCESS;
+}
+
+static VAStatus gen75_vme_constant_setup(VADriverContextP ctx, 
+                                        struct encode_state *encode_state,
+                                        struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    // unsigned char *constant_buffer;
+
+    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
+    assert(vme_context->gpe_context.curbe.bo->virtual);
+    // constant_buffer = vme_context->curbe.bo->virtual;
+       
+    /* TODO: copy the buffer into CURBE */
+
+    dri_bo_unmap( vme_context->gpe_context.curbe.bo);
+
+    return VA_STATUS_SUCCESS;
+}
+
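+/*
+ * Per-QP packed intra MB mode costs; gen75_vme_state_setup_fixup() below
+ * loads the entry for the current QP into DW16 of the VME state message
+ * (the intra cost LUT).
+ */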
+static const unsigned int intra_mb_mode_cost_table[] = {
+    0x31110001, // for qp0
+    0x09110001, // for qp1
+    0x15030001, // for qp2
+    0x0b030001, // for qp3
+    0x0d030011, // for qp4
+    0x17210011, // for qp5
+    0x41210011, // for qp6
+    0x19210011, // for qp7
+    0x25050003, // for qp8
+    0x1b130003, // for qp9
+    0x1d130003, // for qp10
+    0x27070021, // for qp11
+    0x51310021, // for qp12
+    0x29090021, // for qp13
+    0x35150005, // for qp14
+    0x2b0b0013, // for qp15
+    0x2d0d0013, // for qp16
+    0x37170007, // for qp17
+    0x61410031, // for qp18
+    0x39190009, // for qp19
+    0x45250015, // for qp20
+    0x3b1b000b, // for qp21
+    0x3d1d000d, // for qp22
+    0x47270017, // for qp23
+    0x71510041, // for qp24 ! center for qp=0..30
+    0x49290019, // for qp25
+    0x55350025, // for qp26
+    0x4b2b001b, // for qp27
+    0x4d2d001d, // for qp28
+    0x57370027, // for qp29
+    0x81610051, // for qp30
+    0x57270017, // for qp31
+    0x81510041, // for qp32 ! center for qp=31..51
+    0x59290019, // for qp33
+    0x65350025, // for qp34
+    0x5b2b001b, // for qp35
+    0x5d2d001d, // for qp36
+    0x67370027, // for qp37
+    0x91610051, // for qp38
+    0x69390029, // for qp39
+    0x75450035, // for qp40
+    0x6b3b002b, // for qp41
+    0x6d3d002d, // for qp42
+    0x77470037, // for qp43
+    0xa1710061, // for qp44
+    0x79490039, // for qp45
+    0x85550045, // for qp46
+    0x7b4b003b, // for qp47
+    0x7d4d003d, // for qp48
+    0x87570047, // for qp49
+    0xb1810071, // for qp50
+    0x89590049  // for qp51
+};
+
+static void gen75_vme_state_setup_fixup(VADriverContextP ctx,
+                                       struct encode_state *encode_state,
+                                       struct intel_encoder_context *encoder_context,
+                                       unsigned int *vme_state_message)
+{
+    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+    VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
+
+    if (slice_param->slice_type != SLICE_TYPE_I &&
+        slice_param->slice_type != SLICE_TYPE_SI)
+        return;
+    if (encoder_context->rate_control_mode == VA_RC_CQP)
+        vme_state_message[16] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
+    else
+        vme_state_message[16] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[slice_param->slice_type].QpPrimeY];
+}
+
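+/*
+ * Fill the 32-DWORD VME state message with the default search-path/cost
+ * values, then let the fixup above override the intra mode cost (DW16)
+ * based on the slice QP.
+ */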
+static VAStatus gen75_vme_vme_state_setup(VADriverContextP ctx,
+                                         struct encode_state *encode_state,
+                                         int is_intra,
+                                         struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    unsigned int *vme_state_message;
+    int i;
+       
+    //building VME state message
+    dri_bo_map(vme_context->vme_state.bo, 1);
+    assert(vme_context->vme_state.bo->virtual);
+    vme_state_message = (unsigned int *)vme_context->vme_state.bo->virtual;
+
+    vme_state_message[0] = 0x01010101;
+    vme_state_message[1] = 0x10010101;
+    vme_state_message[2] = 0x0F0F0F0F;
+    vme_state_message[3] = 0x100F0F0F;
+    vme_state_message[4] = 0x01010101;
+    vme_state_message[5] = 0x00010101;
+    vme_state_message[6] = 0x01010101;
+    vme_state_message[7] = 0x10010101;
+    vme_state_message[8] = 0x0F0F0F0F;
+    vme_state_message[9] = 0x100F0F0F;
+    vme_state_message[10] = 0x01010101;
+    vme_state_message[11] = 0x00010101;
+    vme_state_message[12] = 0x00;
+    vme_state_message[13] = 0x00;
+
+    vme_state_message[14] = 0x4a4a;
+    vme_state_message[15] = 0x0;
+    vme_state_message[16] = 0x4a4a4a4a;
+    vme_state_message[17] = 0x4a4a4a4a;
+    vme_state_message[18] = 0x22120200;
+    vme_state_message[19] = 0x62524232;
+
+    for(i = 20; i < 32; i++) {
+        vme_state_message[i] = 0;
+    }
+    // vme_state_message[16] = 0x42424242;  // cost function LUT set 0 for Intra
+
+    gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);
+
+    dri_bo_unmap(vme_context->vme_state.bo);
+    return VA_STATUS_SUCCESS;
+}
+
+static void
+gen75_vme_fill_vme_batchbuffer(VADriverContextP ctx, 
+                              struct encode_state *encode_state,
+                              int mb_width, int mb_height,
+                              int kernel,
+                              int transform_8x8_mode_flag,
+                              struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    int number_mb_cmds;
+    int mb_x = 0, mb_y = 0;
+    int i, s;
+    unsigned int *command_ptr;
+
+    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
+    command_ptr = vme_context->vme_batchbuffer.bo->virtual;
+
+    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
+        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer; 
+        int slice_mb_begin = pSliceParameter->macroblock_address;
+        int slice_mb_number = pSliceParameter->num_macroblocks;
+        
+        for (i = 0; i < slice_mb_number; ) {
+            int mb_count = i + slice_mb_begin;
+            mb_x = mb_count % mb_width;
+            mb_y = mb_count / mb_width;
+            if (i == 0) {
+                number_mb_cmds = mb_width;        /* the first command of a slice must mark the slice edge */
+            } else if ((i + 128) <= slice_mb_number) {
+                number_mb_cmds = 128;
+            } else {
+                number_mb_cmds = slice_mb_number - i;
+            }
+
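+            /*
+             * Each MEDIA_OBJECT is 8 dwords long: the 6 dwords emitted
+             * here plus the 2 inline data dwords below, which pass the
+             * MB position, picture width in MBs, MB count, the
+             * transform_8x8 flag and a first-command marker to the
+             * VME kernel.
+             */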
+            *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
+            *command_ptr++ = kernel;
+            *command_ptr++ = 0;
+            *command_ptr++ = 0;
+            *command_ptr++ = 0;
+            *command_ptr++ = 0;
+
+            /* inline data */
+            *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
+            *command_ptr++ = (number_mb_cmds << 16 | transform_8x8_mode_flag | ((i == 0) << 1));
+
+            i += number_mb_cmds;
+        }
+    }
+
+    *command_ptr++ = 0;
+    *command_ptr++ = MI_BATCH_BUFFER_END;
+
+    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
+}
+
+static void gen75_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    dri_bo *bo;
+
+    i965_gpe_context_init(ctx, &vme_context->gpe_context);
+
+    /* VME output buffer */
+    dri_bo_unreference(vme_context->vme_output.bo);
+    vme_context->vme_output.bo = NULL;
+
+    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
+    vme_context->vme_batchbuffer.bo = NULL;
+
+    /* VME state */
+    dri_bo_unreference(vme_context->vme_state.bo);
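+    /* 16KB buffer for the VME state message (32 dwords are written by gen75_vme_vme_state_setup()) */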
+    bo = dri_bo_alloc(i965->intel.bufmgr,
+                      "Buffer",
+                      1024*16, 64);
+    assert(bo);
+    vme_context->vme_state.bo = bo;
+}
+
+static void gen75_vme_pipeline_programing(VADriverContextP ctx, 
+                                         struct encode_state *encode_state,
+                                         struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
+    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
+    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
+    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
+
+    gen75_vme_fill_vme_batchbuffer(ctx, 
+                                  encode_state,
+                                  width_in_mbs, height_in_mbs,
+                                  is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
+                                  pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
+                                  encoder_context);
+
+    intel_batchbuffer_start_atomic(batch, 0x1000);
+    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
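+    /* Jump into the MEDIA_OBJECT command buffer built by gen75_vme_fill_vme_batchbuffer() above */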
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
+    OUT_RELOC(batch,
+              vme_context->vme_batchbuffer.bo,
+              I915_GEM_DOMAIN_COMMAND, 0, 
+              0);
+    ADVANCE_BATCH(batch);
+
+    intel_batchbuffer_end_atomic(batch);
+}
+
+static VAStatus gen75_vme_prepare(VADriverContextP ctx, 
+                                 struct encode_state *encode_state,
+                                 struct intel_encoder_context *encoder_context)
+{
+    VAStatus vaStatus = VA_STATUS_SUCCESS;
+    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
+    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
+
+    /* Set up all the memory objects */
+    gen75_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
+    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
+    gen75_vme_constant_setup(ctx, encode_state, encoder_context);
+    gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
+
+    /* Program the media pipeline */
+    gen75_vme_pipeline_programing(ctx, encode_state, encoder_context);
+
+    return vaStatus;
+}
+
+static VAStatus gen75_vme_run(VADriverContextP ctx, 
+                             struct encode_state *encode_state,
+                             struct intel_encoder_context *encoder_context)
+{
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+
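+    /* Flushing the batch submits the VME pipeline programmed in gen75_vme_prepare() */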
+    intel_batchbuffer_flush(batch);
+
+    return VA_STATUS_SUCCESS;
+}
+
+static VAStatus gen75_vme_stop(VADriverContextP ctx, 
+                              struct encode_state *encode_state,
+                              struct intel_encoder_context *encoder_context)
+{
+    return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+gen75_vme_pipeline(VADriverContextP ctx,
+                  VAProfile profile,
+                  struct encode_state *encode_state,
+                  struct intel_encoder_context *encoder_context)
+{
+    gen75_vme_media_init(ctx, encoder_context);
+    gen75_vme_prepare(ctx, encode_state, encoder_context);
+    gen75_vme_run(ctx, encode_state, encoder_context);
+    gen75_vme_stop(ctx, encode_state, encoder_context);
+
+    return VA_STATUS_SUCCESS;
+}
+
+static void
+gen75_vme_context_destroy(void *context)
+{
+    struct gen6_vme_context *vme_context = context;
+
+    i965_gpe_context_destroy(&vme_context->gpe_context);
+
+    dri_bo_unreference(vme_context->vme_output.bo);
+    vme_context->vme_output.bo = NULL;
+
+    dri_bo_unreference(vme_context->vme_state.bo);
+    vme_context->vme_state.bo = NULL;
+
+    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
+    vme_context->vme_batchbuffer.bo = NULL;
+
+    free(vme_context);
+}
+
+Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
+
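+    /*
+     * Haswell reuses the gen6 VME context layout; the Haswell-specific
+     * kernels and the gen7 surface state helpers are hooked up below.
+     */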
+    vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
+
+    vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
+    vme_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);
+
+    vme_context->gpe_context.curbe.length = CURBE_TOTAL_DATA_LENGTH;
+
+    vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
+    vme_context->gpe_context.vfe_state.num_urb_entries = 16;
+    vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
+    vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
+    vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;
+
+    i965_gpe_load_kernels(ctx,
+                          &vme_context->gpe_context,
+                          gen75_vme_kernels,
+                          GEN6_VME_KERNEL_NUMBER);
+    vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
+    vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
+    vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
+
+    encoder_context->vme_context = vme_context;
+    encoder_context->vme_context_destroy = gen75_vme_context_destroy;
+    encoder_context->vme_pipeline = gen75_vme_pipeline;
+
+    return True;
+}
index 8a99e43..2860a58 100755 (executable)
@@ -50,6 +50,7 @@
 #include "intel_batchbuffer.h"
 #include "i965_defines.h"
 #include "i965_drv_video.h"
+#include "i965_encoder.h"
 
 #define CONFIG_ID_OFFSET                0x01000000
 #define CONTEXT_ID_OFFSET               0x02000000
@@ -223,6 +224,14 @@ static struct hw_codec_info gen7_hw_codec_info = {
     .max_height = 4096,
 };
 
+static struct hw_codec_info gen75_hw_codec_info = {
+    .dec_hw_context_init = gen7_dec_hw_context_init,
+    .enc_hw_context_init = gen75_enc_hw_context_init,
+    .proc_hw_context_init = i965_proc_context_init,
+    .max_width = 4096,
+    .max_height = 4096,
+};
+
 #define I965_PACKED_HEADER_BASE         0
 #define I965_PACKED_MISC_HEADER_BASE    3
 
@@ -2090,7 +2099,9 @@ i965_Init(VADriverContextP ctx)
     if (intel_driver_init(ctx) == False)
         return VA_STATUS_ERROR_UNKNOWN;
 
-    if (IS_G4X(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_id))
+        i965->codec_info = &gen75_hw_codec_info;
+    else if (IS_G4X(i965->intel.device_id))
         i965->codec_info = &g4x_hw_codec_info;
     else if (IS_IRONLAKE(i965->intel.device_id))
         i965->codec_info = &ironlake_hw_codec_info;
index d701d50..6d10a6d 100644 (file)
@@ -325,4 +325,11 @@ va_enc_packed_type_to_idx(int packed_type);
 /* reserve 1 byte for internal using */
 #define I965_CODEDBUFFER_SIZE   ALIGN(sizeof(VACodedBufferSegment) + 1, 64)
 
+
+extern VAStatus i965_MapBuffer(VADriverContextP ctx,
+                               VABufferID buf_id,       /* in */
+                               void **pbuf);            /* out */
+
+extern VAStatus i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
+
 #endif /* _I965_DRV_VIDEO_H_ */
index 4f0c245..b37915b 100644 (file)
@@ -37,6 +37,8 @@
 #include "i965_defines.h"
 #include "i965_drv_video.h"
 #include "i965_encoder.h"
+#include "gen6_vme.h"
+#include "gen6_mfc.h"
 
 extern Bool gen6_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context);
 extern Bool gen6_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context);
@@ -215,3 +217,37 @@ gen7_enc_hw_context_init(VADriverContextP ctx, struct object_config *obj_config)
 
     return (struct hw_context *)encoder_context;
 }
+
+struct hw_context *
+gen75_enc_hw_context_init(VADriverContextP ctx, struct object_config *obj_config)
+{
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_encoder_context *encoder_context = calloc(1, sizeof(struct intel_encoder_context));
+    int i;
+
+    encoder_context->base.destroy = intel_encoder_context_destroy;
+    encoder_context->base.run = intel_encoder_end_picture;
+    encoder_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
+    encoder_context->input_yuv_surface = VA_INVALID_SURFACE;
+    encoder_context->is_tmp_id = 0;
+    encoder_context->rate_control_mode = VA_RC_NONE;
+
+    for (i = 0; i < obj_config->num_attribs; i++) {
+        if (obj_config->attrib_list[i].type == VAConfigAttribRateControl) {
+            encoder_context->rate_control_mode = obj_config->attrib_list[i].value;
+            break;
+        }
+    }
+
+    gen75_vme_context_init(ctx, encoder_context);
+    assert(encoder_context->vme_context);
+    assert(encoder_context->vme_context_destroy);
+    assert(encoder_context->vme_pipeline);
+
+    gen75_mfc_context_init(ctx, encoder_context);
+    assert(encoder_context->mfc_context);
+    assert(encoder_context->mfc_context_destroy);
+    assert(encoder_context->mfc_pipeline);
+
+    return (struct hw_context *)encoder_context;
+}
index 180aa65..2da608f 100644 (file)
@@ -58,6 +58,9 @@ struct intel_encoder_context
                             struct intel_encoder_context *encoder_context);
 };
 
+extern struct hw_context *
+gen75_enc_hw_context_init(VADriverContextP ctx, struct object_config *obj_config);
+
 #endif /* _I965_ENCODER_H_ */