#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include <string.h>
#include <assert.h>
#define VME_INTRA_SHADER 0
#define VME_INTER_SHADER 1
+#define VME_BINTER_SHADER 3
#define VME_BATCHBUFFER 2
#define CURBE_ALLOCATION_SIZE 37 /* in 256-bit */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32) /* in byte, it should be less than or equal to CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH 4 /* in 256-bit, it should be less than or equal to CURBE_TOTAL_DATA_LENGTH / 32 */
+
+#define VME_MSG_LENGTH 32
static const uint32_t gen75_vme_intra_frame[][4] = {
-#include "shaders/vme/intra_frame.g7b"
+#include "shaders/vme/intra_frame_haswell.g75b"
};
static const uint32_t gen75_vme_inter_frame[][4] = {
-#include "shaders/vme/inter_frame.g7b"
+#include "shaders/vme/inter_frame_haswell.g75b"
+};
+
+static const uint32_t gen75_vme_inter_bframe[][4] = {
+#include "shaders/vme/inter_bframe_haswell.g75b"
};
static const uint32_t gen75_vme_batchbuffer[][4] = {
-#include "shaders/vme/batchbuffer.g7b"
+#include "shaders/vme/batchbuffer.g75b"
};
static struct i965_kernel gen75_vme_kernels[] = {
sizeof(gen75_vme_batchbuffer),
NULL
},
+ {
+ "VME inter BFrame",
+ VME_BINTER_SHADER,
+ gen75_vme_inter_bframe,
+ sizeof(gen75_vme_inter_bframe),
+ NULL
+ }
+};
+
+/* MPEG-2 VME shader binaries for Haswell.
+ * NOTE(review): the MPEG-2 intra table reuses the H.264
+ * intra_frame_haswell.g75b binary — confirm this is intentional
+ * (there is a dedicated mpeg2_inter_frame kernel but no mpeg2_intra). */
+static const uint32_t gen75_vme_mpeg2_intra_frame[][4] = {
+#include "shaders/vme/intra_frame_haswell.g75b"
+};
+
+static const uint32_t gen75_vme_mpeg2_inter_frame[][4] = {
+#include "shaders/vme/mpeg2_inter_frame_haswell.g75b"
+};
+
+/* The MEDIA_OBJECT batchbuffer-builder kernel is shared with H.264. */
+static const uint32_t gen75_vme_mpeg2_batchbuffer[][4] = {
+#include "shaders/vme/batchbuffer.g75b"
+};
+
+/* MPEG-2 kernel table; kernels are selected by the VME_*_SHADER index
+ * placed in each MEDIA_OBJECT command.  No B-frame kernel is registered
+ * for MPEG-2 (B pictures use the plain inter kernel). */
+static struct i965_kernel gen75_vme_mpeg2_kernels[] = {
+    {
+        "VME Intra Frame",
+        VME_INTRA_SHADER, /*index*/
+        gen75_vme_mpeg2_intra_frame,
+        sizeof(gen75_vme_mpeg2_intra_frame),
+        NULL
+    },
+    {
+        "VME inter Frame",
+        VME_INTER_SHADER,
+        gen75_vme_mpeg2_inter_frame,
+        sizeof(gen75_vme_mpeg2_inter_frame),
+        NULL
+    },
+    {
+        "VME BATCHBUFFER",
+        VME_BATCHBUFFER,
+        gen75_vme_mpeg2_batchbuffer,
+        sizeof(gen75_vme_mpeg2_batchbuffer),
+        NULL
+    },
};
/* only used for VME source surface state */
static void
gen75_vme_source_surface_state(VADriverContextP ctx,
- int index,
- struct object_surface *obj_surface,
- struct intel_encoder_context *encoder_context)
+ int index,
+ struct object_surface *obj_surface,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
static void
gen75_vme_media_source_surface_state(VADriverContextP ctx,
- int index,
- struct object_surface *obj_surface,
- struct intel_encoder_context *encoder_context)
+ int index,
+ struct object_surface *obj_surface,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
static void
gen75_vme_media_chroma_source_surface_state(VADriverContextP ctx,
- int index,
- struct object_surface *obj_surface,
- struct intel_encoder_context *encoder_context)
+ int index,
+ struct object_surface *obj_surface,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
vme_context->vme_media_chroma_surface_setup(ctx,
- &vme_context->gpe_context,
- obj_surface,
- BINDING_TABLE_OFFSET(index),
- SURFACE_STATE_OFFSET(index));
+ &vme_context->gpe_context,
+ obj_surface,
+ BINDING_TABLE_OFFSET(index),
+ SURFACE_STATE_OFFSET(index));
}
static void
gen75_vme_output_buffer_setup(VADriverContextP ctx,
- struct encode_state *encode_state,
- int index,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ int index,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
if (is_intra)
- vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES;
+ vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
else
- vme_context->vme_output.size_block = INTER_VME_OUTPUT_IN_BYTES;
+ vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
+ /*
+ * Inter MV . 32-byte Intra search + 16 IME info + 128 IME MV + 32 IME Ref
+ * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
+ * 16 * (2 + 2 * (1 + 8 + 2))= 16 * 24.
+ */
vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
"VME output buffer",
static void
gen75_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
- struct encode_state *encode_state,
- int index,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ int index,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
- vme_context->vme_batchbuffer.size_block = 32; /* 2 OWORDs */
+ vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
vme_context->vme_batchbuffer.pitch = 16;
vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
"VME batchbuffer",
static VAStatus
gen75_vme_surface_setup(VADriverContextP ctx,
- struct encode_state *encode_state,
- int is_intra,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ int is_intra,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct object_surface *obj_surface;
}
static VAStatus gen75_vme_interface_setup(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
struct gen6_interface_descriptor_data *desc;
assert(bo->virtual);
desc = bo->virtual;
- for (i = 0; i < GEN6_VME_KERNEL_NUMBER; i++) {
+ for (i = 0; i < vme_context->vme_kernel_sum; i++) {
struct i965_kernel *kernel;
kernel = &vme_context->gpe_context.kernels[i];
assert(sizeof(*desc) == 32);
/*Setup the descritor table*/
memset(desc, 0, sizeof(*desc));
desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
- desc->desc2.sampler_count = 1; /* FIXME: */
- desc->desc2.sampler_state_pointer = (vme_context->vme_state.bo->offset >> 5);
+ desc->desc2.sampler_count = 0; /* FIXME: */
+ desc->desc2.sampler_state_pointer = 0;
desc->desc3.binding_table_entry_count = 1; /* FIXME: */
desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
desc->desc4.constant_urb_entry_read_offset = 0;
0,
i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
kernel->bo);
- /*Sampler State(VME state pointer)*/
- dri_bo_emit_reloc(bo,
- I915_GEM_DOMAIN_INSTRUCTION, 0,
- (1 << 2), //
- i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc2),
- vme_context->vme_state.bo);
desc++;
}
dri_bo_unmap(bo);
}
static VAStatus gen75_vme_constant_setup(VADriverContextP ctx,
-                                        struct encode_state *encode_state,
-                                        struct intel_encoder_context *encoder_context)
+                                        struct encode_state *encode_state,
+                                        struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
-    // unsigned char *constant_buffer;
+    unsigned char *constant_buffer;
+    unsigned int *vme_state_message;
+    /* Max motion vectors per MB handed to the VME shader; scaled down for
+     * higher H.264 levels and fixed at 2 (one fwd + one bwd) for MPEG-2. */
+    int mv_num = 32;
+
+    vme_state_message = (unsigned int *)vme_context->vme_state_message;
+
+    if (encoder_context->profile == VAProfileH264Baseline ||
+        encoder_context->profile == VAProfileH264Main ||
+        encoder_context->profile == VAProfileH264High) {
+        if (vme_context->h264_level >= 30) {
+            mv_num = 16;
+
+            if (vme_context->h264_level >= 31)
+                mv_num = 8;
+        }
+    } else if (encoder_context->profile == VAProfileMPEG2Simple ||
+               encoder_context->profile == VAProfileMPEG2Main) {
+        mv_num = 2;
+    }
+
+    /* NOTE(review): the MV limit rides in the last dword of the 32-dword
+     * message — confirm the shader reads dword 31 as the MV count. */
+    vme_state_message[31] = mv_num;
    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
    assert(vme_context->gpe_context.curbe.bo->virtual);
-    // constant_buffer = vme_context->curbe.bo->virtual;
-
-    /*TODO copy buffer into CURB*/
+    constant_buffer = vme_context->gpe_context.curbe.bo->virtual;
-    dri_bo_unmap( vme_context->gpe_context.curbe.bo);
+    /* VME MV/Mb cost table is passed by using const buffer */
+    /* Now it uses the fixed search path. So it is constructed directly
+     * in the GPU shader.
+     */
+    /* 128 bytes = 32 dwords = the whole vme_state_message (VME_MSG_LENGTH). */
+    memcpy(constant_buffer, (char *)vme_context->vme_state_message, 128);
+
+    dri_bo_unmap(vme_context->gpe_context.curbe.bo);
    return VA_STATUS_SUCCESS;
}
};
static void gen75_vme_state_setup_fixup(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- unsigned int *vme_state_message)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ unsigned int *vme_state_message)
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
slice_param->slice_type != SLICE_TYPE_SI)
return;
if (encoder_context->rate_control_mode == VA_RC_CQP)
- vme_state_message[16] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
+ vme_state_message[0] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
else
- vme_state_message[16] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[slice_param->slice_type].QpPrimeY];
+ vme_state_message[0] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY];
}
+/*
+ * Fill the per-frame VME cost message in CPU memory (vme_state_message);
+ * gen75_vme_constant_setup later copies it into the CURBE.  On Haswell the
+ * MV/Mb costs travel inside the VME message rather than a separate
+ * sampler/VME state buffer, hence the removal of the vme_state.bo mapping.
+ */
static VAStatus gen75_vme_vme_state_setup(VADriverContextP ctx,
-                                          struct encode_state *encode_state,
-                                          int is_intra,
-                                          struct intel_encoder_context *encoder_context)
+                                          struct encode_state *encode_state,
+                                          int is_intra,
+                                          struct intel_encoder_context *encoder_context)
{
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned int *vme_state_message;
    int i;
-    //building VME state message
-    dri_bo_map(vme_context->vme_state.bo, 1);
-    assert(vme_context->vme_state.bo->virtual);
-    vme_state_message = (unsigned int *)vme_context->vme_state.bo->virtual;
-
-    vme_state_message[0] = 0x01010101;
-    vme_state_message[1] = 0x10010101;
-    vme_state_message[2] = 0x0F0F0F0F;
-    vme_state_message[3] = 0x100F0F0F;
-    vme_state_message[4] = 0x01010101;
-    vme_state_message[5] = 0x00010101;
-    vme_state_message[6] = 0x01010101;
-    vme_state_message[7] = 0x10010101;
-    vme_state_message[8] = 0x0F0F0F0F;
-    vme_state_message[9] = 0x100F0F0F;
-    vme_state_message[10] = 0x01010101;
-    vme_state_message[11] = 0x00010101;
-    vme_state_message[12] = 0x00;
-    vme_state_message[13] = 0x00;
-
-    vme_state_message[14] = 0x4a4a;
-    vme_state_message[15] = 0x0;
-    vme_state_message[16] = 0x4a4a4a4a;
-    vme_state_message[17] = 0x4a4a4a4a;
-    vme_state_message[18] = 0x22120200;
-    vme_state_message[19] = 0x62524232;
-
-    for(i = 20; i < 32; i++) {
-        vme_state_message[i] = 0;
+    //pass the MV/Mb cost into VME message on HASWell
+    assert(vme_context->vme_state_message);
+    vme_state_message = (unsigned int *)vme_context->vme_state_message;
+
+    /* NOTE(review): dwords 0-2 look like mode-cost LUT entries and dwords
+     * 3-4 like MV cost tables — confirm against the VME kernel layout. */
+    vme_state_message[0] = 0x4a4a4a4a;
+    vme_state_message[1] = 0x4a4a4a4a;
+    vme_state_message[2] = 0x4a4a4a4a;
+    vme_state_message[3] = 0x22120200;
+    vme_state_message[4] = 0x62524232;
+
+    for (i=5; i < 8; i++) {
+        vme_state_message[i] = 0;
    }
-    //vme_state_message[16] = 0x42424242; //cost function LUT set 0 for Intra
-    gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);
+    /* Only H.264 gets a QP-dependent intra mode-cost fixup. */
+    switch (encoder_context->profile) {
+    case VAProfileH264Baseline:
+    case VAProfileH264Main:
+    case VAProfileH264High:
+        gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);
+
+        break;
+
+    default:
+        /* no fixup */
+        break;
+    }
-    dri_bo_unmap( vme_context->vme_state.bo);
    return VA_STATUS_SUCCESS;
}
+
static void
gen75_vme_fill_vme_batchbuffer(VADriverContextP ctx,
- struct encode_state *encode_state,
- int mb_width, int mb_height,
- int kernel,
- int transform_8x8_mode_flag,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ int mb_width, int mb_height,
+ int kernel,
+ int transform_8x8_mode_flag,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
- int number_mb_cmds;
int mb_x = 0, mb_y = 0;
int i, s;
unsigned int *command_ptr;
VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
int slice_mb_begin = pSliceParameter->macroblock_address;
int slice_mb_number = pSliceParameter->num_macroblocks;
-
+ unsigned int mb_intra_ub;
+ int slice_mb_x = pSliceParameter->macroblock_address % mb_width;
for (i = 0; i < slice_mb_number; ) {
int mb_count = i + slice_mb_begin;
mb_x = mb_count % mb_width;
mb_y = mb_count / mb_width;
- if( i == 0 ) {
- number_mb_cmds = mb_width; // we must mark the slice edge.
- } else if ( (i + 128 ) <= slice_mb_number) {
- number_mb_cmds = 128;
- } else {
- number_mb_cmds = slice_mb_number - i;
- }
-
+ mb_intra_ub = 0;
+ if (mb_x != 0) {
+ mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
+ }
+ if (mb_y != 0) {
+ mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
+ if (mb_x != 0)
+ mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
+ if (mb_x != (mb_width -1))
+ mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
+ }
+ if (i < mb_width) {
+ if (i == 0)
+ mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
+ mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
+ if ((i == (mb_width - 1)) && slice_mb_x) {
+ mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
+ }
+ }
+
+ if ((i == mb_width) && slice_mb_x) {
+ mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
+ }
*command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
*command_ptr++ = kernel;
*command_ptr++ = 0;
/*inline data */
*command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
- *command_ptr++ = (number_mb_cmds << 16 | transform_8x8_mode_flag | ((i==0) << 1));
+ *command_ptr++ = ( (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
- i += number_mb_cmds;
+ i += 1;
}
}
static void gen75_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen6_vme_context *vme_context = encoder_context->vme_context;
- dri_bo *bo;
i965_gpe_context_init(ctx, &vme_context->gpe_context);
/* VME state */
dri_bo_unreference(vme_context->vme_state.bo);
- bo = dri_bo_alloc(i965->intel.bufmgr,
- "Buffer",
- 1024*16, 64);
- assert(bo);
- vme_context->vme_state.bo = bo;
+ vme_context->vme_state.bo = NULL;
}
static void gen75_vme_pipeline_programing(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
struct intel_batchbuffer *batch = encoder_context->base.batch;
VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
- int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
+ int kernel_shader;
+ bool allow_hwscore = true;
+ int s;
- gen75_vme_fill_vme_batchbuffer(ctx,
+ for (s = 0; s < encode_state->num_slice_params_ext; s++) {
+ pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
+ if ((pSliceParameter->macroblock_address % width_in_mbs)) {
+ allow_hwscore = false;
+ break;
+ }
+ }
+    /* Pick the VME kernel from the slice type.
+     * BUG FIX: the second arm of the first condition compared against
+     * SLICE_TYPE_I twice; switching kernels (SI) were never routed to the
+     * intra shader.  The duplicate is replaced with SLICE_TYPE_SI. */
+    if ((pSliceParameter->slice_type == SLICE_TYPE_I) ||
+        (pSliceParameter->slice_type == SLICE_TYPE_SI)) {
+        kernel_shader = VME_INTRA_SHADER;
+    } else if ((pSliceParameter->slice_type == SLICE_TYPE_P) ||
+               (pSliceParameter->slice_type == SLICE_TYPE_SP)) {
+        kernel_shader = VME_INTER_SHADER;
+    } else {
+        /* B slices want the bi-directional kernel, which depends on the
+         * HW-scoreboard walker path; fall back to the P kernel when any
+         * slice does not start at a row boundary (allow_hwscore false). */
+        kernel_shader = VME_BINTER_SHADER;
+        if (!allow_hwscore)
+            kernel_shader = VME_INTER_SHADER;
+    }
+ if (allow_hwscore)
+ gen7_vme_walker_fill_vme_batchbuffer(ctx,
encode_state,
width_in_mbs, height_in_mbs,
- is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
+ kernel_shader,
pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
encoder_context);
+ else
+ gen75_vme_fill_vme_batchbuffer(ctx,
+ encode_state,
+ width_in_mbs, height_in_mbs,
+ kernel_shader,
+ pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
+ encoder_context);
intel_batchbuffer_start_atomic(batch, 0x1000);
gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
}
static VAStatus gen75_vme_prepare(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
VAStatus vaStatus = VA_STATUS_SUCCESS;
VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
-
+ VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+ struct gen6_vme_context *vme_context = encoder_context->vme_context;
+
+ if (!vme_context->h264_level ||
+ (vme_context->h264_level != pSequenceParameter->level_idc)) {
+ vme_context->h264_level = pSequenceParameter->level_idc;
+ }
+
+ intel_vme_update_mbmv_cost(ctx, encode_state, encoder_context);
+
/*Setup all the memory object*/
gen75_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
gen75_vme_interface_setup(ctx, encode_state, encoder_context);
+ //gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
gen75_vme_constant_setup(ctx, encode_state, encoder_context);
- gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
/*Programing media pipeline*/
gen75_vme_pipeline_programing(ctx, encode_state, encoder_context);
}
static VAStatus gen75_vme_run(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct intel_batchbuffer *batch = encoder_context->base.batch;
}
static VAStatus gen75_vme_stop(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
return VA_STATUS_SUCCESS;
}
static VAStatus
gen75_vme_pipeline(VADriverContextP ctx,
- VAProfile profile,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ VAProfile profile,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
gen75_vme_media_init(ctx, encoder_context);
gen75_vme_prepare(ctx, encode_state, encoder_context);
}
static void
+/*
+ * Allocate the VME output buffer for MPEG-2 (one block per MB) and bind
+ * it at binding-table slot `index`.  Intra output: 2 * 16 bytes per MB;
+ * inter output: 24 * 16 bytes per MB (breakdown in the comment below).
+ */
+gen75_vme_mpeg2_output_buffer_setup(VADriverContextP ctx,
+                                    struct encode_state *encode_state,
+                                    int index,
+                                    int is_intra,
+                                    struct intel_encoder_context *encoder_context)
+
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
+    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
+
+    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
+    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
+
+    if (is_intra)
+        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
+    else
+        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
+    /*
+     * Inter MV . 32-byte Intra search + 16 IME info + 128 IME MV + 32 IME Ref
+     * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
+     * 16 * (2 + 2 * (1 + 8 + 2))= 16 * 24.
+     */
+
+    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
+                                              "VME output buffer",
+                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
+                                              0x1000);
+    assert(vme_context->vme_output.bo);
+    vme_context->vme_buffer_suface_setup(ctx,
+                                         &vme_context->gpe_context,
+                                         &vme_context->vme_output,
+                                         BINDING_TABLE_OFFSET(index),
+                                         SURFACE_STATE_OFFSET(index));
+}
+
+/*
+ * Allocate the second-level batchbuffer that the batchbuffer-builder
+ * kernel fills with one MEDIA_OBJECT per MB, and bind it at slot `index`.
+ * One 64-byte (4 OWORD) slot per MB plus one extra slot for the
+ * MI_BATCH_BUFFER_END terminator.
+ */
+static void
+gen75_vme_mpeg2_output_vme_batchbuffer_setup(VADriverContextP ctx,
+                                             struct encode_state *encode_state,
+                                             int index,
+                                             struct intel_encoder_context *encoder_context)
+
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
+    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
+
+    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
+    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
+    vme_context->vme_batchbuffer.pitch = 16;
+    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
+                                                   "VME batchbuffer",
+                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
+                                                   0x1000);
+    /* Check the allocation, matching gen75_vme_mpeg2_output_buffer_setup;
+     * a NULL bo would crash later inside the surface setup. */
+    assert(vme_context->vme_batchbuffer.bo);
+    vme_context->vme_buffer_suface_setup(ctx,
+                                         &vme_context->gpe_context,
+                                         &vme_context->vme_batchbuffer,
+                                         BINDING_TABLE_OFFSET(index),
+                                         SURFACE_STATE_OFFSET(index));
+}
+
+/*
+ * Bind all surfaces the MPEG-2 VME kernels read/write.
+ * Binding slots used here: 0 = current picture (VME view), 1 = forward
+ * reference, 2 = backward reference, 3 = VME output, 4 = current picture
+ * (media RW view), 5 = second-level batchbuffer, 6 = chroma plane.
+ */
+static VAStatus
+gen75_vme_mpeg2_surface_setup(VADriverContextP ctx,
+                              struct encode_state *encode_state,
+                              int is_intra,
+                              struct intel_encoder_context *encoder_context)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct object_surface *obj_surface;
+    VAEncPictureParameterBufferMPEG2 *pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;
+
+    /*Setup surfaces state*/
+    /* current picture for encoding */
+    obj_surface = SURFACE(encoder_context->input_yuv_surface);
+    assert(obj_surface);
+    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
+    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
+    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
+
+    if (!is_intra) {
+        /* reference 0: forward reference is mandatory for P/B pictures. */
+        obj_surface = SURFACE(pic_param->forward_reference_picture);
+        assert(obj_surface);
+        if ( obj_surface->bo != NULL)
+            gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);
+
+        /* reference 1: backward reference exists only for B pictures,
+         * hence the NULL-tolerant check instead of an assert. */
+        obj_surface = SURFACE(pic_param->backward_reference_picture);
+        if (obj_surface && obj_surface->bo != NULL)
+            gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
+    }
+
+    /* VME output */
+    gen75_vme_mpeg2_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
+    gen75_vme_mpeg2_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
+
+    return VA_STATUS_SUCCESS;
+}
+
+/*
+ * Emit one 8-dword MEDIA_OBJECT per macroblock into the VME batchbuffer.
+ * mb_intra_ub encodes which neighbour MBs are available for intra
+ * prediction: AE = left, B = top, C = top-right, D = top-left.  Flags
+ * are cleared again for MBs in the first row of a slice because
+ * prediction must not cross a slice boundary.
+ */
+static void
+gen75_vme_mpeg2_fill_vme_batchbuffer(VADriverContextP ctx,
+                                     struct encode_state *encode_state,
+                                     int mb_width, int mb_height,
+                                     int kernel,
+                                     int transform_8x8_mode_flag,
+                                     struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    int mb_x = 0, mb_y = 0;
+    int i, s, j;
+    unsigned int *command_ptr;
+
+
+    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
+    command_ptr = vme_context->vme_batchbuffer.bo->virtual;
+
+    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
+        VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;
+
+        for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
+            int slice_mb_begin = slice_param->macroblock_address;
+            int slice_mb_number = slice_param->num_macroblocks;
+            unsigned int mb_intra_ub;
+            /* Non-zero when the slice does not start at a row boundary. */
+            int slice_mb_x = slice_param->macroblock_address % mb_width;
+
+            for (i = 0; i < slice_mb_number;) {
+                int mb_count = i + slice_mb_begin;
+
+                mb_x = mb_count % mb_width;
+                mb_y = mb_count / mb_width;
+                mb_intra_ub = 0;
+
+                /* Frame-geometry availability: which neighbours exist. */
+                if (mb_x != 0) {
+                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
+                }
+
+                if (mb_y != 0) {
+                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
+
+                    if (mb_x != 0)
+                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
+
+                    if (mb_x != (mb_width -1))
+                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
+                }
+
+                /* Slice-boundary fixups for the first row of the slice:
+                 * drop top/top-left/top-right (and left for the very first
+                 * MB), since those MBs belong to the previous slice. */
+                if (i < mb_width) {
+                    if (i == 0)
+                        mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
+
+                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
+
+                    if ((i == (mb_width - 1)) && slice_mb_x) {
+                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
+                    }
+                }
+
+                if ((i == mb_width) && slice_mb_x) {
+                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
+                }
+
+                /* MEDIA_OBJECT: header + 5 setup dwords + 2 inline dwords. */
+                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
+                *command_ptr++ = kernel;
+                *command_ptr++ = 0;
+                *command_ptr++ = 0;
+                *command_ptr++ = 0;
+                *command_ptr++ = 0;
+
+                /*inline data */
+                *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
+                *command_ptr++ = ( (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
+
+                i += 1;
+            }
+
+            slice_param++;
+        }
+    }
+
+    /* Terminate the second-level batch. */
+    *command_ptr++ = 0;
+    *command_ptr++ = MI_BATCH_BUFFER_END;
+
+    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
+}
+
+/*
+ * Build the per-MB command buffer, then chain into it from the main batch
+ * with a second-level MI_BATCH_BUFFER_START ((2 << 6) selects the
+ * second-level/PPGTT form — NOTE(review): confirm bit meaning against the
+ * command reference).  MPEG-2 has no B kernel: B pictures run the inter
+ * kernel via the is_intra flag.
+ */
+static void
+gen75_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
+                                    struct encode_state *encode_state,
+                                    int is_intra,
+                                    struct intel_encoder_context *encoder_context)
+{
+    struct gen6_vme_context *vme_context = encoder_context->vme_context;
+    struct intel_batchbuffer *batch = encoder_context->base.batch;
+    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
+    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
+    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
+
+    gen75_vme_mpeg2_fill_vme_batchbuffer(ctx,
+                                         encode_state,
+                                         width_in_mbs, height_in_mbs,
+                                         is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
+                                         0,
+                                         encoder_context);
+
+    intel_batchbuffer_start_atomic(batch, 0x1000);
+    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
+    OUT_RELOC(batch,
+              vme_context->vme_batchbuffer.bo,
+              I915_GEM_DOMAIN_COMMAND, 0,
+              0);
+    ADVANCE_BATCH(batch);
+
+    intel_batchbuffer_end_atomic(batch);
+}
+
+/*
+ * MPEG-2 per-frame VME preparation: bind surfaces, write the interface
+ * descriptors, build the cost message, copy it into the CURBE, then emit
+ * the media pipeline commands.  is_intra comes from the first slice and
+ * is applied to the whole picture.
+ */
+static VAStatus
+gen75_vme_mpeg2_prepare(VADriverContextP ctx,
+                        struct encode_state *encode_state,
+                        struct intel_encoder_context *encoder_context)
+{
+    VAStatus vaStatus = VA_STATUS_SUCCESS;
+    VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;
+
+    /*Setup all the memory object*/
+    gen75_vme_mpeg2_surface_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
+    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
+    /* state_setup fills vme_state_message; constant_setup copies it to CURBE,
+     * so the order of these two calls matters. */
+    gen75_vme_vme_state_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
+    gen75_vme_constant_setup(ctx, encode_state, encoder_context);
+
+    /*Programing media pipeline*/
+    gen75_vme_mpeg2_pipeline_programing(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
+
+    return vaStatus;
+}
+
+/* Top-level MPEG-2 VME entry point: init GPE state, prepare, submit, stop.
+ * Installed as encoder_context->vme_pipeline for the MPEG-2 profiles. */
+static VAStatus
+gen75_vme_mpeg2_pipeline(VADriverContextP ctx,
+                         VAProfile profile,
+                         struct encode_state *encode_state,
+                         struct intel_encoder_context *encoder_context)
+{
+    gen75_vme_media_init(ctx, encoder_context);
+    gen75_vme_mpeg2_prepare(ctx, encode_state, encoder_context);
+    gen75_vme_run(ctx, encode_state, encoder_context);
+    gen75_vme_stop(ctx, encode_state, encoder_context);
+
+    return VA_STATUS_SUCCESS;
+}
+
+static void
gen75_vme_context_destroy(void *context)
{
struct gen6_vme_context *vme_context = context;
dri_bo_unreference(vme_context->vme_batchbuffer.bo);
vme_context->vme_batchbuffer.bo = NULL;
+ if (vme_context->vme_state_message) {
+ free(vme_context->vme_state_message);
+ vme_context->vme_state_message = NULL;
+ }
+
free(vme_context);
}
Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
-
+ struct i965_kernel *vme_kernel_list = NULL;
+ int i965_kernel_num;
+
+ switch (encoder_context->profile) {
+ case VAProfileH264Baseline:
+ case VAProfileH264Main:
+ case VAProfileH264High:
+ vme_kernel_list = gen75_vme_kernels;
+ encoder_context->vme_pipeline = gen75_vme_pipeline;
+ i965_kernel_num = sizeof(gen75_vme_kernels) / sizeof(struct i965_kernel);
+ break;
+
+ case VAProfileMPEG2Simple:
+ case VAProfileMPEG2Main:
+ vme_kernel_list = gen75_vme_mpeg2_kernels;
+ encoder_context->vme_pipeline = gen75_vme_mpeg2_pipeline;
+ i965_kernel_num = sizeof(gen75_vme_mpeg2_kernels) / sizeof(struct i965_kernel);
+
+ break;
+
+ default:
+ /* never get here */
+ assert(0);
+
+ break;
+ }
+ vme_context->vme_kernel_sum = i965_kernel_num;
vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;
- i965_gpe_load_kernels(ctx,
- &vme_context->gpe_context,
- gen75_vme_kernels,
- GEN6_VME_KERNEL_NUMBER);
- vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
- vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
- vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
- vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;
+ gen7_vme_scoreboard_init(ctx, vme_context);
+
+ i965_gpe_load_kernels(ctx,
+ &vme_context->gpe_context,
+ vme_kernel_list,
+ i965_kernel_num);
+ vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
+ vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
+ vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
+ vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;
encoder_context->vme_context = vme_context;
encoder_context->vme_context_destroy = gen75_vme_context_destroy;
- encoder_context->vme_pipeline = gen75_vme_pipeline;
+
+ vme_context->vme_state_message = malloc(VME_MSG_LENGTH * sizeof(int));
return True;
}