Make the command buffer size adaptive to the frame size, instead of using a fixed-size buffer, so large frames cannot overflow the batch.
Signed-off-by: Xiang, Haihao <haihao.xiang@intel.com>
struct gen6_encoder_context *gen6_encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
+ struct intel_batchbuffer *main_batch = gen6_encoder_context->base.batch;
struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
int x,y;
+ struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD, width_in_mbs * height_in_mbs * 12 * 4 + 0x800);
- intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
+ intel_batchbuffer_start_atomic_bcs(batch, width_in_mbs * height_in_mbs * 12 * 4 + 0x700);
if (is_intra) {
dri_bo_map(vme_context->vme_output.bo , 1);
if (is_intra)
dri_bo_unmap(vme_context->vme_output.bo);
-
+
+ intel_batchbuffer_align(batch, 8);
+
+ BEGIN_BCS_BATCH(batch, 2);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BCS_BATCH(batch);
+
intel_batchbuffer_end_atomic(batch);
+
+ /* chain to the main batch buffer */
+ intel_batchbuffer_start_atomic_bcs(main_batch, 0x100);
+ intel_batchbuffer_emit_mi_flush(main_batch);
+ BEGIN_BCS_BATCH(main_batch, 2);
+ OUT_BCS_BATCH(main_batch, MI_BATCH_BUFFER_START | (1 << 8));
+ OUT_BCS_RELOC(main_batch,
+ batch->buffer,
+ I915_GEM_DOMAIN_COMMAND, 0,
+ 0);
+ ADVANCE_BCS_BATCH(main_batch);
+ intel_batchbuffer_end_atomic(main_batch);
+
+ // end programming
+ intel_batchbuffer_free(batch);
}
static VAStatus gen6_mfc_avc_prepare(VADriverContextP ctx,
struct encode_state *encode_state,
struct gen6_encoder_context *gen6_encoder_context)
{
- struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
+ struct intel_batchbuffer *main_batch = gen6_encoder_context->base.batch;
VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
int is_intra = pSliceParameter->slice_flags.bits.is_intra;
int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
int emit_new_state = 1, object_len_in_bytes;
int x, y;
+ struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER, width_in_mbs * height_in_mbs * 8 * 4 + 0x200);
- intel_batchbuffer_start_atomic(batch, 0x1000);
+ intel_batchbuffer_start_atomic(batch, width_in_mbs * height_in_mbs * 8 * 4 + 0x100);
for(y = 0; y < height_in_mbs; y++){
for(x = 0; x < width_in_mbs; x++){
}
}
- intel_batchbuffer_end_atomic(batch);
+ intel_batchbuffer_align(batch, 8);
+
+ BEGIN_BATCH(batch, 2);
+ OUT_BATCH(batch, 0);
+ OUT_BATCH(batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BATCH(batch);
+
+ intel_batchbuffer_end_atomic(batch);
+
+ /* chain to the main batch buffer */
+ intel_batchbuffer_start_atomic(main_batch, 0x100);
+ intel_batchbuffer_emit_mi_flush(main_batch);
+ BEGIN_BATCH(main_batch, 2);
+ OUT_BATCH(main_batch, MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RELOC(main_batch,
+ batch->buffer,
+ I915_GEM_DOMAIN_COMMAND, 0,
+ 0);
+ ADVANCE_BATCH(main_batch);
+ intel_batchbuffer_end_atomic(main_batch);
+
+ // end programming
+ intel_batchbuffer_free(batch);
}
static VAStatus gen6_vme_prepare(VADriverContextP ctx,
struct gen6_encoder_context *gen6_encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
+ struct intel_batchbuffer *main_batch = gen6_encoder_context->base.batch;
struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
int x,y, mb_index;
int inter_rdo, intra_rdo;
+ struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD, width_in_mbs * height_in_mbs * 12 * 4 + 0x800);
- intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
+ intel_batchbuffer_start_atomic_bcs(batch, width_in_mbs * height_in_mbs * 12 * 4 + 0x700);
dri_bo_map(vme_context->vme_output.bo , 1);
msg_ptr = (unsigned char *)vme_context->vme_output.bo->virtual;
dri_bo_unmap(vme_context->vme_output.bo);
+ intel_batchbuffer_align(batch, 8);
+
+ BEGIN_BCS_BATCH(batch, 2);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BCS_BATCH(batch);
+
intel_batchbuffer_end_atomic(batch);
+
+ /* chain to the main batch buffer */
+ intel_batchbuffer_start_atomic_bcs(main_batch, 0x100);
+ intel_batchbuffer_emit_mi_flush(main_batch);
+ BEGIN_BCS_BATCH(main_batch, 2);
+ OUT_BCS_BATCH(main_batch, MI_BATCH_BUFFER_START | (1 << 8));
+ OUT_BCS_RELOC(main_batch,
+ batch->buffer,
+ I915_GEM_DOMAIN_COMMAND, 0,
+ 0);
+ ADVANCE_BCS_BATCH(main_batch);
+ intel_batchbuffer_end_atomic(main_batch);
+
+ // end programming
+ intel_batchbuffer_free(batch);
}
static VAStatus gen75_mfc_avc_prepare(VADriverContextP ctx,
struct encode_state *encode_state,
struct gen6_encoder_context *gen6_encoder_context)
{
- struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
+ struct intel_batchbuffer *main_batch = gen6_encoder_context->base.batch;
VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
int is_intra = pSliceParameter->slice_flags.bits.is_intra;
int emit_new_state = 1, object_len_in_bytes;
int x, y;
unsigned int mb_intra_ub;
+ struct intel_batchbuffer *batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER, width_in_mbs * height_in_mbs * 8 * 4 + 0x200);
- intel_batchbuffer_start_atomic(batch, 0x1000);
+ intel_batchbuffer_start_atomic(batch, width_in_mbs * height_in_mbs * 8 * 4 + 0x100);
for(y = 0; y < height_in_mbs; y++){
for(x = 0; x < width_in_mbs; x++){
}
}
- intel_batchbuffer_end_atomic(batch);
+ intel_batchbuffer_align(batch, 8);
+
+ BEGIN_BATCH(batch, 2);
+ OUT_BATCH(batch, 0);
+ OUT_BATCH(batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BATCH(batch);
+
+ intel_batchbuffer_end_atomic(batch);
+
+ /* chain to the main batch buffer */
+ intel_batchbuffer_start_atomic(main_batch, 0x100);
+ intel_batchbuffer_emit_mi_flush(main_batch);
+ BEGIN_BATCH(main_batch, 2);
+ OUT_BATCH(main_batch, MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RELOC(main_batch,
+ batch->buffer,
+ I915_GEM_DOMAIN_COMMAND, 0,
+ 0);
+ ADVANCE_BATCH(main_batch);
+ intel_batchbuffer_end_atomic(main_batch);
+
+ // end programming
+ intel_batchbuffer_free(batch);
}
static VAStatus gen75_vme_prepare(VADriverContextP ctx,