IS_GEN6((ctx)->intel.device_id)) && \
(ctx)->intel.has_bsd)
+/* Hardware video processing (VPP) is available on Ironlake (Gen5),
+ * Sandy Bridge (Gen6) and Ivy Bridge (Gen7). */
+#define HAS_VPP(ctx) (IS_IRONLAKE((ctx)->intel.device_id) || \
+                      IS_GEN6((ctx)->intel.device_id) || \
+                      IS_GEN7((ctx)->intel.device_id))
+
enum {
I965_SURFACETYPE_RGBA = 1,
I965_SURFACETYPE_YUV,
return NULL;
}
+extern struct hw_context *i965_proc_context_init(VADriverContextP, VAProfile);
extern struct hw_context *g4x_dec_hw_context_init(VADriverContextP, VAProfile);
static struct hw_codec_info g4x_hw_codec_info = {
.dec_hw_context_init = g4x_dec_hw_context_init,
.enc_hw_context_init = NULL,
+ .proc_hw_context_init = NULL,
};
extern struct hw_context *ironlake_dec_hw_context_init(VADriverContextP, VAProfile);
static struct hw_codec_info ironlake_hw_codec_info = {
.dec_hw_context_init = ironlake_dec_hw_context_init,
.enc_hw_context_init = NULL,
+ .proc_hw_context_init = i965_proc_context_init,
};
extern struct hw_context *gen6_dec_hw_context_init(VADriverContextP, VAProfile);
static struct hw_codec_info gen6_hw_codec_info = {
.dec_hw_context_init = gen6_dec_hw_context_init,
.enc_hw_context_init = gen6_enc_hw_context_init,
+ .proc_hw_context_init = i965_proc_context_init,
};
extern struct hw_context *gen7_dec_hw_context_init(VADriverContextP, VAProfile);
static struct hw_codec_info gen7_hw_codec_info = {
.dec_hw_context_init = gen7_dec_hw_context_init,
.enc_hw_context_init = gen6_enc_hw_context_init,
+ .proc_hw_context_init = NULL,
};
VAStatus
profile_list[i++] = VAProfileVC1Advanced;
}
+ if (HAS_VPP(i965)) {
+ profile_list[i++] = VAProfileNone;
+ }
+
/* If the assert fails then I965_MAX_PROFILES needs to be bigger */
assert(i <= I965_MAX_PROFILES);
*num_profiles = i;
entrypoint_list[n++] = VAEntrypointVLD;
break;
+ case VAProfileNone:
+ if (HAS_VPP(i965))
+ entrypoint_list[n++] = VAEntrypointVideoProc;
+ break;
+
default:
break;
}
break;
+ case VAProfileNone:
+ if (HAS_VPP(i965) && VAEntrypointVideoProc == entrypoint) {
+ vaStatus = VA_STATUS_SUCCESS;
+ } else {
+ vaStatus = VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT;
+ }
+
+ break;
default:
vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
break;
obj_context->hw_context = NULL;
}
- if (obj_context->codec_type == CODEC_ENC) {
+ if (obj_context->codec_type == CODEC_PROC) {
+ i965_release_buffer_store(&obj_context->codec_state.proc.pipeline_param);
+ i965_release_buffer_store(&obj_context->codec_state.proc.input_param);
+
+ for (i = 0; i < VA_PROC_PIPELINE_MAX_NUM_FILTERS; i++)
+ i965_release_buffer_store(&obj_context->codec_state.proc.filter_param[i]);
+ } else if (obj_context->codec_type == CODEC_ENC) {
assert(obj_context->codec_state.encode.num_slice_params <= obj_context->codec_state.encode.max_slice_params);
i965_release_buffer_store(&obj_context->codec_state.encode.pic_param);
i965_release_buffer_store(&obj_context->codec_state.encode.seq_param);
}
if (VA_STATUS_SUCCESS == vaStatus) {
- if (VAEntrypointEncSlice == obj_config->entrypoint ) { /*encode routin only*/
+ if (VAEntrypointVideoProc == obj_config->entrypoint) {
+ obj_context->codec_type = CODEC_PROC;
+ memset(&obj_context->codec_state.proc, 0, sizeof(obj_context->codec_state.proc));
+ obj_context->codec_state.proc.current_render_target = VA_INVALID_ID;
+ assert(i965->codec_info->proc_hw_context_init);
+ obj_context->hw_context = i965->codec_info->proc_hw_context_init(ctx, obj_config->profile);
+ } else if (VAEntrypointEncSlice == obj_config->entrypoint) { /*encode routin only*/
obj_context->codec_type = CODEC_ENC;
memset(&obj_context->codec_state.encode, 0, sizeof(obj_context->codec_state.encode));
obj_context->codec_state.encode.current_render_target = VA_INVALID_ID;
break;
}
- if (obj_context->codec_type == CODEC_ENC) {
+ if (obj_context->codec_type == CODEC_PROC) {
+ obj_context->codec_state.proc.current_render_target = render_target;
+ } else if (obj_context->codec_type == CODEC_ENC) {
i965_release_buffer_store(&obj_context->codec_state.encode.pic_param);
i965_release_buffer_store(&obj_context->codec_state.encode.seq_param);
return vaStatus;
}
+/* Route the generic render-buffer helpers at the proc codec state. */
+#define I965_RENDER_PROC_BUFFER(name) I965_RENDER_BUFFER(proc, name)
+
+/* Generate single-buffer "render" helpers that stash the pipeline/input
+ * parameter buffers into codec_state.proc.<member>. */
+#define DEF_RENDER_PROC_SINGLE_BUFFER_FUNC(name, member) DEF_RENDER_SINGLE_BUFFER_FUNC(proc, name, member)
+DEF_RENDER_PROC_SINGLE_BUFFER_FUNC(pipeline_parameter, pipeline_param)
+DEF_RENDER_PROC_SINGLE_BUFFER_FUNC(input_parameter, input_param)
+
+/*
+ * Store a filter parameter buffer into the per-context processing state.
+ * The buffer store is reference-counted; any buffer previously held in the
+ * same filter slot is released first.  Returns VA_STATUS_SUCCESS, or
+ * VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE for an unknown filter type.
+ */
+static VAStatus
+i965_render_proc_filter_parameter_buffer(VADriverContextP ctx,
+                                         struct object_context *obj_context,
+                                         struct object_buffer *obj_buffer,
+                                         VAProcFilterType type)
+{
+    struct proc_state *proc = &obj_context->codec_state.proc;
+
+    /* 'type' indexes filter_param[]; reject values outside the array so a
+     * malformed client buffer cannot cause an out-of-bounds write. */
+    if (type < 0 || type >= VA_PROC_PIPELINE_MAX_NUM_FILTERS)
+        return VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE;
+
+    assert(obj_buffer->buffer_store->bo == NULL);
+    assert(obj_buffer->buffer_store->buffer);
+    i965_release_buffer_store(&proc->filter_param[type]);
+    i965_reference_buffer_store(&proc->filter_param[type], obj_buffer->buffer_store);
+
+    return VA_STATUS_SUCCESS;
+}
+
+/*
+ * Accumulate client buffers for a video-processing context.
+ * Pipeline/input parameter buffers are stored in codec_state.proc; filter
+ * parameter buffers are stored per filter type.  Returns the status of the
+ * last processed buffer, or VA_STATUS_ERROR_INVALID_PARAMETER when the
+ * buffer list is empty.
+ */
+static VAStatus
+i965_proc_render_picture(VADriverContextP ctx,
+                         VAContextID context,
+                         VABufferID *buffers,
+                         int num_buffers)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct object_context *obj_context = CONTEXT(context);
+    /* Initialized so an empty buffer list cannot return an
+     * indeterminate (uninitialized) status. */
+    VAStatus vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
+    int i;
+
+    for (i = 0; i < num_buffers; i++) {
+        struct object_buffer *obj_buffer = BUFFER(buffers[i]);
+        assert(obj_buffer);
+
+        switch (obj_buffer->type) {
+        case VAProcPipelineParameterBufferType:
+            vaStatus = I965_RENDER_PROC_BUFFER(pipeline_parameter);
+            break;
+
+        case VAProcInputParameterBufferType:
+            vaStatus = I965_RENDER_PROC_BUFFER(input_parameter);
+            break;
+
+        case VAProcFilterBaseParameterBufferType:
+        {
+            /* Generic filter buffer: the concrete filter type is carried
+             * in the buffer payload itself. */
+            VAProcFilterBaseParameterBuffer *param = (VAProcFilterBaseParameterBuffer *)obj_buffer->buffer_store->buffer;
+            vaStatus = i965_render_proc_filter_parameter_buffer(ctx, obj_context, obj_buffer, param->filter);
+            break;
+        }
+
+        case VAProcFilterDeinterlacingParameterBufferType:
+            vaStatus = i965_render_proc_filter_parameter_buffer(ctx, obj_context, obj_buffer, VAProcFilterDeinterlacing);
+            break;
+
+        case VAProcFilterProcAmpParameterBufferType:
+            vaStatus = i965_render_proc_filter_parameter_buffer(ctx, obj_context, obj_buffer, VAProcFilterProcAmp);
+            break;
+
+        default:
+            vaStatus = VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE;
+            break;
+        }
+    }
+
+    return vaStatus;
+}
+
VAStatus
i965_RenderPicture(VADriverContextP ctx,
VAContextID context,
obj_config = CONFIG(config);
assert(obj_config);
- if (VAEntrypointEncSlice == obj_config->entrypoint ){
+ if (VAEntrypointVideoProc == obj_config->entrypoint) {
+ vaStatus = i965_proc_render_picture(ctx, context, buffers, num_buffers);
+ } else if (VAEntrypointEncSlice == obj_config->entrypoint ) {
vaStatus = i965_encoder_render_picture(ctx, context, buffers, num_buffers);
} else {
vaStatus = i965_decoder_render_picture(ctx, context, buffers, num_buffers);
obj_config = CONFIG(config);
assert(obj_config);
- if (obj_context->codec_type == CODEC_ENC) {
+ if (obj_context->codec_type == CODEC_PROC) {
+ assert(VAEntrypointVideoProc == obj_config->entrypoint);
+ } else if (obj_context->codec_type == CODEC_ENC) {
assert(VAEntrypointEncSlice == obj_config->entrypoint);
assert(obj_context->codec_state.encode.pic_param ||
else
return VA_STATUS_ERROR_UNKNOWN;
+ i965->batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER);
+
if (i965_post_processing_init(ctx) == False)
return VA_STATUS_ERROR_UNKNOWN;
return VA_STATUS_ERROR_UNKNOWN;
_i965InitMutex(&i965->render_mutex);
- i965->batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER);
return VA_STATUS_SUCCESS;
}
return vaStatus;
}
+/*
+ * Query video processing pipeline
+ *
+ * Stub implementation: reports an empty pipeline — every filter slot is
+ * set to VAProcFilterNone with bypass enabled.  'ctx' and 'context' are
+ * unused.
+ * NOTE(review): pipeline_cap is dereferenced without a NULL check; the
+ * caller must pass a valid pointer — confirm against the VA-API contract.
+ */
+VAStatus i965_QueryVideoProcPipelineCap(
+    VADriverContextP ctx,
+    VAContextID context,
+    VAProcPipelineCap *pipeline_cap /* out */
+    )
+{
+    int i;
+
+    for (i = 0; i < VA_PROC_PIPELINE_MAX_NUM_FILTERS; i++) {
+        pipeline_cap->filter_pipeline[i] = VAProcFilterNone;
+        pipeline_cap->bypass[i] = 1;
+    }
+
+    return VA_STATUS_SUCCESS;
+}
+
+/*
+ * Query the capabilities of a single video-processing filter.
+ * Stub: returns success without writing anything into 'cap' — no
+ * filter-specific capabilities are exposed yet.
+ */
+VAStatus i965_QueryVideoProcFilterCap(
+    VADriverContextP ctx,
+    VAContextID context,
+    VAProcFilterType filter,
+    void *cap /* out */
+    )
+{
+    return VA_STATUS_SUCCESS;
+}
+
VAStatus DLL_EXPORT
VA_DRIVER_INIT_FUNC(VADriverContextP ctx);
vtable->vaBufferInfo = i965_BufferInfo;
vtable->vaLockSurface = i965_LockSurface;
vtable->vaUnlockSurface = i965_UnlockSurface;
+ vtable->vaQueryVideoProcPipelineCap = i965_QueryVideoProcPipelineCap;
+ vtable->vaQueryVideoProcFilterCap = i965_QueryVideoProcFilterCap;
+
// vtable->vaDbgCopySurfaceToBuffer = i965_DbgCopySurfaceToBuffer;
i965 = (struct i965_driver_data *)calloc(1, sizeof(*i965));
#include "shaders/post_processing/nv12_dndi_nv12.g4b.gen5"
};
-static void pp_null_initialize(VADriverContextP ctx,
+static void pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect);
-static void pp_nv12_avs_initialize(VADriverContextP ctx,
+static void pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect);
-static void pp_nv12_scaling_initialize(VADriverContextP ctx,
+static void pp_nv12_scaling_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect);
-static void pp_nv12_load_save_initialize(VADriverContextP ctx,
+static void pp_nv12_load_save_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect);
-static void pp_nv12_dndi_initialize(VADriverContextP ctx,
+static void pp_nv12_dndi_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect);
}
static void
-ironlake_pp_states_setup(VADriverContextP ctx)
+ironlake_pp_states_setup(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
-
ironlake_pp_surface_state(pp_context);
ironlake_pp_binding_table(pp_context);
ironlake_pp_interface_descriptor_table(pp_context);
}
static void
-ironlake_pp_pipeline_select(VADriverContextP ctx)
+ironlake_pp_pipeline_select(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
}
static void
-ironlake_pp_urb_layout(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+ironlake_pp_urb_layout(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
unsigned int vfe_fence, cs_fence;
vfe_fence = pp_context->urb.cs_start;
}
static void
-ironlake_pp_state_base_address(VADriverContextP ctx)
+ironlake_pp_state_base_address(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 8);
OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
}
static void
-ironlake_pp_state_pointers(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+ironlake_pp_state_pointers(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 3);
OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
}
static void
-ironlake_pp_cs_urb_layout(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+ironlake_pp_cs_urb_layout(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
}
static void
-ironlake_pp_constant_buffer(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+ironlake_pp_constant_buffer(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
}
static void
-ironlake_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+ironlake_pp_object_walker(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
int x, x_steps, y, y_steps;
x_steps = pp_context->pp_x_steps(&pp_context->private_context);
}
static void
-ironlake_pp_pipeline_setup(VADriverContextP ctx)
+ironlake_pp_pipeline_setup(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
- struct i965_post_processing_context *pp_context = i965->pp_context;
+ struct intel_batchbuffer *batch = pp_context->batch;
intel_batchbuffer_start_atomic(batch, 0x1000);
intel_batchbuffer_emit_mi_flush(batch);
- ironlake_pp_pipeline_select(ctx);
- ironlake_pp_state_base_address(ctx);
+ ironlake_pp_pipeline_select(ctx, pp_context);
+ ironlake_pp_state_base_address(ctx, pp_context);
ironlake_pp_state_pointers(ctx, pp_context);
ironlake_pp_urb_layout(ctx, pp_context);
ironlake_pp_cs_urb_layout(ctx, pp_context);
}
static void
-pp_null_initialize(VADriverContextP ctx,
+pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
-
/* private function & data */
pp_context->pp_x_steps = pp_null_x_steps;
pp_context->pp_y_steps = pp_null_y_steps;
}
static void
-pp_nv12_load_save_initialize(VADriverContextP ctx,
+pp_nv12_load_save_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_load_save_context *pp_load_save_context = (struct pp_load_save_context *)&pp_context->private_context;
struct object_surface *obj_surface;
struct i965_surface_state *ss;
}
static void
-pp_nv12_scaling_initialize(VADriverContextP ctx,
+pp_nv12_scaling_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_scaling_context *pp_scaling_context = (struct pp_scaling_context *)&pp_context->private_context;
struct object_surface *obj_surface;
struct i965_sampler_state *sampler_state;
}
static void
-pp_nv12_avs_initialize(VADriverContextP ctx,
+pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->private_context;
struct object_surface *obj_surface;
struct i965_surface_state *ss;
}
static
-void pp_nv12_dndi_initialize(VADriverContextP ctx,
+void pp_nv12_dndi_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id, VASurfaceID out_surface_id,
const VARectangle *src_rect, const VARectangle *dst_rect)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_dndi_context *pp_dndi_context = (struct pp_dndi_context *)&pp_context->private_context;
struct object_surface *obj_surface;
struct i965_surface_state *ss;
static void
ironlake_pp_initialize(
VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id,
VASurfaceID out_surface_id,
const VARectangle *src_rect,
)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_module *pp_module;
dri_bo *bo;
int i;
pp_module = &pp_context->pp_modules[pp_index];
if (pp_module->initialize)
- pp_module->initialize(ctx, in_surface_id, out_surface_id,
+ pp_module->initialize(ctx, pp_context,
+ in_surface_id, out_surface_id,
src_rect, dst_rect);
}
static void
ironlake_post_processing(
VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id,
VASurfaceID out_surface_id,
const VARectangle *src_rect,
int pp_index
)
{
- ironlake_pp_initialize(ctx, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
- ironlake_pp_states_setup(ctx);
- ironlake_pp_pipeline_setup(ctx);
+ ironlake_pp_initialize(ctx, pp_context, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
+ ironlake_pp_states_setup(ctx, pp_context);
+ ironlake_pp_pipeline_setup(ctx, pp_context);
}
static void
gen6_pp_initialize(
VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id,
VASurfaceID out_surface_id,
const VARectangle *src_rect,
)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
struct pp_module *pp_module;
dri_bo *bo;
int i;
pp_module = &pp_context->pp_modules[pp_index];
if (pp_module->initialize)
- pp_module->initialize(ctx, in_surface_id, out_surface_id,
+ pp_module->initialize(ctx, pp_context,
+ in_surface_id, out_surface_id,
src_rect, dst_rect);
}
}
static void
-gen6_pp_states_setup(VADriverContextP ctx)
+gen6_pp_states_setup(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
-
gen6_pp_binding_table(pp_context);
gen6_pp_interface_descriptor_table(pp_context);
gen6_pp_upload_constants(pp_context);
}
static void
-gen6_pp_pipeline_select(VADriverContextP ctx)
+gen6_pp_pipeline_select(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
}
static void
-gen6_pp_state_base_address(VADriverContextP ctx)
+gen6_pp_state_base_address(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 10);
OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
}
static void
-gen6_pp_vfe_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+gen6_pp_vfe_state(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 8);
OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (8 - 2));
}
static void
-gen6_pp_curbe_load(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+gen6_pp_curbe_load(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
assert(pp_context->urb.size_cs_entry * pp_context->urb.num_cs_entries * 512 <= pp_context->curbe.bo->size);
}
static void
-gen6_interface_descriptor_load(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+gen6_interface_descriptor_load(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
BEGIN_BATCH(batch, 4);
OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
}
static void
-gen6_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
+gen6_pp_object_walker(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
+ struct intel_batchbuffer *batch = pp_context->batch;
int x, x_steps, y, y_steps;
x_steps = pp_context->pp_x_steps(&pp_context->private_context);
}
static void
-gen6_pp_pipeline_setup(VADriverContextP ctx)
+gen6_pp_pipeline_setup(VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct intel_batchbuffer *batch = i965->batch;
- struct i965_post_processing_context *pp_context = i965->pp_context;
+ struct intel_batchbuffer *batch = pp_context->batch;
intel_batchbuffer_start_atomic(batch, 0x1000);
intel_batchbuffer_emit_mi_flush(batch);
- gen6_pp_pipeline_select(ctx);
+ gen6_pp_pipeline_select(ctx, pp_context);
gen6_pp_curbe_load(ctx, pp_context);
gen6_interface_descriptor_load(ctx, pp_context);
- gen6_pp_state_base_address(ctx);
+ gen6_pp_state_base_address(ctx, pp_context);
gen6_pp_vfe_state(ctx, pp_context);
gen6_pp_object_walker(ctx, pp_context);
intel_batchbuffer_end_atomic(batch);
static void
gen6_post_processing(
VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id,
VASurfaceID out_surface_id,
const VARectangle *src_rect,
int pp_index
)
{
- gen6_pp_initialize(ctx, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
- gen6_pp_states_setup(ctx);
- gen6_pp_pipeline_setup(ctx);
+ gen6_pp_initialize(ctx, pp_context, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
+ gen6_pp_states_setup(ctx, pp_context);
+ gen6_pp_pipeline_setup(ctx, pp_context);
}
static void
i965_post_processing_internal(
VADriverContextP ctx,
+ struct i965_post_processing_context *pp_context,
VASurfaceID in_surface_id,
VASurfaceID out_surface_id,
const VARectangle *src_rect,
if (IS_GEN6(i965->intel.device_id) ||
IS_GEN7(i965->intel.device_id))
- gen6_post_processing(ctx, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
+ gen6_post_processing(ctx, pp_context, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
else
- ironlake_post_processing(ctx, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
+ ironlake_post_processing(ctx, pp_context, in_surface_id, out_surface_id, src_rect, dst_rect, pp_index);
}
VAStatus
assert(status == VA_STATUS_SUCCESS);
obj_surface = SURFACE(out_surface_id);
i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
- i965_post_processing_internal(ctx,
+ i965_post_processing_internal(ctx, i965->pp_context,
in_surface_id, out_surface_id,
src_rect, dst_rect,
PP_NV12_DNDI);
assert(status == VA_STATUS_SUCCESS);
obj_surface = SURFACE(out_surface_id);
i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
- i965_post_processing_internal(ctx,
+ i965_post_processing_internal(ctx, i965->pp_context,
in_surface_id, out_surface_id,
src_rect, dst_rect,
PP_NV12_AVS);
return out_surface_id;
}
-Bool
-i965_post_processing_terminate(VADriverContextP ctx)
+static void
+i965_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
- struct i965_post_processing_context *pp_context = i965->pp_context;
int i;
- if (HAS_PP(i965)) {
- if (pp_context) {
- dri_bo_unreference(pp_context->curbe.bo);
- pp_context->curbe.bo = NULL;
+ dri_bo_unreference(pp_context->curbe.bo);
+ pp_context->curbe.bo = NULL;
- for (i = 0; i < MAX_PP_SURFACES; i++) {
- dri_bo_unreference(pp_context->surfaces[i].ss_bo);
- pp_context->surfaces[i].ss_bo = NULL;
+ for (i = 0; i < MAX_PP_SURFACES; i++) {
+ dri_bo_unreference(pp_context->surfaces[i].ss_bo);
+ pp_context->surfaces[i].ss_bo = NULL;
- dri_bo_unreference(pp_context->surfaces[i].s_bo);
- pp_context->surfaces[i].s_bo = NULL;
- }
+ dri_bo_unreference(pp_context->surfaces[i].s_bo);
+ pp_context->surfaces[i].s_bo = NULL;
+ }
- dri_bo_unreference(pp_context->sampler_state_table.bo);
- pp_context->sampler_state_table.bo = NULL;
+ dri_bo_unreference(pp_context->sampler_state_table.bo);
+ pp_context->sampler_state_table.bo = NULL;
- dri_bo_unreference(pp_context->sampler_state_table.bo_8x8);
- pp_context->sampler_state_table.bo_8x8 = NULL;
+ dri_bo_unreference(pp_context->sampler_state_table.bo_8x8);
+ pp_context->sampler_state_table.bo_8x8 = NULL;
- dri_bo_unreference(pp_context->sampler_state_table.bo_8x8_uv);
- pp_context->sampler_state_table.bo_8x8_uv = NULL;
+ dri_bo_unreference(pp_context->sampler_state_table.bo_8x8_uv);
+ pp_context->sampler_state_table.bo_8x8_uv = NULL;
- dri_bo_unreference(pp_context->binding_table.bo);
- pp_context->binding_table.bo = NULL;
+ dri_bo_unreference(pp_context->binding_table.bo);
+ pp_context->binding_table.bo = NULL;
- dri_bo_unreference(pp_context->idrt.bo);
- pp_context->idrt.bo = NULL;
- pp_context->idrt.num_interface_descriptors = 0;
+ dri_bo_unreference(pp_context->idrt.bo);
+ pp_context->idrt.bo = NULL;
+ pp_context->idrt.num_interface_descriptors = 0;
- dri_bo_unreference(pp_context->vfe_state.bo);
- pp_context->vfe_state.bo = NULL;
+ dri_bo_unreference(pp_context->vfe_state.bo);
+ pp_context->vfe_state.bo = NULL;
- dri_bo_unreference(pp_context->stmm.bo);
- pp_context->stmm.bo = NULL;
+ dri_bo_unreference(pp_context->stmm.bo);
+ pp_context->stmm.bo = NULL;
- for (i = 0; i < NUM_PP_MODULES; i++) {
- struct pp_module *pp_module = &pp_context->pp_modules[i];
+ for (i = 0; i < NUM_PP_MODULES; i++) {
+ struct pp_module *pp_module = &pp_context->pp_modules[i];
- dri_bo_unreference(pp_module->kernel.bo);
- pp_module->kernel.bo = NULL;
- }
+ dri_bo_unreference(pp_module->kernel.bo);
+ pp_module->kernel.bo = NULL;
+ }
- free(pp_context);
- }
+}
+
+Bool
+i965_post_processing_terminate(VADriverContextP ctx)
+{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
+ struct i965_post_processing_context *pp_context = i965->pp_context;
- i965->pp_context = NULL;
+ if (pp_context) {
+ i965_post_processing_context_finalize(pp_context);
+ free(pp_context);
}
+ i965->pp_context = NULL;
+
return True;
}
+/*
+ * One-time initialization of a post-processing context.
+ *
+ * Partitions the URB (VFE entries first, then CS entries), selects the
+ * kernel module table for the detected GPU generation, uploads each kernel
+ * binary into its own bo, and records the batch buffer used for later
+ * pipeline emission.
+ */
+static void
+i965_post_processing_context_init(VADriverContextP ctx,
+                                  struct i965_post_processing_context *pp_context,
+                                  struct intel_batchbuffer *batch)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    int i;
+
+    pp_context->urb.size = URB_SIZE((&i965->intel));
+    pp_context->urb.num_vfe_entries = 32;
+    pp_context->urb.size_vfe_entry = 1; /* in 512 bits unit */
+    pp_context->urb.num_cs_entries = 1;
+    pp_context->urb.size_cs_entry = 2; /* in 512 bits unit */
+    pp_context->urb.vfe_start = 0;
+    /* CS entries are laid out immediately after the VFE entries. */
+    pp_context->urb.cs_start = pp_context->urb.vfe_start +
+        pp_context->urb.num_vfe_entries * pp_context->urb.size_vfe_entry;
+    assert(pp_context->urb.cs_start +
+           pp_context->urb.num_cs_entries * pp_context->urb.size_cs_entry <= URB_SIZE((&i965->intel)));
+
+    assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen5));
+    assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen6));
+
+    /* Gen6 kernels are shared by Gen6 and Gen7; Ironlake has its own set. */
+    if (IS_GEN6(i965->intel.device_id) ||
+        IS_GEN7(i965->intel.device_id))
+        memcpy(pp_context->pp_modules, pp_modules_gen6, sizeof(pp_context->pp_modules));
+    else if (IS_IRONLAKE(i965->intel.device_id))
+        memcpy(pp_context->pp_modules, pp_modules_gen5, sizeof(pp_context->pp_modules));
+
+    for (i = 0; i < NUM_PP_MODULES; i++) {
+        struct pp_module *pp_module = &pp_context->pp_modules[i];
+        /* Drop any stale reference before (re)uploading the kernel. */
+        dri_bo_unreference(pp_module->kernel.bo);
+        pp_module->kernel.bo = dri_bo_alloc(i965->intel.bufmgr,
+                                            pp_module->kernel.name,
+                                            pp_module->kernel.size,
+                                            4096);
+        assert(pp_module->kernel.bo);
+        dri_bo_subdata(pp_module->kernel.bo, 0, pp_module->kernel.size, pp_module->kernel.bin);
+    }
+
+    pp_context->batch = batch;
+}
+
Bool
i965_post_processing_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_post_processing_context *pp_context = i965->pp_context;
-    int i;
    if (HAS_PP(i965)) {
        if (pp_context == NULL) {
            pp_context = calloc(1, sizeof(*pp_context));
+            /* calloc() can fail; don't hand a NULL context to
+             * i965_post_processing_context_init(). */
+            if (!pp_context)
+                return False;
+
+            i965_post_processing_context_init(ctx, pp_context, i965->batch);
            i965->pp_context = pp_context;
-
-            pp_context->urb.size = URB_SIZE((&i965->intel));
-            pp_context->urb.num_vfe_entries = 32;
-            pp_context->urb.size_vfe_entry = 1; /* in 512 bits unit */
-            pp_context->urb.num_cs_entries = 1;
-            pp_context->urb.size_cs_entry = 2; /* in 512 bits unit */
-            pp_context->urb.vfe_start = 0;
-            pp_context->urb.cs_start = pp_context->urb.vfe_start +
-                pp_context->urb.num_vfe_entries * pp_context->urb.size_vfe_entry;
-            assert(pp_context->urb.cs_start +
-                pp_context->urb.num_cs_entries * pp_context->urb.size_cs_entry <= URB_SIZE((&i965->intel)));
-
-            assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen5));
-            assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen6));
-
-            if (IS_GEN6(i965->intel.device_id) ||
-                IS_GEN7(i965->intel.device_id))
-                memcpy(pp_context->pp_modules, pp_modules_gen6, sizeof(pp_context->pp_modules));
-            else if (IS_IRONLAKE(i965->intel.device_id))
-                memcpy(pp_context->pp_modules, pp_modules_gen5, sizeof(pp_context->pp_modules));
-
-            for (i = 0; i < NUM_PP_MODULES; i++) {
-                struct pp_module *pp_module = &pp_context->pp_modules[i];
-                dri_bo_unreference(pp_module->kernel.bo);
-                pp_module->kernel.bo = dri_bo_alloc(i965->intel.bufmgr,
-                                                    pp_module->kernel.name,
-                                                    pp_module->kernel.size,
-                                                    4096);
-                assert(pp_module->kernel.bo);
-                dri_bo_subdata(pp_module->kernel.bo, 0, pp_module->kernel.size, pp_module->kernel.bin);
-            }
        }
    }
    return True;
}
+
+/*
+ * Run one video-processing "frame": scale/copy the input surface into the
+ * context's current render target via the NV12 scaling kernel, then flush
+ * the context-private batch buffer.
+ */
+static void
+i965_proc_picture(VADriverContextP ctx,
+                  VAProfile profile,
+                  union codec_state *codec_state,
+                  struct hw_context *hw_context)
+{
+    struct i965_proc_context *proc_context = (struct i965_proc_context *)hw_context;
+    struct proc_state *proc_state = &codec_state->proc;
+    VAProcPipelineParameterBuffer *pipeline_param;
+    VAProcInputParameterBuffer *input_param;
+
+    /* Both parameter buffers must have been rendered before EndPicture;
+     * check before dereferencing so a misbehaving client asserts here
+     * instead of crashing on a NULL buffer store. */
+    assert(proc_state->pipeline_param && proc_state->pipeline_param->buffer);
+    assert(proc_state->input_param && proc_state->input_param->buffer);
+    pipeline_param = (VAProcPipelineParameterBuffer *)proc_state->pipeline_param->buffer;
+    input_param = (VAProcInputParameterBuffer *)proc_state->input_param->buffer;
+
+    assert(input_param->surface != VA_INVALID_ID);
+    assert(proc_state->current_render_target != VA_INVALID_ID);
+
+    i965_post_processing_internal(ctx, &proc_context->pp_context,
+                                  input_param->surface, proc_state->current_render_target,
+                                  &input_param->region, &pipeline_param->output_region,
+                                  PP_NV12_SCALING);
+
+    intel_batchbuffer_flush(hw_context->batch);
+}
+
+/* Destructor hook for a proc hw_context: releases the GPU buffer objects
+ * owned by the embedded post-processing context, frees the context-private
+ * batch buffer, then the context itself. */
+static void
+i965_proc_context_destroy(void *hw_context)
+{
+    struct i965_proc_context *proc_context = (struct i965_proc_context *)hw_context;
+
+    i965_post_processing_context_finalize(&proc_context->pp_context);
+    intel_batchbuffer_free(proc_context->base.batch);
+    free(proc_context);
+}
+
+/*
+ * Create the hw_context used for VAEntrypointVideoProc contexts.
+ * The context owns its own batch buffer so video processing does not
+ * interfere with the driver-wide render batch.  Returns NULL on
+ * allocation failure.
+ */
+struct hw_context *
+i965_proc_context_init(VADriverContextP ctx, VAProfile profile)
+{
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct i965_proc_context *proc_context = calloc(1, sizeof(struct i965_proc_context));
+
+    /* calloc() can fail; the fields below must not be written through NULL. */
+    if (!proc_context)
+        return NULL;
+
+    proc_context->base.destroy = i965_proc_context_destroy;
+    proc_context->base.run = i965_proc_picture;
+    proc_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
+    i965_post_processing_context_init(ctx, &proc_context->pp_context, proc_context->base.batch);
+
+    return (struct hw_context *)proc_context;
+}