return va_status;
}
-void
+static void
gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
{
dri_bo_unreference(pp_context->surface_state_binding_table.bo);
void
gen8_post_processing_context_init(VADriverContextP ctx,
- struct i965_post_processing_context *pp_context,
+ void *data,
struct intel_batchbuffer *batch)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
unsigned int kernel_offset, end_offset;
unsigned char *kernel_ptr;
struct pp_module *pp_module;
+ struct i965_post_processing_context *pp_context = data;
{
pp_context->vfe_gpu_state.max_num_threads = 60;
}
pp_context->intel_post_processing = gen8_post_processing;
+ pp_context->finalize = gen8_post_processing_context_finalize;
assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));
.enc_hw_context_init = NULL,
.proc_hw_context_init = NULL,
.render_init = genx_render_init,
+ .post_processing_context_init = NULL,
.max_width = 2048,
.max_height = 2048,
};
extern struct hw_context *ironlake_dec_hw_context_init(VADriverContextP, struct object_config *);
+extern void i965_post_processing_context_init(VADriverContextP, void *, struct intel_batchbuffer *);
+
static const struct hw_codec_info ilk_hw_codec_info = {
.dec_hw_context_init = ironlake_dec_hw_context_init,
.enc_hw_context_init = NULL,
.proc_hw_context_init = i965_proc_context_init,
.render_init = genx_render_init,
+ .post_processing_context_init = i965_post_processing_context_init,
.max_width = 2048,
.max_height = 2048,
.enc_hw_context_init = gen6_enc_hw_context_init,
.proc_hw_context_init = i965_proc_context_init,
.render_init = genx_render_init,
+ .post_processing_context_init = i965_post_processing_context_init,
.max_width = 2048,
.max_height = 2048,
.enc_hw_context_init = gen7_enc_hw_context_init,
.proc_hw_context_init = i965_proc_context_init,
.render_init = genx_render_init,
+ .post_processing_context_init = i965_post_processing_context_init,
.max_width = 4096,
.max_height = 4096,
.enc_hw_context_init = gen75_enc_hw_context_init,
.proc_hw_context_init = gen75_proc_context_init,
.render_init = genx_render_init,
+ .post_processing_context_init = i965_post_processing_context_init,
.max_width = 4096,
.max_height = 4096,
extern struct hw_context *gen8_dec_hw_context_init(VADriverContextP, struct object_config *);
extern struct hw_context *gen8_enc_hw_context_init(VADriverContextP, struct object_config *);
+extern void gen8_post_processing_context_init(VADriverContextP, void *, struct intel_batchbuffer *);
static const struct hw_codec_info bdw_hw_codec_info = {
.dec_hw_context_init = gen8_dec_hw_context_init,
.enc_hw_context_init = gen8_enc_hw_context_init,
.proc_hw_context_init = gen75_proc_context_init,
.render_init = gen8_render_init,
+ .post_processing_context_init = gen8_post_processing_context_init,
.max_width = 4096,
.max_height = 4096,
struct hw_context *(*enc_hw_context_init)(VADriverContextP, struct object_config *);
struct hw_context *(*proc_hw_context_init)(VADriverContextP, struct object_config *);
bool (*render_init)(VADriverContextP);
+ void (*post_processing_context_init)(VADriverContextP, void *, struct intel_batchbuffer *);
int max_width;
int max_height;
struct i965_post_processing_context *pp_context = i965->pp_context;
if (pp_context) {
- if (IS_GEN8(i965->intel.device_info)) {
- gen8_post_processing_context_finalize(pp_context);
- } else {
- i965_post_processing_context_finalize(pp_context);
- }
+ pp_context->finalize(pp_context);
free(pp_context);
}
#define VPP_CURBE_ALLOCATION_SIZE 32
-static void
+void
i965_post_processing_context_init(VADriverContextP ctx,
- struct i965_post_processing_context *pp_context,
+ void *data,
struct intel_batchbuffer *batch)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
int i;
-
- if (IS_GEN8(i965->intel.device_info)) {
- gen8_post_processing_context_init(ctx, pp_context, batch);
- return;
- };
+ struct i965_post_processing_context *pp_context = data;
if (IS_IRONLAKE(i965->intel.device_info)) {
pp_context->urb.size = i965->intel.device_info->urb_size;
pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
pp_context->intel_post_processing = gen6_post_processing;
}
-
+
+ pp_context->finalize = i965_post_processing_context_finalize;
assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen5));
assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen6));
if (HAS_PP(i965)) {
if (pp_context == NULL) {
pp_context = calloc(1, sizeof(*pp_context));
- i965_post_processing_context_init(ctx, pp_context, i965->pp_batch);
+ i965->codec_info->post_processing_context_init(ctx, pp_context, i965->pp_batch);
i965->pp_context = pp_context;
}
}
struct hw_context *
i965_proc_context_init(VADriverContextP ctx, struct object_config *obj_config)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_driver_data *intel = intel_driver_data(ctx);
struct i965_proc_context *proc_context = calloc(1, sizeof(struct i965_proc_context));
+ /* calloc() can fail; the assignments below would dereference NULL.
+ * Returning NULL matches the other *_context_init entry points'
+ * "no context" result (CERT MEM32-C: detect allocation failure). */
+ if (!proc_context)
+ return NULL;
proc_context->base.destroy = i965_proc_context_destroy;
proc_context->base.run = i965_proc_picture;
proc_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);
- i965_post_processing_context_init(ctx, &proc_context->pp_context, proc_context->base.batch);
+ i965->codec_info->post_processing_context_init(ctx, &proc_context->pp_context, proc_context->base.batch);
return (struct hw_context *)proc_context;
}
const VARectangle *dst_rect,
int pp_index,
void * filter_param);
+ void (*finalize)(struct i965_post_processing_context *pp_context);
};
struct i965_proc_context
bool
i965_post_processing_init(VADriverContextP ctx);
-
-extern void
-gen8_post_processing_context_init(VADriverContextP ctx,
- struct i965_post_processing_context *pp_context,
- struct intel_batchbuffer *batch);
-
-extern void
-gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context);
-
#endif /* __I965_POST_PROCESSING_H__ */