/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Li Xiaowei <xiaowei.a.li@intel.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"
#include "gen75_vpp_gpe.h"

#define MAX_INTERFACE_DESC_GEN6      MAX_GPE_KERNELS
#define MAX_MEDIA_SURFACES_GEN6      34

#define SURFACE_STATE_PADDED_SIZE_0_GEN7        ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_1_GEN7        ALIGN(sizeof(struct gen7_surface_state2), 32)
#define SURFACE_STATE_PADDED_SIZE               MAX(SURFACE_STATE_PADDED_SIZE_0_GEN7, SURFACE_STATE_PADDED_SIZE_1_GEN7)

#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET(index)             (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * (index))

#define CURBE_ALLOCATION_SIZE   37
#define CURBE_TOTAL_DATA_LENGTH (4 * 32)
#define CURBE_URB_ENTRY_LENGTH  4
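
/*
 * Layout sketch, derived from the macros above: the surface-state heap packs
 * MAX_MEDIA_SURFACES_GEN6 padded surface-state entries back to back, followed
 * by the binding table, one 32-bit offset per surface:
 *
 *   bytes [0, SURFACE_STATE_OFFSET(34))       surface-state entries
 *   bytes [BINDING_TABLE_OFFSET(0), ...)      binding-table dwords
 *
 * gen75_gpe_context_init() sizes surface_state_binding_table.length to match.
 * The CURBE_* values presumably follow the usual MEDIA_VFE_STATE conventions
 * (allocation size and URB entry length in 256-bit units); only
 * CURBE_TOTAL_DATA_LENGTH bytes are actually copied in gpe_constant_setup().
 */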

extern VAStatus
i965_CreateSurfaces(VADriverContextP ctx,
                    int width,
                    int height,
                    int format,
                    int num_surfaces,
                    VASurfaceID *surfaces);

extern VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);

/* Shader binaries for sharpening */
static const unsigned int gen75_gpe_sharpening_h_blur[][4] = {
#include "shaders/post_processing/gen75/sharpening_h_blur.g75b"
};

static const unsigned int gen75_gpe_sharpening_v_blur[][4] = {
#include "shaders/post_processing/gen75/sharpening_v_blur.g75b"
};

static const unsigned int gen75_gpe_sharpening_unmask[][4] = {
#include "shaders/post_processing/gen75/sharpening_unmask.g75b"
};

static struct i965_kernel gen75_vpp_sharpening_kernels[] = {
    {
        "vpp: sharpening(horizontal blur)",
        VPP_GPE_SHARPENING,
        gen75_gpe_sharpening_h_blur,
        sizeof(gen75_gpe_sharpening_h_blur),
        NULL
    },
    {
        "vpp: sharpening(vertical blur)",
        VPP_GPE_SHARPENING,
        gen75_gpe_sharpening_v_blur,
        sizeof(gen75_gpe_sharpening_v_blur),
        NULL
    },
    {
        "vpp: sharpening(unmask)",
        VPP_GPE_SHARPENING,
        gen75_gpe_sharpening_unmask,
        sizeof(gen75_gpe_sharpening_unmask),
        NULL
    },
};
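
/*
 * Together these three kernels implement unsharp masking: pass 0 blurs
 * horizontally, pass 1 blurs the result vertically, and pass 2 uses the
 * blurred copy to sharpen the original frame. gen75_gpe_process_sharpening()
 * below selects them by index 0/1/2 through sub_shader_index.
 */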

static VAStatus
gpe_surfaces_setup(VADriverContextP ctx,
                   struct vpp_gpe_context *vpp_gpe_ctx)
{
    struct object_surface *obj_surface;
    unsigned int i = 0;
    unsigned char input_surface_sum = (1 + vpp_gpe_ctx->forward_surf_sum +
                                       vpp_gpe_ctx->backward_surf_sum) * 2;

    /* Bind the input NV12 surfaces (luma + chroma) */
    for (i = 0; i < input_surface_sum; i += 2) {
        obj_surface = vpp_gpe_ctx->surface_input_object[i / 2];
        assert(obj_surface);
        vpp_gpe_ctx->vpp_media_rw_surface_setup(ctx,
                                                &vpp_gpe_ctx->gpe_ctx,
                                                obj_surface,
                                                BINDING_TABLE_OFFSET(i),
                                                SURFACE_STATE_OFFSET(i));

        vpp_gpe_ctx->vpp_media_chroma_surface_setup(ctx,
                                                    &vpp_gpe_ctx->gpe_ctx,
                                                    obj_surface,
                                                    BINDING_TABLE_OFFSET(i + 1),
                                                    SURFACE_STATE_OFFSET(i + 1));
    }

    /* Bind the output NV12 surface (luma + chroma) */
    obj_surface = vpp_gpe_ctx->surface_output_object;
    assert(obj_surface);
    vpp_gpe_ctx->vpp_media_rw_surface_setup(ctx,
                                            &vpp_gpe_ctx->gpe_ctx,
                                            obj_surface,
                                            BINDING_TABLE_OFFSET(input_surface_sum),
                                            SURFACE_STATE_OFFSET(input_surface_sum));
    vpp_gpe_ctx->vpp_media_chroma_surface_setup(ctx,
                                                &vpp_gpe_ctx->gpe_ctx,
                                                obj_surface,
                                                BINDING_TABLE_OFFSET(input_surface_sum + 1),
                                                SURFACE_STATE_OFFSET(input_surface_sum + 1));

    /* Bind the kernel return buffer surface */
    vpp_gpe_ctx->vpp_buffer_surface_setup(ctx,
                                          &vpp_gpe_ctx->gpe_ctx,
                                          &vpp_gpe_ctx->vpp_kernel_return,
                                          BINDING_TABLE_OFFSET(input_surface_sum + 2),
                                          SURFACE_STATE_OFFSET(input_surface_sum + 2));

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_interface_setup(VADriverContextP ctx,
                    struct vpp_gpe_context *vpp_gpe_ctx)
{
    struct gen6_interface_descriptor_data *desc;
    dri_bo *bo = vpp_gpe_ctx->gpe_ctx.idrt.bo;
    int i;

    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc = bo->virtual;

    /* Set up the interface descriptor table, one entry per sub-shader */
    for (i = 0; i < vpp_gpe_ctx->sub_shader_sum; i++) {
        struct i965_kernel *kernel = &vpp_gpe_ctx->gpe_ctx.kernels[i];
        assert(sizeof(*desc) == 32);
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 0; /* FIXME: */
        desc->desc2.sampler_state_pointer = 0;
        desc->desc3.binding_table_entry_count = 6; /* FIXME: */
        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = 0;

        /* Patch the kernel start pointer with the kernel bo's GPU address */
        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
                          kernel->bo);
        desc++;
    }

    dri_bo_unmap(bo);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_constant_setup(VADriverContextP ctx,
                   struct vpp_gpe_context *vpp_gpe_ctx)
{
    dri_bo_map(vpp_gpe_ctx->gpe_ctx.curbe.bo, 1);
    assert(vpp_gpe_ctx->gpe_ctx.curbe.bo->virtual);

    /* Copy the kernel parameters into the CURBE buffer */
    unsigned char *constant_buffer = vpp_gpe_ctx->gpe_ctx.curbe.bo->virtual;
    memcpy(constant_buffer, vpp_gpe_ctx->kernel_param,
           vpp_gpe_ctx->kernel_param_size);

    dri_bo_unmap(vpp_gpe_ctx->gpe_ctx.curbe.bo);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_fill_thread_parameters(VADriverContextP ctx,
                           struct vpp_gpe_context *vpp_gpe_ctx)
{
    unsigned int *command_ptr;
    unsigned int i, size = vpp_gpe_ctx->thread_param_size;
    unsigned char *position = NULL;

    /* Emit one MEDIA_OBJECT with inline data for each thread */
    dri_bo_map(vpp_gpe_ctx->vpp_batchbuffer.bo, 1);
    command_ptr = vpp_gpe_ctx->vpp_batchbuffer.bo->virtual;

    for (i = 0; i < vpp_gpe_ctx->thread_num; i++) {
        /* DWord length = payload dwords + 6 fixed dwords - 2 */
        *command_ptr++ = (CMD_MEDIA_OBJECT | (size / sizeof(int) + 6 - 2));
        *command_ptr++ = vpp_gpe_ctx->sub_shader_index;
        *command_ptr++ = 0;
        *command_ptr++ = 0;
        *command_ptr++ = 0;
        *command_ptr++ = 0;

        /* Copy this thread's inline data */
        position = (unsigned char *)(vpp_gpe_ctx->thread_param + size * i);
        memcpy(command_ptr, position, size);
        command_ptr += size / sizeof(int);
    }

    /* Pad and terminate the secondary batch */
    *command_ptr++ = 0;
    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vpp_gpe_ctx->vpp_batchbuffer.bo);

    return VA_STATUS_SUCCESS;
}
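
/*
 * A worked example of the stream gpe_fill_thread_parameters() builds,
 * assuming a 24-byte (6-dword) inline payload -- the exact size depends on
 * ThreadParameterSharpening:
 *
 *   DW0       CMD_MEDIA_OBJECT | (6 + 6 - 2)   command length in dwords - 2
 *   DW1       sub_shader_index                 interface descriptor offset
 *   DW2-DW5   0                                indirect data and scoreboard
 *                                              fields, unused here
 *   DW6-DW11  inline thread parameters
 *
 * repeated thread_num times, then a zero pad dword plus MI_BATCH_BUFFER_END.
 */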

static VAStatus
gpe_pipeline_setup(VADriverContextP ctx,
                   struct vpp_gpe_context *vpp_gpe_ctx)
{
    intel_batchbuffer_start_atomic(vpp_gpe_ctx->batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(vpp_gpe_ctx->batch);

    gen6_gpe_pipeline_setup(ctx, &vpp_gpe_ctx->gpe_ctx, vpp_gpe_ctx->batch);

    gpe_fill_thread_parameters(ctx, vpp_gpe_ctx);

    /* Chain to the secondary batch carrying the MEDIA_OBJECT commands */
    BEGIN_BATCH(vpp_gpe_ctx->batch, 2);
    OUT_BATCH(vpp_gpe_ctx->batch, MI_BATCH_BUFFER_START | (2 << 6));
    OUT_RELOC(vpp_gpe_ctx->batch,
              vpp_gpe_ctx->vpp_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
              0);
    ADVANCE_BATCH(vpp_gpe_ctx->batch);

    intel_batchbuffer_end_atomic(vpp_gpe_ctx->batch);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_process_init(VADriverContextP ctx,
                 struct vpp_gpe_context *vpp_gpe_ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;

    /* 6 command dwords plus the inline payload per thread, plus 16 bytes
     * of tail for the pad dword and MI_BATCH_BUFFER_END */
    unsigned int batch_buf_size = vpp_gpe_ctx->thread_num *
                 (vpp_gpe_ctx->thread_param_size + 6 * sizeof(int)) + 16;

    /* One 16-byte return block per thread */
    vpp_gpe_ctx->vpp_kernel_return.num_blocks = vpp_gpe_ctx->thread_num;
    vpp_gpe_ctx->vpp_kernel_return.size_block = 16;
    vpp_gpe_ctx->vpp_kernel_return.pitch = 1;
    unsigned int kernel_return_size = vpp_gpe_ctx->vpp_kernel_return.num_blocks
           * vpp_gpe_ctx->vpp_kernel_return.size_block;

    dri_bo_unreference(vpp_gpe_ctx->vpp_batchbuffer.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vpp batch buffer",
                      batch_buf_size, 0x1000);
    vpp_gpe_ctx->vpp_batchbuffer.bo = bo;
    dri_bo_reference(vpp_gpe_ctx->vpp_batchbuffer.bo);

    dri_bo_unreference(vpp_gpe_ctx->vpp_kernel_return.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vpp kernel return buffer",
                      kernel_return_size, 0x1000);
    vpp_gpe_ctx->vpp_kernel_return.bo = bo;
    dri_bo_reference(vpp_gpe_ctx->vpp_kernel_return.bo);

    i965_gpe_context_init(ctx, &vpp_gpe_ctx->gpe_ctx);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_process_prepare(VADriverContextP ctx,
                    struct vpp_gpe_context *vpp_gpe_ctx)
{
    /* Set up all the memory objects */
    gpe_surfaces_setup(ctx, vpp_gpe_ctx);
    gpe_interface_setup(ctx, vpp_gpe_ctx);
    gpe_constant_setup(ctx, vpp_gpe_ctx);

    /* Program the media pipeline */
    gpe_pipeline_setup(ctx, vpp_gpe_ctx);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gpe_process_run(VADriverContextP ctx,
                struct vpp_gpe_context *vpp_gpe_ctx)
{
    intel_batchbuffer_flush(vpp_gpe_ctx->batch);

    return VA_STATUS_SUCCESS;
}

static VAStatus
gen75_gpe_process(VADriverContextP ctx,
                  struct vpp_gpe_context *vpp_gpe_ctx)
{
    /* OR-ing VAStatus codes yields meaningless values; keep the first
     * failure instead */
    VAStatus va_status = gpe_process_init(ctx, vpp_gpe_ctx);
    if (va_status == VA_STATUS_SUCCESS)
        va_status = gpe_process_prepare(ctx, vpp_gpe_ctx);
    if (va_status == VA_STATUS_SUCCESS)
        va_status = gpe_process_run(ctx, vpp_gpe_ctx);

    return va_status;
}

static VAStatus
gen75_gpe_process_sharpening(VADriverContextP ctx,
                             struct vpp_gpe_context *vpp_gpe_ctx)
{
    VAStatus va_status = VA_STATUS_SUCCESS;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *origin_in_obj_surface = vpp_gpe_ctx->surface_input_object[0];
    struct object_surface *origin_out_obj_surface = vpp_gpe_ctx->surface_output_object;

    VAProcPipelineParameterBuffer *pipe = vpp_gpe_ctx->pipeline_param;
    VABufferID *filter_ids = (VABufferID *)pipe->filters;
    struct object_buffer *obj_buf = BUFFER(*(filter_ids + 0));

    assert(obj_buf && obj_buf->buffer_store && obj_buf->buffer_store->buffer);

    if (!obj_buf ||
        !obj_buf->buffer_store ||
        !obj_buf->buffer_store->buffer)
        goto error;

    VAProcFilterParameterBuffer *filter =
        (VAProcFilterParameterBuffer *)obj_buf->buffer_store->buffer;
    float sharpening_intensity = filter->value;

    ThreadParameterSharpening thr_param;
    unsigned int thr_param_size = sizeof(ThreadParameterSharpening);
    unsigned int i;
    unsigned char *pos;

    if (vpp_gpe_ctx->is_first_frame) {
        vpp_gpe_ctx->sub_shader_sum = 3;
        i965_gpe_load_kernels(ctx,
                              &vpp_gpe_ctx->gpe_ctx,
                              gen75_vpp_sharpening_kernels,
                              vpp_gpe_ctx->sub_shader_sum);
    }

    /* Allocate the intermediate NV12 surface used between the blur passes */
    if (vpp_gpe_ctx->surface_tmp == VA_INVALID_ID) {
        va_status = i965_CreateSurfaces(ctx,
                                        vpp_gpe_ctx->in_frame_w,
                                        vpp_gpe_ctx->in_frame_h,
                                        VA_RT_FORMAT_YUV420,
                                        1,
                                        &vpp_gpe_ctx->surface_tmp);
        assert(va_status == VA_STATUS_SUCCESS);

        struct object_surface *obj_surf = SURFACE(vpp_gpe_ctx->surface_tmp);
        assert(obj_surf);

        if (obj_surf) {
            i965_check_alloc_surface_bo(ctx, obj_surf, 1, VA_FOURCC('N','V','1','2'),
                                        SUBSAMPLE_YUV420);
            vpp_gpe_ctx->surface_tmp_object = obj_surf;
        }
    }

    assert(sharpening_intensity >= 0.0 && sharpening_intensity <= 1.0);
    thr_param.l_amount = (unsigned int)(sharpening_intensity * 128);
    thr_param.d_amount = (unsigned int)(sharpening_intensity * 128);

    thr_param.base.pic_width = vpp_gpe_ctx->in_frame_w;
    thr_param.base.pic_height = vpp_gpe_ctx->in_frame_h;

    /* Step 1: horizontal blur, one thread per 16-row band */
    vpp_gpe_ctx->forward_surf_sum = 0;
    vpp_gpe_ctx->backward_surf_sum = 0;

    vpp_gpe_ctx->thread_num = vpp_gpe_ctx->in_frame_h / 16;
    vpp_gpe_ctx->thread_param_size = thr_param_size;
    vpp_gpe_ctx->thread_param = (unsigned char *)malloc(vpp_gpe_ctx->thread_param_size
                                                        * vpp_gpe_ctx->thread_num);
    pos = vpp_gpe_ctx->thread_param;

    if (!pos) {
        return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }

    for (i = 0; i < vpp_gpe_ctx->thread_num; i++) {
        thr_param.base.v_pos = 16 * i;
        thr_param.base.h_pos = 0;
        memcpy(pos, &thr_param, thr_param_size);
        pos += thr_param_size;
    }

    vpp_gpe_ctx->sub_shader_index = 0;
    va_status = gen75_gpe_process(ctx, vpp_gpe_ctx);
    free(vpp_gpe_ctx->thread_param);

    /* Step 2: vertical blur over the step-1 result, one thread per
     * 16-column band */
    vpp_gpe_ctx->surface_input_object[0] = vpp_gpe_ctx->surface_output_object;
    vpp_gpe_ctx->surface_output_object = vpp_gpe_ctx->surface_tmp_object;
    vpp_gpe_ctx->forward_surf_sum = 0;
    vpp_gpe_ctx->backward_surf_sum = 0;

    vpp_gpe_ctx->thread_num = vpp_gpe_ctx->in_frame_w / 16;
    vpp_gpe_ctx->thread_param_size = thr_param_size;
    vpp_gpe_ctx->thread_param = (unsigned char *)malloc(vpp_gpe_ctx->thread_param_size
                                                        * vpp_gpe_ctx->thread_num);
    pos = vpp_gpe_ctx->thread_param;

    if (!pos) {
        return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }

    for (i = 0; i < vpp_gpe_ctx->thread_num; i++) {
        thr_param.base.v_pos = 0;
        thr_param.base.h_pos = 16 * i;
        memcpy(pos, &thr_param, thr_param_size);
        pos += thr_param_size;
    }

    vpp_gpe_ctx->sub_shader_index = 1;
    va_status = gen75_gpe_process(ctx, vpp_gpe_ctx);
    free(vpp_gpe_ctx->thread_param);

    /* Step 3: unmask, applying the blurred copy to the original surface,
     * one thread per 4-row band */
    vpp_gpe_ctx->surface_input_object[0] = origin_in_obj_surface;
    vpp_gpe_ctx->surface_input_object[1] = vpp_gpe_ctx->surface_tmp_object;
    vpp_gpe_ctx->surface_output_object = origin_out_obj_surface;
    vpp_gpe_ctx->forward_surf_sum = 1;
    vpp_gpe_ctx->backward_surf_sum = 0;

    vpp_gpe_ctx->thread_num = vpp_gpe_ctx->in_frame_h / 4;
    vpp_gpe_ctx->thread_param_size = thr_param_size;
    vpp_gpe_ctx->thread_param = (unsigned char *)malloc(vpp_gpe_ctx->thread_param_size
                                                        * vpp_gpe_ctx->thread_num);
    pos = vpp_gpe_ctx->thread_param;

    if (!pos) {
        return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }

    for (i = 0; i < vpp_gpe_ctx->thread_num; i++) {
        thr_param.base.v_pos = 4 * i;
        thr_param.base.h_pos = 0;
        memcpy(pos, &thr_param, thr_param_size);
        pos += thr_param_size;
    }

    vpp_gpe_ctx->sub_shader_index = 2;
    va_status = gen75_gpe_process(ctx, vpp_gpe_ctx);
    free(vpp_gpe_ctx->thread_param);

    return va_status;

error:
    return VA_STATUS_ERROR_INVALID_PARAMETER;
}

VAStatus gen75_gpe_process_picture(VADriverContextP ctx,
                                   struct vpp_gpe_context *vpp_gpe_ctx)
{
    VAStatus va_status = VA_STATUS_SUCCESS;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    VAProcPipelineParameterBuffer *pipe = vpp_gpe_ctx->pipeline_param;
    VAProcFilterParameterBuffer *filter = NULL;
    unsigned int i;
    struct object_surface *obj_surface = NULL;

    if (pipe->num_filters && !pipe->filters)
        goto error;

    /* Find the first sharpening filter in the pipeline, if any */
    for (i = 0; i < pipe->num_filters; i++) {
        struct object_buffer *obj_buf = BUFFER(pipe->filters[i]);

        assert(obj_buf && obj_buf->buffer_store && obj_buf->buffer_store->buffer);

        if (!obj_buf ||
            !obj_buf->buffer_store ||
            !obj_buf->buffer_store->buffer)
            goto error;

        filter = (VAProcFilterParameterBuffer *)obj_buf->buffer_store->buffer;
        if (filter->type == VAProcFilterSharpening)
            break;
    }

    /* Input surface layout: [0] is the current frame, [1..forward_surf_sum]
     * the forward references, then the backward references */
    assert(pipe->num_forward_references + pipe->num_backward_references <= 4);
    vpp_gpe_ctx->surface_input_object[0] = vpp_gpe_ctx->surface_pipeline_input_object;

    vpp_gpe_ctx->forward_surf_sum = 0;
    vpp_gpe_ctx->backward_surf_sum = 0;

    for (i = 0; i < pipe->num_forward_references; i++) {
        obj_surface = SURFACE(pipe->forward_references[i]);

        assert(obj_surface);
        vpp_gpe_ctx->surface_input_object[i + 1] = obj_surface;
        vpp_gpe_ctx->forward_surf_sum++;
    }

    for (i = 0; i < pipe->num_backward_references; i++) {
        obj_surface = SURFACE(pipe->backward_references[i]);

        assert(obj_surface);
        vpp_gpe_ctx->surface_input_object[vpp_gpe_ctx->forward_surf_sum + 1 + i] = obj_surface;
        vpp_gpe_ctx->backward_surf_sum++;
    }

    obj_surface = vpp_gpe_ctx->surface_input_object[0];
    vpp_gpe_ctx->in_frame_w = obj_surface->orig_width;
    vpp_gpe_ctx->in_frame_h = obj_surface->orig_height;

    if (filter && filter->type == VAProcFilterSharpening)
        va_status = gen75_gpe_process_sharpening(ctx, vpp_gpe_ctx);
    else
        va_status = VA_STATUS_ERROR_ATTR_NOT_SUPPORTED;

    vpp_gpe_ctx->is_first_frame = 0;

    return va_status;

error:
    return VA_STATUS_ERROR_INVALID_PARAMETER;
}

void
gen75_gpe_context_destroy(VADriverContextP ctx,
                          struct vpp_gpe_context *vpp_gpe_ctx)
{
    dri_bo_unreference(vpp_gpe_ctx->vpp_batchbuffer.bo);
    vpp_gpe_ctx->vpp_batchbuffer.bo = NULL;

    dri_bo_unreference(vpp_gpe_ctx->vpp_kernel_return.bo);
    vpp_gpe_ctx->vpp_kernel_return.bo = NULL;

    i965_gpe_context_destroy(&vpp_gpe_ctx->gpe_ctx);

    if (vpp_gpe_ctx->surface_tmp != VA_INVALID_ID) {
        assert(vpp_gpe_ctx->surface_tmp_object != NULL);
        i965_DestroySurfaces(ctx, &vpp_gpe_ctx->surface_tmp, 1);
        vpp_gpe_ctx->surface_tmp = VA_INVALID_ID;
        vpp_gpe_ctx->surface_tmp_object = NULL;
    }

    /* A plain free() would leak the batchbuffer's bo; release it properly */
    intel_batchbuffer_free(vpp_gpe_ctx->batch);

    free(vpp_gpe_ctx);
}

struct vpp_gpe_context *
gen75_gpe_context_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct vpp_gpe_context *vpp_gpe_ctx = calloc(1, sizeof(struct vpp_gpe_context));
    struct i965_gpe_context *gpe_ctx;

    assert(vpp_gpe_ctx);
    gpe_ctx = &(vpp_gpe_ctx->gpe_ctx);

    gpe_ctx->surface_state_binding_table.length =
        (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;

    gpe_ctx->idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
    gpe_ctx->idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);

    gpe_ctx->curbe.length = CURBE_TOTAL_DATA_LENGTH;

    gpe_ctx->vfe_state.max_num_threads = 60 - 1;
    gpe_ctx->vfe_state.num_urb_entries = 16;
    gpe_ctx->vfe_state.gpgpu_mode = 0;
    gpe_ctx->vfe_state.urb_entry_size = 59 - 1;
    gpe_ctx->vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;

    vpp_gpe_ctx->vpp_surface2_setup = gen7_gpe_surface2_setup;
    vpp_gpe_ctx->vpp_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
    vpp_gpe_ctx->vpp_buffer_surface_setup = gen7_gpe_buffer_suface_setup; /* sic: helper's actual name */
    vpp_gpe_ctx->vpp_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;
    vpp_gpe_ctx->surface_tmp = VA_INVALID_ID;
    vpp_gpe_ctx->surface_tmp_object = NULL;

    vpp_gpe_ctx->batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER, 0);

    vpp_gpe_ctx->is_first_frame = 1;

    return vpp_gpe_ctx;
}
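
/*
 * Typical lifecycle, as a sketch (the driver's video-processing path is what
 * actually drives these entry points; the field assignments below mirror how
 * this file consumes the context):
 *
 *   struct vpp_gpe_context *vpp = gen75_gpe_context_init(ctx);
 *   vpp->pipeline_param = pipe_param;              // VAProcPipelineParameterBuffer
 *   vpp->surface_pipeline_input_object = in_obj;   // source surface
 *   vpp->surface_output_object = out_obj;          // render target
 *   gen75_gpe_process_picture(ctx, vpp);           // runs the sharpening passes
 *   ...
 *   gen75_gpe_context_destroy(ctx, vpp);
 */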