2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_driver.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "gen75_vpp_vebox.h"
39 #include "intel_media.h"
44 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
47 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
50 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
53 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
56 i965_DestroySurfaces(VADriverContextP ctx,
57 VASurfaceID *surface_list,
61 i965_CreateSurfaces(VADriverContextP ctx,
66 VASurfaceID *surfaces);
68 VAStatus vpp_surface_convert(VADriverContextP ctx,
69 struct object_surface *src_obj_surf,
70 struct object_surface *dst_obj_surf)
72 VAStatus va_status = VA_STATUS_SUCCESS;
74 assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
75 assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
77 VARectangle src_rect, dst_rect;
78 src_rect.x = dst_rect.x = 0;
79 src_rect.y = dst_rect.y = 0;
80 src_rect.width = dst_rect.width = src_obj_surf->orig_width;
81 src_rect.height = dst_rect.height = dst_obj_surf->orig_height;
83 struct i965_surface src_surface, dst_surface;
84 src_surface.base = (struct object_base *)src_obj_surf;
85 src_surface.type = I965_SURFACE_TYPE_SURFACE;
86 src_surface.flags = I965_SURFACE_FLAG_FRAME;
88 dst_surface.base = (struct object_base *)dst_obj_surf;
89 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
90 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
92 va_status = i965_image_processing(ctx,
100 VAStatus vpp_surface_scaling(VADriverContextP ctx,
101 struct object_surface *dst_obj_surf,
102 struct object_surface *src_obj_surf)
104 VAStatus va_status = VA_STATUS_SUCCESS;
105 int flags = I965_PP_FLAG_AVS;
107 assert(src_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
108 assert(dst_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
110 VARectangle src_rect, dst_rect;
113 src_rect.width = src_obj_surf->orig_width;
114 src_rect.height = src_obj_surf->orig_height;
118 dst_rect.width = dst_obj_surf->orig_width;
119 dst_rect.height = dst_obj_surf->orig_height;
121 va_status = i965_scaling_processing(ctx,
/*
 * hsw_veb_dndi_table:
 * Fill the DN/DI (denoise / deinterlace) hardware state table that the
 * VEBOX_STATE command points at.  The table is written dword by dword
 * through p_table; each dword's bit-fields are described by the trailing
 * comments (w0..w8+).  The values written here are fixed defaults; di_param
 * and dn_param are fetched from the context but (in the visible code) not
 * yet consulted.
 *
 * NOTE(review): this chunk is a garbled extraction -- original line numbers
 * are fused into the text and interior lines (function braces, some table
 * terms, e.g. the final term of the W5 and W7/W8 dwords) are missing.
 * Verify every dword against upstream gen75_vpp_vebox.c before building.
 */
131 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
133 unsigned int* p_table ;
/* Filter parameter buffers from the pipeline; currently unused below. */
135 VAProcFilterParameterBufferDeinterlacing *di_param =
136 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
138 VAProcFilterParameterBuffer * dn_param =
139 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
/* dndi_state_table.ptr must already be mapped (see hsw_veb_state_table_setup). */
141 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
143 *p_table ++ = 0; // reserved . w0
143 /* w1: denoise motion-history / threshold controls */
144 *p_table ++ = ( 0 << 24 | // denoise STAD threshold . w1
145 128 << 16 | // dnmh_history_max
146 0 << 12 | // reserved
147 8 << 8 | // dnmh_delta[3:0]
148 0 ); // denoise ASD threshold
150 *p_table ++ = ( 0 << 30 | // reserved . w2
151 16 << 24 | // temporal diff th
152 0 << 22 | // reserved.
153 8 << 16 | // low temporal diff th
155 0 << 8 | // denoise moving pixel th
156 64 ); // denoise th for sum of complexity measure
158 *p_table ++ = ( 0 << 30 | // reserved . w3
159 4 << 24 | // good neighbor th[5:0]
160 9 << 20 | // CAT slope minus 1
161 5 << 16 | // SAD Tight in
162 0 << 14 | // smooth mv th
163 0 << 12 | // reserved
164 1 << 8 | // bne_edge_th[3:0]
165 15 ); // block noise estimate noise th
167 *p_table ++ = ( 0 << 31 | // STMM blending constant select. w4
168 64 << 24 | // STMM trc1
169 0 << 16 | // STMM trc2
170 0 << 14 | // reserved
172 128 ); // maximum STMM
/* NOTE(review): the closing term of this W5 dword appears to be missing
 * from the extraction (original lines 179-180 dropped). */
174 *p_table ++ = ( 0 << 24 | // minumum STMM . W5
175 0 << 22 | // STMM shift down
176 0 << 20 | // STMM shift up
177 7 << 16 | // STMM output shift
178 128 << 8 | // SDI threshold
181 *p_table ++ = ( 0 << 24 | // SDI fallback mode 1 T1 constant . W6
182 0 << 16 | // SDI fallback mode 1 T2 constant
183 0 << 8 | // SDI fallback mode 2 constant(angle2x1)
184 0 ); // FMD temporal difference threshold
/* NOTE(review): W7 and W8 below are also missing trailing terms
 * (original lines 188, 192, 194-195, 203-205 dropped). */
186 *p_table ++ = ( 32 << 24 | // FMD #1 vertical difference th . w7
187 32 << 16 | // FMD #2 vertical difference th
189 32 << 8 | // FMD tear threshold
190 0 << 7 | // MCDI Enable, use motion compensated deinterlace algorithm
191 0 << 6 | // progressive DN
193 0 << 3 | // DN/DI Top First
196 *p_table ++ = ( 0 << 29 | // reserved . W8
197 0 << 23 | // dnmh_history_init[5:0]
198 10 << 19 | // neighborPixel th
199 0 << 18 | // reserved
200 0 << 16 | // FMD for 2nd field of previous frame
201 25 << 10 | // MC pixel consistency th
202 0 << 8 | // FMD for 1st field for current frame
/* Chroma denoise controls -- all disabled/zero by default. */
206 *p_table ++ = ( 0 << 24 | // reserved
207 0 << 16 | // chr_dnmh_stad_th
208 0 << 13 | // reserved
209 0 << 12 | // chrome denoise enable
210 0 << 6 | // chr temp diff th
211 0 ); // chr temp diff low
/*
 * hsw_veb_iecp_std_table:
 * Fill the STD/STE (skin-tone detection/enhancement) section at offset 0 of
 * the mapped IECP state table.  When the STD/STE filter is not requested,
 * the 29-dword block is zeroed (feature disabled); otherwise a fixed set of
 * default hardware coefficients (opaque magic dwords) is written.
 *
 * NOTE(review): garbled extraction -- fused line numbers; the function
 * braces and the `}else{` between the memset and the table writes are
 * missing.  Verify against upstream gen75_vpp_vebox.c.
 */
215 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
217 unsigned int *p_table = proc_ctx->iecp_state_table.ptr + 0 ;
218 //VAProcFilterParameterBuffer * std_param =
219 // (VAProcFilterParameterBuffer *) proc_ctx->filter_std;
/* Filter off: zero the whole 29-dword STD/STE block. */
221 if(!(proc_ctx->filters_mask & VPP_IECP_STD_STE)){
222 memset(p_table, 0, 29 * 4);
/* Filter on: default coefficient set (hardware-defined magic values). */
224 *p_table ++ = 0x9a6e39f0;
225 *p_table ++ = 0x400c0000;
226 *p_table ++ = 0x00001180;
227 *p_table ++ = 0xfe2f2e00;
228 *p_table ++ = 0x000000ff;
230 *p_table ++ = 0x00140000;
231 *p_table ++ = 0xd82e0000;
232 *p_table ++ = 0x8285ecec;
233 *p_table ++ = 0x00008282;
234 *p_table ++ = 0x00000000;
236 *p_table ++ = 0x02117000;
237 *p_table ++ = 0xa38fec96;
238 *p_table ++ = 0x0000c8c8;
239 *p_table ++ = 0x00000000;
240 *p_table ++ = 0x01478000;
242 *p_table ++ = 0x0007c306;
243 *p_table ++ = 0x00000000;
244 *p_table ++ = 0x00000000;
245 *p_table ++ = 0x1c1bd000;
246 *p_table ++ = 0x00000000;
248 *p_table ++ = 0x00000000;
249 *p_table ++ = 0x00000000;
250 *p_table ++ = 0x0007cf80;
251 *p_table ++ = 0x00000000;
252 *p_table ++ = 0x00000000;
254 *p_table ++ = 0x1c080000;
255 *p_table ++ = 0x00000000;
256 *p_table ++ = 0x00000000;
257 *p_table ++ = 0x00000000;
/*
 * hsw_veb_iecp_ace_table:
 * Fill the ACE (automatic contrast enhancement) section of the IECP state
 * table at byte offset 116.  When the ACE filter is not requested the
 * 13-dword block is zeroed; otherwise a default gamma/LUT-like coefficient
 * set is written.
 *
 * NOTE(review): garbled extraction -- fused line numbers; function braces
 * and the `}else{` are missing.  Verify against upstream gen75_vpp_vebox.c.
 */
261 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
263 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
/* Filter off: zero the 13-dword ACE block. */
265 if(!(proc_ctx->filters_mask & VPP_IECP_ACE)){
266 memset(p_table, 0, 13 * 4);
/* Filter on: default coefficients (hardware-defined magic values). */
268 *p_table ++ = 0x00000068;
269 *p_table ++ = 0x4c382410;
270 *p_table ++ = 0x9c887460;
271 *p_table ++ = 0xebd8c4b0;
272 *p_table ++ = 0x604c3824;
274 *p_table ++ = 0xb09c8874;
275 *p_table ++ = 0x0000d8c4;
276 *p_table ++ = 0x00000000;
277 *p_table ++ = 0x00000000;
278 *p_table ++ = 0x00000000;
280 *p_table ++ = 0x00000000;
281 *p_table ++ = 0x00000000;
282 *p_table ++ = 0x00000000;
/*
 * hsw_veb_iecp_tcc_table:
 * Fill the TCC (total color control) section of the IECP state table at
 * byte offset 168.  When the TCC filter is not requested the 11-dword block
 * is zeroed; otherwise fixed default coefficients are written.
 *
 * NOTE(review): garbled extraction -- fused line numbers; function braces
 * and the `}else{` are missing.  Verify against upstream gen75_vpp_vebox.c.
 */
286 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
288 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
290 // VAProcFilterParameterBuffer * tcc_param =
291 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
/* Filter off: zero the 11-dword TCC block. */
293 if(!(proc_ctx->filters_mask & VPP_IECP_TCC)){
294 memset(p_table, 0, 11 * 4);
/* Filter on: default coefficients (hardware-defined magic values). */
296 *p_table ++ = 0x00000000;
297 *p_table ++ = 0x00000000;
298 *p_table ++ = 0x1e34cc91;
299 *p_table ++ = 0x3e3cce91;
300 *p_table ++ = 0x02e80195;
302 *p_table ++ = 0x0197046b;
303 *p_table ++ = 0x01790174;
304 *p_table ++ = 0x00000000;
305 *p_table ++ = 0x00000000;
306 *p_table ++ = 0x03030000;
308 *p_table ++ = 0x009201c0;
/*
 * hsw_veb_iecp_pro_amp_table:
 * Fill the ProcAmp (color balance: hue/saturation/brightness/contrast)
 * section of the IECP state table at byte offset 212 (2 dwords).  When the
 * ProcAmp filter is off the block is zeroed; otherwise the user-supplied
 * VAProcFilterParameterBufferColorBalance elements are converted to the
 * hardware fixed-point formats via intel_format_convert() and packed.
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include the function braces, the `}else{`, and the declarations of the
 * loop index `i` and of `src_hue` (presumably `float src_hue = 0.0;`,
 * original line 324 dropped).  Verify against upstream gen75_vpp_vebox.c.
 */
312 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
314 unsigned int contrast = 0x80; //default
315 int brightness = 0x00; //default
316 int cos_c_s = 256 ; //default
317 int sin_c_s = 0; //default
318 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
/* Filter off: zero the 2-dword ProcAmp block. */
320 if(!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)){
321 memset(p_table, 0, 2 * 4);
323 float src_saturation = 1.0;
325 float src_contrast = 1.0;
326 float src_brightness = 0.0;
327 float tmp_value = 0.0;
330 VAProcFilterParameterBufferColorBalance * amp_params =
331 (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
/* Collect each requested color-balance attribute; the comments give the
 * expected value ranges from the libva API. */
333 for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++){
334 VAProcColorBalanceType attrib = amp_params[i].attrib;
336 if(attrib == VAProcColorBalanceHue) {
337 src_hue = amp_params[i].value; //(-180.0, 180.0)
338 }else if(attrib == VAProcColorBalanceSaturation) {
339 src_saturation = amp_params[i].value; //(0.0, 10.0)
340 }else if(attrib == VAProcColorBalanceBrightness) {
341 src_brightness = amp_params[i].value; // (-100.0, 100.0)
342 brightness = intel_format_convert(src_brightness, 7, 4, 1);
343 }else if(attrib == VAProcColorBalanceContrast) {
344 src_contrast = amp_params[i].value; // (0.0, 10.0)
345 contrast = intel_format_convert(src_contrast, 4, 7, 0);
/* Hue is applied as a rotation in the Cb/Cr plane, pre-multiplied by
 * contrast and saturation, then converted to S7.8 fixed point. */
349 tmp_value = cos(src_hue/180*PI) * src_contrast * src_saturation;
350 cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
352 tmp_value = sin(src_hue/180*PI) * src_contrast * src_saturation;
353 sin_c_s = intel_format_convert(tmp_value, 7, 8, 1);
/* Pack dword 0 (contrast/brightness/enable) and dword 1 (hue rotation).
 * NOTE(review): a term of dword 0 (likely the ProcAmp enable bit, original
 * lines 357/359-360 dropped) is missing from this extraction. */
355 *p_table ++ = ( 0 << 28 | //reserved
356 contrast << 17 | //contrast value (U4.7 format)
358 brightness << 1| // S7.4 format
361 *p_table ++ = ( cos_c_s << 16 | // cos(h) * contrast * saturation
362 sin_c_s); // sin(h) * contrast * saturation
/*
 * hsw_veb_iecp_csc_table:
 * Fill the CSC (color-space conversion) section of the IECP state table at
 * byte offset 220 (8 dwords).  Chooses a 3x3 transform matrix plus per-
 * channel offsets based on the input/output fourcc pair:
 *   - RGBA -> YUV formats: BT.601 RGB-to-YCbCr coefficients
 *   - YUV formats -> RGBA: BT.601 YCbCr-to-RGB coefficients
 *   - otherwise identity, enabled only when the fourccs differ.
 * Coefficients are packed as S2.10 fixed point, offsets as S10.
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include function braces, `return;` after the disabled-filter memset,
 * the offset assignments in the RGB->YUV branch (original lines 400-404,
 * presumably y/u/v offsets such as 16*4 and 128*4), `v_coef[0]` in the
 * YUV->RGB branch, and parts of the first packed dword.  Verify against
 * upstream gen75_vpp_vebox.c.
 */
368 void hsw_veb_iecp_csc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
370 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
371 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
372 float v_coef[3] = {0.0, 0.0, 0.0};
373 float u_coef[3] = {0.0, 0.0, 0.0};
374 int is_transform_enabled = 0;
/* CSC filter not requested: zero the 8-dword block. */
376 if(!(proc_ctx->filters_mask & VPP_IECP_CSC)){
377 memset(p_table, 0, 8 * 4);
/* Only same-standard conversion is supported. */
381 VAProcColorStandardType in_color_std = proc_ctx->pipeline_param->surface_color_standard;
382 VAProcColorStandardType out_color_std = proc_ctx->pipeline_param->output_color_standard;
383 assert(in_color_std == out_color_std);
/* RGBA input to a YUV output: RGB -> YCbCr (BT.601 limited range).
 * NOTE(review): 'Y','V','Y','2' below looks like a typo for 'Y','U','Y','2'
 * (compare the mirror branch at the YUV->RGB case) -- confirm upstream. */
385 if(proc_ctx->fourcc_input == VA_FOURCC('R','G','B','A') &&
386 (proc_ctx->fourcc_output == VA_FOURCC('N','V','1','2') ||
387 proc_ctx->fourcc_output == VA_FOURCC('Y','V','1','2') ||
388 proc_ctx->fourcc_output == VA_FOURCC('Y','V','Y','2') ||
389 proc_ctx->fourcc_output == VA_FOURCC('A','Y','U','V'))) {
391 tran_coef[0] = 0.257;
392 tran_coef[1] = 0.504;
393 tran_coef[2] = 0.098;
394 tran_coef[3] = -0.148;
395 tran_coef[4] = -0.291;
396 tran_coef[5] = 0.439;
397 tran_coef[6] = 0.439;
398 tran_coef[7] = -0.368;
399 tran_coef[8] = -0.071;
405 is_transform_enabled = 1;
/* YUV input to RGBA output: YCbCr -> RGB (BT.601). */
406 }else if((proc_ctx->fourcc_input == VA_FOURCC('N','V','1','2') ||
407 proc_ctx->fourcc_input == VA_FOURCC('Y','V','1','2') ||
408 proc_ctx->fourcc_input == VA_FOURCC('Y','U','Y','2') ||
409 proc_ctx->fourcc_input == VA_FOURCC('A','Y','U','V'))&&
410 proc_ctx->fourcc_output == VA_FOURCC('R','G','B','A')) {
412 tran_coef[0] = 1.164;
413 tran_coef[1] = 0.000;
414 tran_coef[2] = 1.569;
415 tran_coef[3] = 1.164;
416 tran_coef[4] = -0.813;
417 tran_coef[5] = -0.392;
418 tran_coef[6] = 1.164;
419 tran_coef[7] = 2.017;
420 tran_coef[8] = 0.000;
/* Chroma offsets of -128 scaled by 4 (S10 fixed-point headroom). */
423 v_coef[1] = -128 * 4;
424 v_coef[2] = -128 * 4;
426 is_transform_enabled = 1;
427 }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
428 //enable when input and output format are different.
429 is_transform_enabled = 1;
/* Nothing to transform: clear the block and (presumably) return. */
432 if(is_transform_enabled == 0){
433 memset(p_table, 0, 8 * 4);
/* Pack the 3x3 matrix (S2.10) two coefficients per dword, then the three
 * u/v offset pairs (S10 each). */
435 *p_table ++ = ( 0 << 29 | //reserved
436 intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
437 intel_format_convert(tran_coef[0], 2, 10, 1) << 3 | //c0, s2.10 format
439 0 << 1 | // yuv_channel swap
440 is_transform_enabled);
442 *p_table ++ = ( 0 << 26 | //reserved
443 intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
444 intel_format_convert(tran_coef[2], 2, 10, 1));
446 *p_table ++ = ( 0 << 26 | //reserved
447 intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
448 intel_format_convert(tran_coef[4], 2, 10, 1));
450 *p_table ++ = ( 0 << 26 | //reserved
451 intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
452 intel_format_convert(tran_coef[6], 2, 10, 1));
454 *p_table ++ = ( 0 << 13 | //reserved
455 intel_format_convert(tran_coef[8], 2, 10, 1));
457 *p_table ++ = ( 0 << 22 | //reserved
458 intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
459 intel_format_convert(v_coef[0], 10, 0, 1));
461 *p_table ++ = ( 0 << 22 | //reserved
462 intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
463 intel_format_convert(v_coef[1], 10, 0, 1));
465 *p_table ++ = ( 0 << 22 | //reserved
466 intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
467 intel_format_convert(v_coef[2], 10, 0, 1));
/*
 * hsw_veb_iecp_aoi_table:
 * Fill the AOI (area of interest) section of the IECP state table at byte
 * offset 252 (3 dwords).  When the AOI filter is off the block is zeroed;
 * otherwise default values are written (AOI disabled in dword 0, min/max
 * X and Y extents in dwords 1-2).
 *
 * NOTE(review): garbled extraction -- fused line numbers; function braces
 * and the `}else{` are missing.  Verify against upstream gen75_vpp_vebox.c.
 */
471 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
473 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
474 // VAProcFilterParameterBuffer * tcc_param =
475 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
477 if(!(proc_ctx->filters_mask & VPP_IECP_AOI)){
478 memset(p_table, 0, 3 * 4);
480 *p_table ++ = 0x00000000;
481 *p_table ++ = 0x00030000;
482 *p_table ++ = 0x00030000;
/*
 * hsw_veb_state_table_setup:
 * Map and populate the VEBOX state tables before command emission.
 *  - If any DN/DI filter bit (low byte of filters_mask) is set, map the
 *    DNDI state BO, fill it via hsw_veb_dndi_table(), unmap.
 *  - If any IECP filter bit (second byte) is set OR a format conversion is
 *    needed, map the IECP state BO, fill all six IECP sections, unmap.
 *
 * NOTE(review): garbled extraction -- fused line numbers and missing
 * function/if closing braces.  Verify against upstream gen75_vpp_vebox.c.
 */
486 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
/* Low byte of filters_mask = DN/DI family of filters. */
488 if(proc_ctx->filters_mask & 0x000000ff) {
489 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
490 dri_bo_map(dndi_bo, 1);
491 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
493 hsw_veb_dndi_table(ctx, proc_ctx);
495 dri_bo_unmap(dndi_bo);
/* Second byte = IECP family; also needed when the pipeline must convert
 * between input and output fourcc (CSC lives in the IECP table). */
498 if(proc_ctx->filters_mask & 0x0000ff00 ||
499 proc_ctx->fourcc_input != proc_ctx->fourcc_output) {
500 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
501 dri_bo_map(iecp_bo, 1);
502 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
/* Each helper fills its own fixed-offset section of the mapped table. */
504 hsw_veb_iecp_std_table(ctx, proc_ctx);
505 hsw_veb_iecp_ace_table(ctx, proc_ctx);
506 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
507 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
508 hsw_veb_iecp_csc_table(ctx, proc_ctx);
509 hsw_veb_iecp_aoi_table(ctx, proc_ctx);
511 dri_bo_unmap(iecp_bo);
/*
 * hsw_veb_state_command:
 * Emit the VEBOX_STATE command into the VEB batch: a control dword with the
 * DN/DI/IECP enable bits, followed by relocations to the four state tables
 * (DNDI, IECP, gamut, vertex).
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include the body of the early-bailout `if` (original lines 524-526,
 * presumably forcing is_iecp_enabled on), and the OUT_RELOC macro
 * invocations preceding each bo argument.  Verify against upstream
 * gen75_vpp_vebox.c.
 */
515 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
517 struct intel_batchbuffer *batch = proc_ctx->batch;
/* Decode enable bits from the filter mask: bit0=DN, bit1=DI, byte1=IECP. */
518 unsigned int is_dn_enabled = (proc_ctx->filters_mask & 0x01)? 1: 0;
519 unsigned int is_di_enabled = (proc_ctx->filters_mask & 0x02)? 1: 0;
520 unsigned int is_iecp_enabled = (proc_ctx->filters_mask & 0xff00)?1:0;
/* If a format conversion is required, or neither DN nor DI runs, the IECP
 * stage must be active (body of this if dropped in the extraction). */
522 if(proc_ctx->fourcc_input != proc_ctx->fourcc_output ||
523 (is_dn_enabled == 0 && is_di_enabled == 0)){
527 BEGIN_VEB_BATCH(batch, 6);
528 OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
530 0 << 26 | // state surface control bits
531 0 << 11 | // reserved.
532 0 << 10 | // pipe sync disable
533 2 << 8 | // DI output frame
534 0 << 7 | // 444->422 downsample method
535 0 << 6 | // 422->420 downsample method
536 !!(proc_ctx->is_first_frame && (is_di_enabled || is_dn_enabled)) << 5 | // DN/DI first frame
537 is_di_enabled << 4 | // DI enable
538 is_dn_enabled << 3 | // DN enable
539 is_iecp_enabled << 2 | // global IECP enabled
540 0 << 1 | // ColorGamutCompressionEnable
541 0 ) ; // ColorGamutExpansionEnable.
/* Relocations for the four state tables (read-only for the GPU). */
544 proc_ctx->dndi_state_table.bo,
545 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
548 proc_ctx->iecp_state_table.bo,
549 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
552 proc_ctx->gamut_state_table.bo,
553 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
556 proc_ctx->vertex_state_table.bo,
557 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
559 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_surface_state:
 * Emit a VEBOX_SURFACE_STATE command describing either the current input
 * surface (is_output == 0) or the current output surface (is_output != 0)
 * from the frame store.  Derives format/pitch/interleave from the surface
 * fourcc (NV12, YUY2, AYUV or RGBA) and tiling from the BO.
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include the if/else around the frame-store selection, closing braces of
 * the fourcc if-chain, and the OUT_VEB_BATCH invocations that precede
 * several packed dwords.  Verify against upstream gen75_vpp_vebox.c.
 */
562 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
564 struct intel_batchbuffer *batch = proc_ctx->batch;
565 unsigned int u_offset_y = 0, v_offset_y = 0;
566 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
567 unsigned int surface_format = PLANAR_420_8;
568 struct object_surface* obj_surf = NULL;
569 unsigned int surface_pitch = 0;
570 unsigned int half_pitch_chroma = 0;
/* Pick the surface being described (output vs input frame store slot);
 * the surrounding if/else was dropped in the extraction. */
573 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
575 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
578 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
579 obj_surf->fourcc == VA_FOURCC_YUY2 ||
580 obj_surf->fourcc == VA_FOURCC_AYUV ||
581 obj_surf->fourcc == VA_FOURCC_RGBA);
/* Map fourcc to hardware surface format, byte pitch and chroma layout. */
583 if (obj_surf->fourcc == VA_FOURCC_NV12) {
584 surface_format = PLANAR_420_8;
585 surface_pitch = obj_surf->width;
586 is_uv_interleaved = 1;
587 half_pitch_chroma = 0;
588 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
589 surface_format = YCRCB_NORMAL;
590 surface_pitch = obj_surf->width * 2;
591 is_uv_interleaved = 0;
592 half_pitch_chroma = 0;
593 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
594 surface_format = PACKED_444A_8;
595 surface_pitch = obj_surf->width * 4;
596 is_uv_interleaved = 0;
597 half_pitch_chroma = 0;
598 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
599 surface_format = R8G8B8A8_UNORM_SRGB;
600 surface_pitch = obj_surf->width * 4;
601 is_uv_interleaved = 0;
602 half_pitch_chroma = 0;
/* Chroma plane Y-offsets within the surface (relevant for NV12). */
605 u_offset_y = obj_surf->y_cb_offset;
606 v_offset_y = obj_surf->y_cr_offset;
608 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
610 BEGIN_VEB_BATCH(batch, 6);
611 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
614 is_output); // surface indentification.
/* w3: frame geometry (height-1 / width). */
617 (proc_ctx->height_input - 1) << 18 | // height . w3
618 (proc_ctx->width_input) << 4 | // width
/* w4: format, layout and tiling bits. */
622 surface_format << 28 | // surface format, YCbCr420. w4
623 is_uv_interleaved << 27 | // interleave chrome , two seperate palar
624 0 << 20 | // reserved
625 (surface_pitch - 1) << 3 | // surface pitch, 64 align
626 half_pitch_chroma << 2 | // half pitch for chrome
627 !!tiling << 1 | // tiled surface, linear surface used
628 (tiling == I915_TILING_Y)); // tiled walk, ignored when liner surface
631 0 << 29 | // reserved . w5
632 0 << 16 | // X offset for V(Cb)
633 0 << 15 | // reserved
634 u_offset_y); // Y offset for V(Cb)
637 0 << 29 | // reserved . w6
638 0 << 16 | // X offset for V(Cr)
639 0 << 15 | // reserved
640 v_offset_y ); // Y offset for V(Cr)
642 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_dndi_iecp_command:
 * Emit the VEB_DNDI_IECP_STATE command: the processed-scanline extent
 * followed by relocations for all eight frame-store surfaces (current and
 * previous inputs, STMM in/out, DN output, current/previous outputs, and
 * the statistics surface).  Inputs are GPU-read; outputs are GPU-written.
 *
 * NOTE(review): garbled extraction -- fused line numbers; the two block
 * comments about frame rotation lost their closing markers, the
 * startingX/endingX dword and the OUT_RELOC invocations preceding each bo
 * were dropped.  Verify against upstream gen75_vpp_vebox.c.
 */
645 void hsw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
647 struct intel_batchbuffer *batch = proc_ctx->batch;
648 unsigned char frame_ctrl_bits = 0;
/* Full-width processing: every scanline column from 0 to width. */
649 unsigned int startingX = 0;
650 unsigned int endingX = proc_ctx->width_input;
652 /* s1:update the previous and current input */
653 /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
654 proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
655 proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
657 if(proc_ctx->surface_input_vebox != -1){
658 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
659 proc_ctx->surface_input_vebox);
661 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
662 proc_ctx->surface_input);
665 /*s2: update the STMM input and output */
666 /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
667 proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
668 proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
670 /*s3:set reloc buffer address */
671 BEGIN_VEB_BATCH(batch, 10);
672 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
/* Read-only input surfaces. */
677 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
678 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
680 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
681 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
683 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
684 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
/* Read-write output surfaces (write domain = RENDER). */
686 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
687 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
689 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
690 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
692 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
693 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
695 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
696 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
698 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
699 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
701 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_resource_prepare:
 * Allocate everything the VEBOX pipeline needs for one frame:
 *  - resolve the effective input/output surfaces (preferring the internal
 *    "vebox" conversion surfaces when present) and determine their fourcc,
 *    subsampling and tiling, allocating NV12 BOs on demand;
 *  - create internal frame-store surfaces for every slot not already bound
 *    to an external surface;
 *  - (re)allocate the four state-table BOs (DNDI, IECP, gamut, vertex).
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include the declarations of va_status/bo, `else` branches of several
 * if-statements, the trailing arguments of both i965_CreateSurfaces calls
 * (format/attribute count and the surfaces array), and the sizes passed to
 * dri_bo_alloc.  Verify against upstream gen75_vpp_vebox.c.
 */
704 void hsw_veb_resource_prepare(VADriverContextP ctx,
705 struct intel_vebox_context *proc_ctx)
709 struct i965_driver_data *i965 = i965_driver_data(ctx);
710 unsigned int input_fourcc, output_fourcc;
711 unsigned int input_sampling, output_sampling;
712 unsigned int input_tiling, output_tiling;
713 unsigned int i, swizzle;
714 struct object_surface *obj_surf_out = NULL, *obj_surf_in = NULL;
/* Prefer the internal conversion surface when pre-conversion was set up. */
716 if (proc_ctx->surface_input_vebox_object != NULL) {
717 obj_surf_in = proc_ctx->surface_input_vebox_object;
719 obj_surf_in = proc_ctx->surface_input_object;
722 if (proc_ctx->surface_output_vebox_object != NULL) {
723 obj_surf_out = proc_ctx->surface_output_vebox_object;
725 obj_surf_out = proc_ctx->surface_output_object;
/* Input BO missing: default to tiled NV12 4:2:0 and allocate.
 * NOTE(review): the input_tiling assignment before use appears to be on a
 * dropped line (original 731) -- confirm it is set to 1 upstream. */
728 if(obj_surf_in->bo == NULL){
729 input_fourcc = VA_FOURCC('N','V','1','2');
730 input_sampling = SUBSAMPLE_YUV420;
732 i965_check_alloc_surface_bo(ctx, obj_surf_in, input_tiling, input_fourcc, input_sampling);
734 input_fourcc = obj_surf_in->fourcc;
735 input_sampling = obj_surf_in->subsampling;
736 dri_bo_get_tiling(obj_surf_in->bo, &input_tiling, &swizzle);
737 input_tiling = !!input_tiling;
/* Same dance for the output surface. */
740 if(obj_surf_out->bo == NULL){
741 output_fourcc = VA_FOURCC('N','V','1','2');
742 output_sampling = SUBSAMPLE_YUV420;
744 i965_check_alloc_surface_bo(ctx, obj_surf_out, output_tiling, output_fourcc, output_sampling);
746 output_fourcc = obj_surf_out->fourcc;
747 output_sampling = obj_surf_out->subsampling;
748 dri_bo_get_tiling(obj_surf_out->bo, &output_tiling, &swizzle);
749 output_tiling = !!output_tiling;
752 /* vebox pipelien input surface format info */
753 proc_ctx->fourcc_input = input_fourcc;
754 proc_ctx->fourcc_output = output_fourcc;
756 /* create pipeline surfaces */
757 VASurfaceID surfaces[FRAME_STORE_SUM];
758 va_status = i965_CreateSurfaces(ctx,
759 proc_ctx ->width_input,
760 proc_ctx ->height_input,
764 assert(va_status == VA_STATUS_SUCCESS);
766 for(i = 0; i < FRAME_STORE_SUM; i ++) {
767 if(proc_ctx->frame_store[i].obj_surface){
768 continue; //refer external surface for vebox pipeline
771 VASurfaceID new_surface;
772 struct object_surface *obj_surf = NULL;
774 va_status = i965_CreateSurfaces(ctx,
775 proc_ctx ->width_input,
776 proc_ctx ->height_input,
780 assert(va_status == VA_STATUS_SUCCESS);
782 obj_surf = SURFACE(new_surface);
/* Choose BO properties per frame-store role: input-like slots match the
 * input surface, STMM slots are always tiled, output-like slots match the
 * output surface. */
785 if( i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
786 i965_check_alloc_surface_bo(ctx, obj_surf, input_tiling, input_fourcc, input_sampling);
787 } else if( i == FRAME_IN_STMM || i == FRAME_OUT_STMM){
788 i965_check_alloc_surface_bo(ctx, obj_surf, 1, input_fourcc, input_sampling);
789 } else if( i >= FRAME_OUT_CURRENT){
790 i965_check_alloc_surface_bo(ctx, obj_surf, output_tiling, output_fourcc, output_sampling);
793 proc_ctx->frame_store[i].surface_id = new_surface;
794 proc_ctx->frame_store[i].is_internal_surface = 1;
795 proc_ctx->frame_store[i].obj_surface = obj_surf;
798 /* alloc dndi state table */
799 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
800 bo = dri_bo_alloc(i965->intel.bufmgr,
801 "vebox: dndi state Buffer",
803 proc_ctx->dndi_state_table.bo = bo;
804 dri_bo_reference(proc_ctx->dndi_state_table.bo);
806 /* alloc iecp state table */
807 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
808 bo = dri_bo_alloc(i965->intel.bufmgr,
809 "vebox: iecp state Buffer",
811 proc_ctx->iecp_state_table.bo = bo;
812 dri_bo_reference(proc_ctx->iecp_state_table.bo);
814 /* alloc gamut state table */
815 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
816 bo = dri_bo_alloc(i965->intel.bufmgr,
817 "vebox: gamut state Buffer",
819 proc_ctx->gamut_state_table.bo = bo;
820 dri_bo_reference(proc_ctx->gamut_state_table.bo);
822 /* alloc vertex state table */
823 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
824 bo = dri_bo_alloc(i965->intel.bufmgr,
825 "vertex: iecp state Buffer",
827 proc_ctx->vertex_state_table.bo = bo;
828 dri_bo_reference(proc_ctx->vertex_state_table.bo);
/*
 * hsw_veb_surface_reference:
 * Bind the caller's input and output surfaces into the frame store as
 * external (non-internal) entries.  The output goes into the DN slot when
 * the pipeline runs denoise only, otherwise into the regular output slot.
 *
 * NOTE(review): garbled extraction -- fused line numbers; the else branches
 * and closing braces of the if-statements are missing.  Verify against
 * upstream gen75_vpp_vebox.c.
 */
832 void hsw_veb_surface_reference(VADriverContextP ctx,
833 struct intel_vebox_context *proc_ctx)
835 struct object_surface * obj_surf;
/* Prefer the internal conversion surface when one was created. */
837 if (proc_ctx->surface_input_vebox_object != NULL) {
838 obj_surf = proc_ctx->surface_input_vebox_object;
840 obj_surf = proc_ctx->surface_input_object;
843 /* update the input surface */
844 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
845 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
846 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = obj_surf;
848 if (proc_ctx->surface_output_vebox_object != NULL) {
849 obj_surf = proc_ctx->surface_output_vebox_object;
851 obj_surf = proc_ctx->surface_output_object;
854 /* update the output surface */
855 if (proc_ctx->filters_mask == VPP_DNDI_DN) {
856 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
857 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
858 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = obj_surf;
860 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
861 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
862 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
/*
 * hsw_veb_surface_unreference:
 * Clear the frame-store slots that were bound to external surfaces by
 * hsw_veb_surface_reference(): the input slot, and the DN output slot when
 * running denoise only, otherwise the regular output slot.
 *
 * NOTE(review): garbled extraction -- fused line numbers; else branch and
 * closing braces missing.  Verify against upstream gen75_vpp_vebox.c.
 */
866 void hsw_veb_surface_unreference(VADriverContextP ctx,
867 struct intel_vebox_context *proc_ctx)
869 /* unreference the input surface */
870 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
871 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
872 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = NULL;
874 /* unreference the shared output surface */
875 if (proc_ctx->filters_mask == VPP_DNDI_DN) {
876 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
877 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
878 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = NULL;
880 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
881 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
882 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = NULL;
/*
 * hsw_veb_pre_format_convert:
 * Inspect the input/output surfaces, record pipeline dimensions, and decide
 * which conversion steps are needed:
 *  - PRE_FORMAT_CONVERT: input is a planar YUV variant (YV12/I420/IMC1/
 *    IMC3) the VEBOX cannot consume; convert to an internal NV12 surface.
 *  - POST_FORMAT_CONVERT: output is one of those planar variants; VEBOX
 *    writes NV12 internally and it is converted afterwards.
 *  - POST_SCALING_CONVERT: output dimensions differ from input; a scaled
 *    intermediate surface is created.
 * Allocates the internal NV12 surfaces (input vebox, output vebox, scaled
 * output) on demand.
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include the declaration of va_status, the PRE-convert assignment line
 * (original 919), the `assert(0)`-style unsupported-format branches, the
 * trailing CreateSurfaces arguments (format/attribs), closing braces, and
 * the final `return` value.  Verify against upstream gen75_vpp_vebox.c.
 */
886 int hsw_veb_pre_format_convert(VADriverContextP ctx,
887 struct intel_vebox_context *proc_ctx)
890 struct i965_driver_data *i965 = i965_driver_data(ctx);
891 struct object_surface* obj_surf_input = proc_ctx->surface_input_object;
892 struct object_surface* obj_surf_output = proc_ctx->surface_output_object;
893 struct object_surface* obj_surf_input_vebox;
894 struct object_surface* obj_surf_output_vebox;
/* Record full-frame pipeline dimensions from the surfaces. */
896 proc_ctx->width_input = obj_surf_input->orig_width;
897 proc_ctx->height_input = obj_surf_input->orig_height;
898 proc_ctx->width_output = obj_surf_output->orig_width;
899 proc_ctx->height_output = obj_surf_output->orig_height;
901 /* only partial frame is not supported to be processed */
903 assert(proc_ctx->width_input == proc_ctx->pipeline_param->surface_region->width);
904 assert(proc_ctx->height_input == proc_ctx->pipeline_param->surface_region->height);
905 assert(proc_ctx->width_output == proc_ctx->pipeline_param->output_region->width);
906 assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);
/* Different in/out sizes require a post-pipeline scaling pass. */
909 if(proc_ctx->width_output != proc_ctx->width_input ||
910 proc_ctx->height_output != proc_ctx->height_input){
911 proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
914 /* convert the following format to NV12 format */
915 if(obj_surf_input->fourcc == VA_FOURCC('Y','V','1','2') ||
916 obj_surf_input->fourcc == VA_FOURCC('I','4','2','0') ||
917 obj_surf_input->fourcc == VA_FOURCC('I','M','C','1') ||
918 obj_surf_input->fourcc == VA_FOURCC('I','M','C','3')){
920 proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;
922 } else if(obj_surf_input->fourcc == VA_FOURCC('R','G','B','A') ||
923 obj_surf_input->fourcc == VA_FOURCC('A','Y','U','V') ||
924 obj_surf_input->fourcc == VA_FOURCC('Y','U','Y','2') ||
925 obj_surf_input->fourcc == VA_FOURCC('N','V','1','2')){
926 // nothing to do here
928 /* not support other format as input */
/* Pre-conversion needed: lazily create an internal NV12 surface and
 * convert the caller's input into it. */
932 if (proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT) {
933 if(proc_ctx->surface_input_vebox_object == NULL){
934 va_status = i965_CreateSurfaces(ctx,
935 proc_ctx->width_input,
936 proc_ctx->height_input,
939 &(proc_ctx->surface_input_vebox));
940 assert(va_status == VA_STATUS_SUCCESS);
941 obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
942 assert(obj_surf_input_vebox);
944 if (obj_surf_input_vebox) {
945 proc_ctx->surface_input_vebox_object = obj_surf_input_vebox;
946 i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
/* NOTE(review): argument order here (dst=vebox surface, src=input) is the
 * opposite of vpp_surface_convert's (src, dst) signature -- confirm
 * against upstream; may be a latent bug or a dropped wrapper. */
950 vpp_surface_convert(ctx, proc_ctx->surface_input_vebox_object, proc_ctx->surface_input_object);
953 /* create one temporary NV12 surfaces for conversion*/
954 if(obj_surf_output->fourcc == VA_FOURCC('Y','V','1','2') ||
955 obj_surf_output->fourcc == VA_FOURCC('I','4','2','0') ||
956 obj_surf_output->fourcc == VA_FOURCC('I','M','C','1') ||
957 obj_surf_output->fourcc == VA_FOURCC('I','M','C','3')) {
959 proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
960 } else if(obj_surf_output->fourcc == VA_FOURCC('R','G','B','A') ||
961 obj_surf_output->fourcc == VA_FOURCC('A','Y','U','V') ||
962 obj_surf_output->fourcc == VA_FOURCC('Y','U','Y','2') ||
963 obj_surf_output->fourcc == VA_FOURCC('N','V','1','2')){
964 /* Nothing to do here */
966 /* not support other format as input */
/* Post conversion and/or scaling: lazily create the internal NV12 output
 * surface the VEBOX will actually write into. */
970 if(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
971 proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
972 if(proc_ctx->surface_output_vebox_object == NULL){
973 va_status = i965_CreateSurfaces(ctx,
974 proc_ctx->width_input,
975 proc_ctx->height_input,
978 &(proc_ctx->surface_output_vebox));
979 assert(va_status == VA_STATUS_SUCCESS);
980 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
981 assert(obj_surf_output_vebox);
983 if (obj_surf_output_vebox) {
984 proc_ctx->surface_output_vebox_object = obj_surf_output_vebox;
985 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
/* Scaling also needs a separate output-sized NV12 surface. */
990 if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
991 if(proc_ctx->surface_output_scaled_object == NULL){
992 va_status = i965_CreateSurfaces(ctx,
993 proc_ctx->width_output,
994 proc_ctx->height_output,
997 &(proc_ctx->surface_output_scaled));
998 assert(va_status == VA_STATUS_SUCCESS);
999 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
1000 assert(obj_surf_output_vebox);
1002 if (obj_surf_output_vebox) {
1003 proc_ctx->surface_output_scaled_object = obj_surf_output_vebox;
1004 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
/*
 * hsw_veb_post_format_convert:
 * Move the VEBOX result into the caller's output surface.  Three cases:
 *  1. no post conversion/scaling -> result already in the output surface;
 *  2. format conversion only -> one vpp_surface_convert pass;
 *  3. scaling (with optional conversion) -> scale into the intermediate
 *     surface, then convert/copy into the final output.
 * The source slot is FRAME_OUT_CURRENT_DN for a denoise-only pipeline,
 * FRAME_OUT_CURRENT otherwise (mirrors hsw_veb_surface_reference).
 *
 * NOTE(review): garbled extraction -- fused line numbers; missing lines
 * include else/closing braces and the function's `return` value.  Also note
 * the vpp_surface_convert calls pass the destination first, while its
 * signature is (ctx, src, dst) -- confirm against upstream.
 */
1012 int hsw_veb_post_format_convert(VADriverContextP ctx,
1013 struct intel_vebox_context *proc_ctx)
1015 struct object_surface *obj_surface = NULL;
/* Pick the frame-store slot the VEBOX wrote its result into. */
1017 if(proc_ctx->filters_mask == VPP_DNDI_DN){
1018 obj_surface = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface;
1020 obj_surface = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
1023 if(!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1024 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1025 /* Output surface format is covered by vebox pipeline and
1026 * processed picture is already store in output surface
1027 * so nothing will be done here */
1028 } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1029 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1030 /* convert and copy NV12 to YV12/IMC3/IMC2 output*/
1031 vpp_surface_convert(ctx,proc_ctx->surface_output_object, obj_surface);
1033 } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1034 /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/ output*/
1035 assert(obj_surface->fourcc == VA_FOURCC('N','V','1','2'));
1037 /* first step :surface scaling */
1038 vpp_surface_scaling(ctx,proc_ctx->surface_output_scaled_object, obj_surface);
1040 /* second step: color format convert and copy to output */
1041 obj_surface = proc_ctx->surface_output_object;
/* Convert the scaled NV12 intermediate into whichever supported format
 * the caller's output surface uses. */
1043 if(obj_surface->fourcc == VA_FOURCC('N','V','1','2') ||
1044 obj_surface->fourcc == VA_FOURCC('Y','V','1','2') ||
1045 obj_surface->fourcc == VA_FOURCC('I','4','2','0') ||
1046 obj_surface->fourcc == VA_FOURCC('Y','U','Y','2') ||
1047 obj_surface->fourcc == VA_FOURCC('I','M','C','1') ||
1048 obj_surface->fourcc == VA_FOURCC('I','M','C','3')) {
1049 vpp_surface_convert(ctx, proc_ctx->surface_output_object, proc_ctx->surface_output_scaled_object);
/*
 * Top-level VEBOX processing entry point for one frame: walks the VPP
 * filter list from the pipeline parameter buffer, accumulates the
 * enabled-filter mask and per-filter parameter pointers on proc_ctx,
 * then builds and flushes the vebox command batch.
 *
 * Returns VA_STATUS_SUCCESS on completion, or
 * VA_STATUS_ERROR_INVALID_PARAMETER for a filter ID with no backing
 * buffer store.  NOTE(review): the goto/label connecting the
 * invalid-buffer check to the error return is elided in this extract —
 * confirm in the full file.
 */
VAStatus gen75_vebox_process_picture(VADriverContextP ctx,
    struct intel_vebox_context *proc_ctx)
    /* i965 is needed by the BUFFER() lookup macro below. */
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
    VAProcFilterParameterBuffer* filter = NULL;
    struct object_buffer *obj_buf = NULL;

    /* Classify each requested filter and stash its parameter block. */
    for (i = 0; i < pipe->num_filters; i ++) {
        obj_buf = BUFFER(pipe->filters[i]);

        assert(obj_buf && obj_buf->buffer_store);

        /* Release build: reject a filter ID with no backing store
         * (the elided branch body presumably jumps to the error path). */
        if (!obj_buf || !obj_buf->buffer_store)

        filter = (VAProcFilterParameterBuffer*)obj_buf-> buffer_store->buffer;

        if (filter->type == VAProcFilterNoiseReduction) {
            proc_ctx->filters_mask |= VPP_DNDI_DN;
            proc_ctx->filter_dn = filter;
        } else if (filter->type == VAProcFilterDeinterlacing) {
            proc_ctx->filters_mask |= VPP_DNDI_DI;
            proc_ctx->filter_di = filter;
        } else if (filter->type == VAProcFilterColorBalance) {
            proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
            proc_ctx->filter_iecp_amp = filter;
            /* ProAmp can carry several elements (one per balance attr). */
            proc_ctx->filter_iecp_amp_num_elements = obj_buf->num_elements;
        } else if (filter->type == VAProcFilterColorStandard){
            proc_ctx->filters_mask |= VPP_IECP_CSC;
            proc_ctx->filter_iecp_csc = filter;

    /* Prepare conversion surfaces and take references on the frame
     * store before any commands are emitted. */
    hsw_veb_pre_format_convert(ctx, proc_ctx);
    hsw_veb_surface_reference(ctx, proc_ctx);

    /* One-time allocation of state tables / internal resources. */
    if(proc_ctx->is_first_frame){
        hsw_veb_resource_prepare(ctx, proc_ctx);

    /* Build and submit the vebox batch: surface states, per-filter
     * state tables, then the VEB state and DNDI/IECP commands. */
    intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
    hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
    hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
    hsw_veb_state_table_setup(ctx, proc_ctx);

    hsw_veb_state_command(ctx, proc_ctx);
    hsw_veb_dndi_iecp_command(ctx, proc_ctx);
    intel_batchbuffer_end_atomic(proc_ctx->batch);
    intel_batchbuffer_flush(proc_ctx->batch);

    /* Copy/convert the result into the user surface if needed, then
     * drop the frame-store references taken above. */
    hsw_veb_post_format_convert(ctx, proc_ctx);
    hsw_veb_surface_unreference(ctx, proc_ctx);

    if(proc_ctx->is_first_frame)
        proc_ctx->is_first_frame = 0;

    return VA_STATUS_SUCCESS;

    return VA_STATUS_ERROR_INVALID_PARAMETER;
1125 void gen75_vebox_context_destroy(VADriverContextP ctx,
1126 struct intel_vebox_context *proc_ctx)
1130 if(proc_ctx->surface_input_vebox != VA_INVALID_ID){
1131 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1132 proc_ctx->surface_input_vebox = VA_INVALID_ID;
1133 proc_ctx->surface_input_vebox_object = NULL;
1136 if(proc_ctx->surface_output_vebox != VA_INVALID_ID){
1137 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1138 proc_ctx->surface_output_vebox = VA_INVALID_ID;
1139 proc_ctx->surface_output_vebox_object = NULL;
1142 if(proc_ctx->surface_output_scaled != VA_INVALID_ID){
1143 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1144 proc_ctx->surface_output_scaled = VA_INVALID_ID;
1145 proc_ctx->surface_output_scaled_object = NULL;
1148 for(i = 0; i < FRAME_STORE_SUM; i ++) {
1149 if (proc_ctx->frame_store[i].is_internal_surface == 1) {
1150 assert(proc_ctx->frame_store[i].surface_id != VA_INVALID_ID);
1152 if (proc_ctx->frame_store[i].surface_id != VA_INVALID_ID)
1153 i965_DestroySurfaces(ctx, &proc_ctx->frame_store[i].surface_id, 1);
1156 proc_ctx->frame_store[i].surface_id = VA_INVALID_ID;
1157 proc_ctx->frame_store[i].is_internal_surface = 0;
1158 proc_ctx->frame_store[i].obj_surface = NULL;
1161 /* dndi state table */
1162 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
1163 proc_ctx->dndi_state_table.bo = NULL;
1165 /* iecp state table */
1166 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
1167 proc_ctx->dndi_state_table.bo = NULL;
1169 /* gamut statu table */
1170 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
1171 proc_ctx->gamut_state_table.bo = NULL;
1173 /* vertex state table */
1174 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
1175 proc_ctx->vertex_state_table.bo = NULL;
1177 intel_batchbuffer_free(proc_ctx->batch);
1182 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1184 struct intel_driver_data *intel = intel_driver_data(ctx);
1185 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1188 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
1189 memset(proc_context->frame_store, 0, sizeof(VEBFrameStore)*FRAME_STORE_SUM);
1191 for (i = 0; i < FRAME_STORE_SUM; i ++) {
1192 proc_context->frame_store[i].surface_id = VA_INVALID_ID;
1193 proc_context->frame_store[i].is_internal_surface = 0;
1194 proc_context->frame_store[i].obj_surface = NULL;
1197 proc_context->filters_mask = 0;
1198 proc_context->is_first_frame = 1;
1199 proc_context->surface_output_object = NULL;
1200 proc_context->surface_input_object = NULL;
1201 proc_context->surface_input_vebox = VA_INVALID_ID;
1202 proc_context->surface_input_vebox_object = NULL;
1203 proc_context->surface_output_vebox = VA_INVALID_ID;
1204 proc_context->surface_output_vebox_object = NULL;
1205 proc_context->surface_output_scaled = VA_INVALID_ID;
1206 proc_context->surface_output_scaled_object = NULL;
1207 proc_context->filters_mask = 0;
1208 proc_context->format_convert_flags = 0;
1210 return proc_context;