X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=src%2Fi965_render.c;h=9d0e8465cbba4f42863c84c1fa54835ee05bd8fb;hb=18d0aee33fd4a32f846475cf31a2be6499b17b4b;hp=5b1a1a5978d7688dbe07b682dc7dece4bf169231;hpb=b4c9ca6eb7f2858cf8bd0d4799c2eb164f434781;p=platform%2Fupstream%2Flibva-intel-driver.git

diff --git a/src/i965_render.c b/src/i965_render.c
index 5b1a1a5..9d0e846 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -147,9 +147,9 @@ static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
 #include "shaders/render/exa_wm_write.g7b"
 };
 
-#define SURFACE_STATE_PADDED_SIZE_I965 ALIGN(sizeof(struct i965_surface_state), 32)
-#define SURFACE_STATE_PADDED_SIZE_GEN7 ALIGN(sizeof(struct gen7_surface_state), 32)
-#define SURFACE_STATE_PADDED_SIZE MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
+
+#define SURFACE_STATE_PADDED_SIZE MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
+
 #define SURFACE_STATE_OFFSET(index) (SURFACE_STATE_PADDED_SIZE * index)
 #define BINDING_TABLE_OFFSET SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)
 
@@ -317,6 +317,18 @@ static float yuv_to_rgb_bt601[3][4] = {
 {1.164, 2.017, 0, -0.50196,},
 };
 
+static float yuv_to_rgb_bt709[3][4] = {
+{1.164, 0, 1.793, -0.06275,},
+{1.164, -0.213, -0.533, -0.50196,},
+{1.164, 2.112, 0, -0.50196,},
+};
+
+static float yuv_to_rgb_smpte_240[3][4] = {
+{1.164, 0, 1.794, -0.06275,},
+{1.164, -0.258, -0.5425, -0.50196,},
+{1.164, 2.078, 0, -0.50196,},
+};
+
 static void
 i965_render_vs_unit(VADriverContextP ctx)
 {
@@ -329,7 +341,7 @@ i965_render_vs_unit(VADriverContextP ctx)
     vs_state = render_state->vs.state->virtual;
     memset(vs_state, 0, sizeof(*vs_state));
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
     else
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;
@@ -443,7 +455,7 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -460,13 +472,13 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
     wm_state->wm4.stats_enable = 0;
     wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0; /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
     }
 
-    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
+    wm_state->wm5.max_threads = i965->intel.device_info->max_wm_threads - 1;
     wm_state->wm5.thread_dispatch_enable = 1;
     wm_state->wm5.enable_16_pix = 1;
     wm_state->wm5.enable_8_pix = 0;
@@ -507,7 +519,7 @@ i965_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -524,13 +536,13 @@ i965_render_wm_unit(VADriverContextP ctx)
     wm_state->wm4.stats_enable = 0;
     wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0; /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
     }
 
-    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
+    wm_state->wm5.max_threads = i965->intel.device_info->max_wm_threads - 1;
     wm_state->wm5.thread_dispatch_enable = 1;
     wm_state->wm5.enable_16_pix = 1;
     wm_state->wm5.enable_8_pix = 0;
@@ -791,6 +803,7 @@ gen7_render_set_surface_state(
     gen7_render_set_surface_tiling(ss, tiling);
 }
 
+
 static void
 i965_render_src_surface_state(
     VADriverContextP ctx,
@@ -815,12 +828,12 @@ i965_render_src_surface_state(
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       region, offset,
                                       w, h,
                                       pitch, format, flags);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_SAMPLER, 0,
@@ -863,7 +876,10 @@ i965_render_src_surfaces_state(
     i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags); /* Y */
     i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);
 
-    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
+    if (obj_surface->fourcc == VA_FOURCC_Y800) /* single plane for grayscale */
+        return;
+
+    if (obj_surface->fourcc == VA_FOURCC_NV12) {
         i965_render_src_surface_state(ctx, 3, region,
                                       region_pitch * obj_surface->y_cb_offset,
                                       obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
@@ -930,12 +946,12 @@ i965_render_dest_surface_state(VADriverContextP ctx, int index)
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       dest_region->bo, 0,
                                       dest_region->width, dest_region->height,
                                       dest_region->pitch, format, 0);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
@@ -1066,7 +1082,8 @@
 
 static void
 i965_render_upload_constants(VADriverContextP ctx,
-                             struct object_surface *obj_surface)
+                             struct object_surface *obj_surface,
+                             unsigned int flags)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
@@ -1077,17 +1094,18 @@ i965_render_upload_constants(VADriverContextP ctx,
     float hue = (float)i965->hue_attrib->value / 180 * PI;
     float saturation = (float)i965->saturation_attrib->value / DEFAULT_SATURATION;
     float *yuv_to_rgb;
+    unsigned int color_flag;
 
     dri_bo_map(render_state->curbe.bo, 1);
     assert(render_state->curbe.bo->virtual);
     constant_buffer = render_state->curbe.bo->virtual;
 
     if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
-        assert(obj_surface->fourcc == VA_FOURCC('Y', '8', '0', '0'));
+        assert(obj_surface->fourcc == VA_FOURCC_Y800);
 
         constant_buffer[0] = 2;
     } else {
-        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
+        if (obj_surface->fourcc == VA_FOURCC_NV12)
             constant_buffer[0] = 1;
         else
             constant_buffer[0] = 0;
@@ -1107,8 +1125,14 @@ i965_render_upload_constants(VADriverContextP ctx,
     *color_balance_base++ = cos(hue) * contrast * saturation;
     *color_balance_base++ = sin(hue) * contrast * saturation;
 
+    color_flag = flags & VA_SRC_COLOR_MASK;
     yuv_to_rgb = (float *)constant_buffer + 8;
-    memcpy(yuv_to_rgb, yuv_to_rgb_bt601, sizeof(yuv_to_rgb_bt601));
+    if (color_flag == VA_SRC_BT709)
+        memcpy(yuv_to_rgb, yuv_to_rgb_bt709, sizeof(yuv_to_rgb_bt709));
+    else if (color_flag == VA_SRC_SMPTE_240)
+        memcpy(yuv_to_rgb, yuv_to_rgb_smpte_240, sizeof(yuv_to_rgb_smpte_240));
+    else
+        memcpy(yuv_to_rgb, yuv_to_rgb_bt601, sizeof(yuv_to_rgb_bt601));
 
     dri_bo_unmap(render_state->curbe.bo);
 }
@@ -1155,7 +1179,7 @@ i965_surface_render_state_setup(
     i965_render_cc_viewport(ctx);
     i965_render_cc_unit(ctx);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
 }
 
 static void
@@ -1209,7 +1233,7 @@ i965_render_state_base_address(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     struct i965_render_state *render_state = &i965->render_state;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 8);
         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
@@ -1373,7 +1397,7 @@ i965_render_vertex_elements(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct intel_batchbuffer *batch = i965->batch;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 5);
         OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
         /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
@@ -1465,7 +1489,7 @@ i965_render_startup(VADriverContextP ctx)
               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
     else
         OUT_BATCH(batch, 3);
@@ -1515,8 +1539,9 @@ i965_clear_dest_region(VADriverContextP ctx)
 
     br13 |= pitch;
 
-    if (IS_GEN6(i965->intel.device_id) ||
-        IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info) ||
+        IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info)) {
         intel_batchbuffer_start_atomic_blt(batch, 24);
         BEGIN_BLT_BATCH(batch, 6);
     } else {
@@ -1577,6 +1602,7 @@ i965_subpic_render_pipeline_setup(VADriverContextP ctx)
     i965_render_pipelined_pointers(ctx);
     i965_render_urb_layout(ctx);
     i965_render_cs_urb_layout(ctx);
+    i965_render_constant_buffer(ctx);
     i965_render_drawing_rectangle(ctx);
     i965_render_vertex_elements(ctx);
     i965_render_startup(ctx);
@@ -1842,7 +1868,7 @@ gen6_render_setup_states(
     gen6_render_color_calc_state(ctx);
     gen6_render_blend_state(ctx);
    gen6_render_depth_stencil_state(ctx);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
 }
 
@@ -2094,7 +2120,7 @@ gen6_emit_wm_state(VADriverContextP ctx, int kernel)
               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
-    OUT_BATCH(batch, ((render_state->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
+    OUT_BATCH(batch, ((i965->intel.device_info->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
              GEN6_3DSTATE_WM_DISPATCH_ENABLE |
              GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
@@ -2344,6 +2370,11 @@ gen7_render_initialize(VADriverContextP ctx)
     render_state->cc.depth_stencil = bo;
 }
 
+/*
+ * for GEN8
+ */
+#define ALIGNMENT 64
+
 static void
 gen7_render_color_calc_state(VADriverContextP ctx)
 {
@@ -2420,6 +2451,7 @@ gen7_render_sampler(VADriverContextP ctx)
     dri_bo_unmap(render_state->wm.sampler);
 }
 
+
 static void
 gen7_render_setup_states(
     VADriverContextP ctx,
@@ -2436,10 +2468,11 @@ gen7_render_setup_states(
     gen7_render_color_calc_state(ctx);
     gen7_render_blend_state(ctx);
     gen7_render_depth_stencil_state(ctx);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
 }
 
+
 static void
 gen7_emit_invarient_states(VADriverContextP ctx)
 {
@@ -2523,7 +2556,7 @@ gen7_emit_urb(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     unsigned int num_urb_entries = 32;
 
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
         num_urb_entries = 64;
 
     BEGIN_BATCH(batch, 2);
@@ -2832,7 +2865,7 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
     unsigned int num_samples = 0;
 
-    if (IS_HASWELL(i965->intel.device_id)) {
+    if (IS_HASWELL(i965->intel.device_info)) {
         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
     }
@@ -2869,7 +2902,7 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
              (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
     OUT_BATCH(batch, 0); /* scratch space base offset */
     OUT_BATCH(batch,
-              ((render_state->max_wm_threads - 1) << max_threads_shift) | num_samples |
+              ((i965->intel.device_info->max_wm_threads - 1) << max_threads_shift) | num_samples |
              GEN7_PS_PUSH_CONSTANT_ENABLE |
              GEN7_PS_ATTRIBUTE_ENABLE |
              GEN7_PS_16_DISPATCH_ENABLE);
@@ -2967,6 +3000,7 @@ gen7_render_emit_states(VADriverContextP ctx, int kernel)
     intel_batchbuffer_end_atomic(batch);
 }
 
+
 static void
 gen7_render_put_surface(
     VADriverContextP ctx,
@@ -2986,6 +3020,7 @@ gen7_render_put_surface(
     intel_batchbuffer_flush(batch);
 }
 
+
 static void
 gen7_subpicture_render_blend_state(VADriverContextP ctx)
 {
@@ -3049,13 +3084,6 @@ gen7_render_put_subpicture(
 }
 
 
-/*
- * global functions
- */
-VAStatus
-i965_DestroySurfaces(VADriverContextP ctx,
-                     VASurfaceID *surface_list,
-                     int num_surfaces);
 void
 intel_render_put_surface(
     VADriverContextP ctx,
@@ -3066,6 +3094,7 @@ intel_render_put_surface(
 )
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
     int has_done_scaling = 0;
     VASurfaceID out_surface_id = i965_post_processing(ctx,
                                                       obj_surface,
@@ -3086,12 +3115,7 @@ intel_render_put_surface(
         src_rect = dst_rect;
     }
 
-    if (IS_GEN7(i965->intel.device_id))
-        gen7_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
-    else if (IS_GEN6(i965->intel.device_id))
-        gen6_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
-    else
-        i965_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
+    render_state->render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
 
     if (out_surface_id != VA_INVALID_ID)
         i965_DestroySurfaces(ctx, &out_surface_id, 1);
@@ -3106,17 +3130,57 @@ intel_render_put_subpicture(
 )
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
 
-    if (IS_GEN7(i965->intel.device_id))
-        gen7_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
-    else if (IS_GEN6(i965->intel.device_id))
-        gen6_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
-    else
-        i965_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
+    render_state->render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
+}
+
+static void
+genx_render_terminate(VADriverContextP ctx)
+{
+    int i;
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
+
+    dri_bo_unreference(render_state->curbe.bo);
+    render_state->curbe.bo = NULL;
+
+    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
+        struct i965_kernel *kernel = &render_state->render_kernels[i];
+
+        dri_bo_unreference(kernel->bo);
+        kernel->bo = NULL;
+    }
+
+    dri_bo_unreference(render_state->vb.vertex_buffer);
+    render_state->vb.vertex_buffer = NULL;
+    dri_bo_unreference(render_state->vs.state);
+    render_state->vs.state = NULL;
+    dri_bo_unreference(render_state->sf.state);
+    render_state->sf.state = NULL;
+    dri_bo_unreference(render_state->wm.sampler);
+    render_state->wm.sampler = NULL;
+    dri_bo_unreference(render_state->wm.state);
+    render_state->wm.state = NULL;
+    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
+    dri_bo_unreference(render_state->cc.viewport);
+    render_state->cc.viewport = NULL;
+    dri_bo_unreference(render_state->cc.state);
+    render_state->cc.state = NULL;
+    dri_bo_unreference(render_state->cc.blend);
+    render_state->cc.blend = NULL;
+    dri_bo_unreference(render_state->cc.depth_stencil);
+    render_state->cc.depth_stencil = NULL;
+
+    if (render_state->draw_region) {
+        dri_bo_unreference(render_state->draw_region->bo);
+        free(render_state->draw_region);
+        render_state->draw_region = NULL;
+    }
 }
 
 bool
-i965_render_init(VADriverContextP ctx)
+genx_render_init(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
@@ -3128,16 +3192,27 @@
 
     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                  sizeof(render_kernels_gen6[0])));
-    if (IS_GEN7(i965->intel.device_id))
+    if (IS_GEN7(i965->intel.device_info)) {
         memcpy(render_state->render_kernels,
-               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
+               (IS_HASWELL(i965->intel.device_info) ? render_kernels_gen7_haswell : render_kernels_gen7),
                sizeof(render_state->render_kernels));
-    else if (IS_GEN6(i965->intel.device_id))
+        render_state->render_put_surface = gen7_render_put_surface;
+        render_state->render_put_subpicture = gen7_render_put_subpicture;
+    } else if (IS_GEN6(i965->intel.device_info)) {
         memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
-    else if (IS_IRONLAKE(i965->intel.device_id))
+        render_state->render_put_surface = gen6_render_put_surface;
+        render_state->render_put_subpicture = gen6_render_put_subpicture;
+    } else if (IS_IRONLAKE(i965->intel.device_info)) {
         memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
-    else
+        render_state->render_put_surface = i965_render_put_surface;
+        render_state->render_put_subpicture = i965_render_put_subpicture;
+    } else {
         memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
+        render_state->render_put_surface = i965_render_put_surface;
+        render_state->render_put_subpicture = i965_render_put_subpicture;
+    }
+
+    render_state->render_terminate = genx_render_terminate;
 
     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
         struct i965_kernel *kernel = &render_state->render_kernels[i];
@@ -3158,73 +3233,22 @@
                                           4096, 64);
     assert(render_state->curbe.bo);
 
-    if (IS_HSW_GT1(i965->intel.device_id)) {
-        render_state->max_wm_threads = 102;
-    } else if (IS_HSW_GT2(i965->intel.device_id)) {
-        render_state->max_wm_threads = 204;
-    } else if (IS_HSW_GT3(i965->intel.device_id)) {
-        render_state->max_wm_threads = 408;
-    } else if (IS_IVB_GT1(i965->intel.device_id) || IS_BAYTRAIL(i965->intel.device_id)) {
-        render_state->max_wm_threads = 48;
-    } else if (IS_IVB_GT2(i965->intel.device_id)) {
-        render_state->max_wm_threads = 172;
-    } else if (IS_SNB_GT1(i965->intel.device_id)) {
-        render_state->max_wm_threads = 40;
-    } else if (IS_SNB_GT2(i965->intel.device_id)) {
-        render_state->max_wm_threads = 80;
-    } else if (IS_IRONLAKE(i965->intel.device_id)) {
-        render_state->max_wm_threads = 72; /* 12 * 6 */
-    } else if (IS_G4X(i965->intel.device_id)) {
-        render_state->max_wm_threads = 50; /* 12 * 5 */
-    } else {
-        /* should never get here !!! */
-        assert(0);
-    }
-
     return true;
 }
 
-void
-i965_render_terminate(VADriverContextP ctx)
+bool
+i965_render_init(VADriverContextP ctx)
 {
-    int i;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
-    struct i965_render_state *render_state = &i965->render_state;
 
-    dri_bo_unreference(render_state->curbe.bo);
-    render_state->curbe.bo = NULL;
-
-    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
-        struct i965_kernel *kernel = &render_state->render_kernels[i];
-
-        dri_bo_unreference(kernel->bo);
-        kernel->bo = NULL;
-    }
+    return i965->codec_info->render_init(ctx);
+}
 
-    dri_bo_unreference(render_state->vb.vertex_buffer);
-    render_state->vb.vertex_buffer = NULL;
-    dri_bo_unreference(render_state->vs.state);
-    render_state->vs.state = NULL;
-    dri_bo_unreference(render_state->sf.state);
-    render_state->sf.state = NULL;
-    dri_bo_unreference(render_state->wm.sampler);
-    render_state->wm.sampler = NULL;
-    dri_bo_unreference(render_state->wm.state);
-    render_state->wm.state = NULL;
-    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
-    dri_bo_unreference(render_state->cc.viewport);
-    render_state->cc.viewport = NULL;
-    dri_bo_unreference(render_state->cc.state);
-    render_state->cc.state = NULL;
-    dri_bo_unreference(render_state->cc.blend);
-    render_state->cc.blend = NULL;
-    dri_bo_unreference(render_state->cc.depth_stencil);
-    render_state->cc.depth_stencil = NULL;
+void
+i965_render_terminate(VADriverContextP ctx)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
 
-    if (render_state->draw_region) {
-        dri_bo_unreference(render_state->draw_region->bo);
-        free(render_state->draw_region);
-        render_state->draw_region = NULL;
-    }
+    render_state->render_terminate(ctx);
 }
-