/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_drmcommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

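/*
 * The render kernels below are precompiled GPU (EU) binaries; the
 * .g4b/.g6b/.g7b files are generated from the assembly sources under
 * shaders/render. Each row holds one 128-bit EU instruction, hence the
 * uint32_t[4] element type.
 */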
static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

/* Programs for Haswell */
static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b.haswell"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

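/*
 * Surface state entries are padded to the larger of the Gen4 and Gen7
 * layouts so one buffer layout works on all supported generations. The
 * binding table starts right after the MAX_RENDER_SURFACES surface state
 * entries in the same BO.
 */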
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

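/*
 * Reinterpret the bits of a float as a uint32_t so floating-point
 * constants can be emitted with OUT_BATCH() (see
 * i965_render_constant_color()); the union avoids pointer type-punning.
 */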
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum {
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7_haswell[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7_haswell,
        sizeof(ps_kernel_static_gen7_haswell),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

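/*
 * Static URB partitioning for the fixed-function pipeline. Only VS
 * (run as pass-through), SF and CS get URB entries; GS and CLIP are
 * disabled and get none.
 */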
#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

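/*
 * The VS unit is programmed as pass-through (vs_enable = 0): vertices
 * flow unmodified to the SF stage, so only the URB bookkeeping fields
 * matter here.
 */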
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

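/*
 * One sampler state per source surface: bilinear min/mag filtering with
 * clamp-to-edge addressing on all three texture coordinates.
 */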
static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;

    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
    }

    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
    }

    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

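/*
 * CC unit for subpicture rendering: color blending is enabled with
 * SRC_ALPHA/INV_SRC_ALPHA factors, i.e. a standard source-over composite
 * of the subpicture onto the video frame.
 */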
static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0;    /* ALPHATEST_UNORM8: store the alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;      /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

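/*
 * Fill a Gen4-style SURFACE_STATE. Width, height and pitch are programmed
 * minus one, per hardware convention. For field rendering,
 * vert_line_stride makes the sampler read every other line and
 * vert_line_stride_ofs starts bottom fields one line down.
 */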
static void
i965_render_set_surface_state(
    struct i965_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    unsigned int               width,
    unsigned int               height,
    unsigned int               pitch,
    unsigned int               format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

/* Set "Shader Channel Select" (Haswell+): program the identity mapping so sampled channels reach the shader unswizzled */
void
gen7_render_set_surface_scs(struct gen7_surface_state *ss)
{
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
}

static void
gen7_render_set_surface_state(
    struct gen7_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    int                        width,
    int                        height,
    int                        pitch,
    int                        format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(
    VADriverContextP ctx,
    int              index,
    dri_bo          *region,
    unsigned long    offset,
    int              w,
    int              h,
    int              pitch,
    int              format,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++; /* one sampler is allocated per source surface */
}

static void
i965_render_src_surfaces_state(
    VADriverContextP ctx,
    VASurfaceID      surface,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int region_pitch;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    region_pitch = obj_surface->width;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);

    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags); /* UV */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags);
    } else {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* U */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
        i965_render_src_surface_state(ctx, 5, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* V */
        i965_render_src_surface_state(ctx, 6, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    dri_bo *subpic_region;
    unsigned int index;
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;

    /* validate the surface before dereferencing it */
    assert(obj_surface);
    assert(obj_surface->bo);

    index = obj_surface->subpic_render_idx;
    obj_subpic = SUBPIC(obj_surface->subpic[index]);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
}

static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

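/*
 * Upload the three vertices of the RECTLIST primitive. Each vertex is a
 * texture coordinate pair followed by a screen coordinate pair (16 bytes,
 * matching the vertex buffer pitch programmed in i965_render_startup());
 * the rotation table picks which texture corners land on which screen
 * corners.
 */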
static void
i965_fill_vertex_buffer(
    VADriverContextP ctx,
    float tex_coords[4], /* [(u1,v1);(u2,v2)] */
    float vid_coords[4]  /* [(x1,y1);(x2,y2)] */
)
{
    struct i965_driver_data * const i965 = i965_driver_data(ctx);
    float vb[12];

    enum { X1, Y1, X2, Y2 };

    static const unsigned int g_rotation_indices[][6] = {
        [VA_ROTATION_NONE] = { X2, Y2, X1, Y2, X1, Y1 },
        [VA_ROTATION_90]   = { X2, Y1, X2, Y2, X1, Y2 },
        [VA_ROTATION_180]  = { X1, Y1, X2, Y1, X2, Y2 },
        [VA_ROTATION_270]  = { X1, Y2, X1, Y1, X2, Y1 },
    };

    const unsigned int * const rotation_indices =
        g_rotation_indices[i965->rotation_attrib->value];

    vb[0]  = tex_coords[rotation_indices[0]]; /* bottom-right corner */
    vb[1]  = tex_coords[rotation_indices[1]];
    vb[2]  = vid_coords[X2];
    vb[3]  = vid_coords[Y2];

    vb[4]  = tex_coords[rotation_indices[2]]; /* bottom-left corner */
    vb[5]  = tex_coords[rotation_indices[3]];
    vb[6]  = vid_coords[X1];
    vb[7]  = vid_coords[Y2];

    vb[8]  = tex_coords[rotation_indices[4]]; /* top-left corner */
    vb[9]  = tex_coords[rotation_indices[5]];
    vb[10] = vid_coords[X1];
    vb[11] = vid_coords[Y1];

    dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
}

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct object_surface    *obj_surface  = SURFACE(surface);
    unsigned int index = obj_surface->subpic_render_idx;
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic[index]);
    float tex_coords[4], vid_coords[4];
    VARectangle dst_rect;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    tex_coords[0] = (float)obj_subpic->src_rect.x / obj_subpic->width;
    tex_coords[1] = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tex_coords[2] = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    tex_coords[3] = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    vid_coords[0] = dst_rect.x;
    vid_coords[1] = dst_rect.y;
    vid_coords[2] = (float)(dst_rect.x + dst_rect.width);
    vid_coords[3] = (float)(dst_rect.y + dst_rect.height);

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float tex_coords[4], vid_coords[4];
    int width, height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    width  = obj_surface->orig_width;
    height = obj_surface->orig_height;

    tex_coords[0] = (float)src_rect->x / width;
    tex_coords[1] = (float)src_rect->y / height;
    tex_coords[2] = (float)(src_rect->x + src_rect->width) / width;
    tex_coords[3] = (float)(src_rect->y + src_rect->height) / height;

    vid_coords[0] = dest_region->x + dst_rect->x;
    vid_coords[1] = dest_region->y + dst_rect->y;
    vid_coords[2] = vid_coords[0] + dst_rect->width;
    vid_coords[3] = vid_coords[1] + dst_rect->height;

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

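/*
 * The CURBE constant describes the chroma layout to the planar PS kernel:
 * 0 = separate U and V planes, 1 = interleaved UV (NV12), 2 = luma only
 * (4:0:0).
 */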
static void
i965_render_upload_constants(VADriverContextP ctx,
                             VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;
    struct object_surface *obj_surface = SURFACE(surface);

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
        *constant_buffer = 2;
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
            *constant_buffer = 1;
        else
            *constant_buffer = 0;
    }

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_subpic_render_upload_constants(VADriverContextP ctx,
                                    VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    float *constant_buffer;
    float global_alpha = 1.0;
    struct object_surface *obj_surface = SURFACE(surface);
    unsigned int index = obj_surface->subpic_render_idx;

    if (obj_surface->subpic[index] != VA_INVALID_ID) {
        struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);

        if (obj_subpic->flags & VA_SUBPICTURE_GLOBAL_ALPHA) {
            global_alpha = obj_subpic->global_alpha;
        }
    }

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;
    *constant_buffer = global_alpha;

    dri_bo_unmap(render_state->curbe.bo);
}

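/*
 * Build all indirect state for one frame: fixed-function unit states,
 * surface states plus binding table, samplers, vertices and constants.
 * The batch emitted by the pipeline setup below then just points at
 * these objects.
 */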
static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface, flags);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_constants(ctx, surface);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

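/*
 * Program STATE_BASE_ADDRESS so that the surface state base points at the
 * BO holding the surface states and binding table; the binding table
 * pointer and surface state offsets are then relative to this base.
 * Ironlake's variant of the command takes two extra DWORDs.
 */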
static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

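/*
 * Fence off the URB regions consecutively (VS, GS, CLIP, SF, CS). Each
 * fence value is the end offset of its region, so disabled stages simply
 * get zero-sized regions.
 */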
static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |  /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));           /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | (dest_region->height - 1) << 16);
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill the palette: bits 31:24 carry alpha, bits 23:0 the color */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

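/*
 * Emit the vertex buffer state and kick a 3DPRIMITIVE. The primitive is a
 * RECTLIST of three sequential vertices; the hardware derives the fourth
 * corner of the rectangle.
 */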
1430 static void
1431 i965_render_startup(VADriverContextP ctx)
1432 {
1433     struct i965_driver_data *i965 = i965_driver_data(ctx);
1434     struct intel_batchbuffer *batch = i965->batch;
1435     struct i965_render_state *render_state = &i965->render_state;
1436
1437     BEGIN_BATCH(batch, 11);
1438     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
1439     OUT_BATCH(batch, 
1440               (0 << VB0_BUFFER_INDEX_SHIFT) |
1441               VB0_VERTEXDATA |
1442               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
1443     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
1444
1445     if (IS_IRONLAKE(i965->intel.device_id))
1446         OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
1447     else
1448         OUT_BATCH(batch, 3);
1449
1450     OUT_BATCH(batch, 0);
1451
1452     OUT_BATCH(batch, 
1453               CMD_3DPRIMITIVE |
1454               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
1455               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
1456               (0 << 9) |
1457               4);
1458     OUT_BATCH(batch, 3); /* vertex count per instance */
1459     OUT_BATCH(batch, 0); /* start vertex offset */
1460     OUT_BATCH(batch, 1); /* single instance */
1461     OUT_BATCH(batch, 0); /* start instance location */
1462     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
1463     ADVANCE_BATCH(batch);
1464 }
1465
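/*
 * Clear the destination with a solid color blit (XY_COLOR_BLT).
 * For tiled destinations the pitch field is programmed in dwords,
 * hence the division by 4.  GEN6/GEN7 have a separate blitter ring,
 * so the batch is started on the BLT engine there.
 */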
1466 static void 
1467 i965_clear_dest_region(VADriverContextP ctx)
1468 {
1469     struct i965_driver_data *i965 = i965_driver_data(ctx);
1470     struct intel_batchbuffer *batch = i965->batch;
1471     struct i965_render_state *render_state = &i965->render_state;
1472     struct intel_region *dest_region = render_state->draw_region;
1473     unsigned int blt_cmd, br13;
1474     int pitch;
1475
1476     blt_cmd = XY_COLOR_BLT_CMD;
1477     br13 = 0xf0 << 16; /* raster operation: PATCOPY */
1478     pitch = dest_region->pitch;
1479
1480     if (dest_region->cpp == 4) {
1481         br13 |= BR13_8888;
1482         blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
1483     } else {
1484         assert(dest_region->cpp == 2);
1485         br13 |= BR13_565;
1486     }
1487
1488     if (dest_region->tiling != I915_TILING_NONE) {
1489         blt_cmd |= XY_COLOR_BLT_DST_TILED;
1490         pitch /= 4;
1491     }
1492
1493     br13 |= pitch;
1494
1495     if (IS_GEN6(i965->intel.device_id) ||
1496         IS_GEN7(i965->intel.device_id)) {
1497         intel_batchbuffer_start_atomic_blt(batch, 24);
1498         BEGIN_BLT_BATCH(batch, 6);
1499     } else {
1500         intel_batchbuffer_start_atomic(batch, 24);
1501         BEGIN_BATCH(batch, 6);
1502     }
1503
1504     OUT_BATCH(batch, blt_cmd);
1505     OUT_BATCH(batch, br13);
1506     OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
1507     OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
1508               (dest_region->x + dest_region->width));
1509     OUT_RELOC(batch, dest_region->bo, 
1510               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
1511               0);
1512     OUT_BATCH(batch, 0x0); /* clear color */
1513     ADVANCE_BATCH(batch);
1514     intel_batchbuffer_end_atomic(batch);
1515 }
1516
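/*
 * Emit the complete pre-GEN6 3D pipeline for a surface render in one
 * atomic batch: pipeline select, SIP, base addresses, binding table,
 * fixed-function unit pointers, URB/CURBE layout, drawing rectangle,
 * vertex elements and finally the draw itself.
 */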
1517 static void
1518 i965_surface_render_pipeline_setup(VADriverContextP ctx)
1519 {
1520     struct i965_driver_data *i965 = i965_driver_data(ctx);
1521     struct intel_batchbuffer *batch = i965->batch;
1522
1523     i965_clear_dest_region(ctx);
1524     intel_batchbuffer_start_atomic(batch, 0x1000);
1525     intel_batchbuffer_emit_mi_flush(batch);
1526     i965_render_pipeline_select(ctx);
1527     i965_render_state_sip(ctx);
1528     i965_render_state_base_address(ctx);
1529     i965_render_binding_table_pointers(ctx);
1530     i965_render_constant_color(ctx);
1531     i965_render_pipelined_pointers(ctx);
1532     i965_render_urb_layout(ctx);
1533     i965_render_cs_urb_layout(ctx);
1534     i965_render_constant_buffer(ctx);
1535     i965_render_drawing_rectangle(ctx);
1536     i965_render_vertex_elements(ctx);
1537     i965_render_startup(ctx);
1538     intel_batchbuffer_end_atomic(batch);
1539 }
1540
1541 static void
1542 i965_subpic_render_pipeline_setup(VADriverContextP ctx)
1543 {
1544     struct i965_driver_data *i965 = i965_driver_data(ctx);
1545     struct intel_batchbuffer *batch = i965->batch;
1546
1547     intel_batchbuffer_start_atomic(batch, 0x1000);
1548     intel_batchbuffer_emit_mi_flush(batch);
1549     i965_render_pipeline_select(ctx);
1550     i965_render_state_sip(ctx);
1551     i965_render_state_base_address(ctx);
1552     i965_render_binding_table_pointers(ctx);
1553     i965_render_constant_color(ctx);
1554     i965_render_pipelined_pointers(ctx);
1555     i965_render_urb_layout(ctx);
1556     i965_render_cs_urb_layout(ctx);
1557     i965_render_drawing_rectangle(ctx);
1558     i965_render_vertex_elements(ctx);
1559     i965_render_startup(ctx);
1560     intel_batchbuffer_end_atomic(batch);
1561 }
1562
1563
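/*
 * (Re)allocate the per-draw GPU state buffers for the pre-GEN6 path:
 * vertex buffer, VS/SF/WM/CC unit state, sampler state and the
 * combined surface state + binding table block.  Any buffer left
 * over from a previous draw is unreferenced first.
 */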
1564 static void 
1565 i965_render_initialize(VADriverContextP ctx)
1566 {
1567     struct i965_driver_data *i965 = i965_driver_data(ctx);
1568     struct i965_render_state *render_state = &i965->render_state;
1569     dri_bo *bo;
1570
1571     /* VERTEX BUFFER */
1572     dri_bo_unreference(render_state->vb.vertex_buffer);
1573     bo = dri_bo_alloc(i965->intel.bufmgr,
1574                       "vertex buffer",
1575                       4096,
1576                       4096);
1577     assert(bo);
1578     render_state->vb.vertex_buffer = bo;
1579
1580     /* VS */
1581     dri_bo_unreference(render_state->vs.state);
1582     bo = dri_bo_alloc(i965->intel.bufmgr,
1583                       "vs state",
1584                       sizeof(struct i965_vs_unit_state),
1585                       64);
1586     assert(bo);
1587     render_state->vs.state = bo;
1588
1589     /* GS */
1590     /* CLIP */
1591     /* SF */
1592     dri_bo_unreference(render_state->sf.state);
1593     bo = dri_bo_alloc(i965->intel.bufmgr,
1594                       "sf state",
1595                       sizeof(struct i965_sf_unit_state),
1596                       64);
1597     assert(bo);
1598     render_state->sf.state = bo;
1599
1600     /* WM */
1601     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1602     bo = dri_bo_alloc(i965->intel.bufmgr,
1603                       "surface state & binding table",
1604                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1605                       4096);
1606     assert(bo);
1607     render_state->wm.surface_state_binding_table_bo = bo;
1608
1609     dri_bo_unreference(render_state->wm.sampler);
1610     bo = dri_bo_alloc(i965->intel.bufmgr,
1611                       "sampler state",
1612                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1613                       64);
1614     assert(bo);
1615     render_state->wm.sampler = bo;
1616     render_state->wm.sampler_count = 0;
1617
1618     dri_bo_unreference(render_state->wm.state);
1619     bo = dri_bo_alloc(i965->intel.bufmgr,
1620                       "wm state",
1621                       sizeof(struct i965_wm_unit_state),
1622                       64);
1623     assert(bo);
1624     render_state->wm.state = bo;
1625
1626     /* COLOR CALCULATOR */
1627     dri_bo_unreference(render_state->cc.state);
1628     bo = dri_bo_alloc(i965->intel.bufmgr,
1629                       "color calc state",
1630                       sizeof(struct i965_cc_unit_state),
1631                       64);
1632     assert(bo);
1633     render_state->cc.state = bo;
1634
1635     dri_bo_unreference(render_state->cc.viewport);
1636     bo = dri_bo_alloc(i965->intel.bufmgr,
1637                       "cc viewport",
1638                       sizeof(struct i965_cc_viewport),
1639                       64);
1640     assert(bo);
1641     render_state->cc.viewport = bo;
1642 }
1643
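/*
 * Top-level PutSurface implementation for pre-GEN6: allocate the
 * state buffers, fill in surface/sampler/vertex state, emit the
 * pipeline and submit the batch.
 */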
1644 static void
1645 i965_render_put_surface(
1646     VADriverContextP   ctx,
1647     VASurfaceID        surface,
1648     const VARectangle *src_rect,
1649     const VARectangle *dst_rect,
1650     unsigned int       flags
1651 )
1652 {
1653     struct i965_driver_data *i965 = i965_driver_data(ctx);
1654     struct intel_batchbuffer *batch = i965->batch;
1655
1656     i965_render_initialize(ctx);
1657     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect, flags);
1658     i965_surface_render_pipeline_setup(ctx);
1659     intel_batchbuffer_flush(batch);
1660 }
1661
1662 static void
1663 i965_render_put_subpicture(
1664     VADriverContextP   ctx,
1665     VASurfaceID        surface,
1666     const VARectangle *src_rect,
1667     const VARectangle *dst_rect
1668 )
1669 {
1670     struct i965_driver_data *i965 = i965_driver_data(ctx);
1671     struct intel_batchbuffer *batch = i965->batch;
1672     struct object_surface *obj_surface = SURFACE(surface);
1673     unsigned int index = obj_surface->subpic_render_idx;
1674     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);
1675
1676     assert(obj_subpic);
1677
1678     i965_render_initialize(ctx);
1679     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1680     i965_subpic_render_pipeline_setup(ctx);
1681     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1682     intel_batchbuffer_flush(batch);
1683 }
1684
1685 /*
1686  * for GEN6+
1687  */
1688 static void 
1689 gen6_render_initialize(VADriverContextP ctx)
1690 {
1691     struct i965_driver_data *i965 = i965_driver_data(ctx);
1692     struct i965_render_state *render_state = &i965->render_state;
1693     dri_bo *bo;
1694
1695     /* VERTEX BUFFER */
1696     dri_bo_unreference(render_state->vb.vertex_buffer);
1697     bo = dri_bo_alloc(i965->intel.bufmgr,
1698                       "vertex buffer",
1699                       4096,
1700                       4096);
1701     assert(bo);
1702     render_state->vb.vertex_buffer = bo;
1703
1704     /* WM */
1705     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1706     bo = dri_bo_alloc(i965->intel.bufmgr,
1707                       "surface state & binding table",
1708                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1709                       4096);
1710     assert(bo);
1711     render_state->wm.surface_state_binding_table_bo = bo;
1712
1713     dri_bo_unreference(render_state->wm.sampler);
1714     bo = dri_bo_alloc(i965->intel.bufmgr,
1715                       "sampler state",
1716                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1717                       4096);
1718     assert(bo);
1719     render_state->wm.sampler = bo;
1720     render_state->wm.sampler_count = 0;
1721
1722     /* COLOR CALCULATOR */
1723     dri_bo_unreference(render_state->cc.state);
1724     bo = dri_bo_alloc(i965->intel.bufmgr,
1725                       "color calc state",
1726                       sizeof(struct gen6_color_calc_state),
1727                       4096);
1728     assert(bo);
1729     render_state->cc.state = bo;
1730
1731     /* CC VIEWPORT */
1732     dri_bo_unreference(render_state->cc.viewport);
1733     bo = dri_bo_alloc(i965->intel.bufmgr,
1734                       "cc viewport",
1735                       sizeof(struct i965_cc_viewport),
1736                       4096);
1737     assert(bo);
1738     render_state->cc.viewport = bo;
1739
1740     /* BLEND STATE */
1741     dri_bo_unreference(render_state->cc.blend);
1742     bo = dri_bo_alloc(i965->intel.bufmgr,
1743                       "blend state",
1744                       sizeof(struct gen6_blend_state),
1745                       4096);
1746     assert(bo);
1747     render_state->cc.blend = bo;
1748
1749     /* DEPTH & STENCIL STATE */
1750     dri_bo_unreference(render_state->cc.depth_stencil);
1751     bo = dri_bo_alloc(i965->intel.bufmgr,
1752                       "depth & stencil state",
1753                       sizeof(struct gen6_depth_stencil_state),
1754                       4096);
1755     assert(bo);
1756     render_state->cc.depth_stencil = bo;
1757 }
1758
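/*
 * The constant color (opaque magenta: r = b = a = 1.0, g = 0.0) is
 * not referenced by the kernels used here; presumably it makes
 * accidental constant-color sampling easy to spot.
 */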
1759 static void
1760 gen6_render_color_calc_state(VADriverContextP ctx)
1761 {
1762     struct i965_driver_data *i965 = i965_driver_data(ctx);
1763     struct i965_render_state *render_state = &i965->render_state;
1764     struct gen6_color_calc_state *color_calc_state;
1765     
1766     dri_bo_map(render_state->cc.state, 1);
1767     assert(render_state->cc.state->virtual);
1768     color_calc_state = render_state->cc.state->virtual;
1769     memset(color_calc_state, 0, sizeof(*color_calc_state));
1770     color_calc_state->constant_r = 1.0;
1771     color_calc_state->constant_g = 0.0;
1772     color_calc_state->constant_b = 1.0;
1773     color_calc_state->constant_a = 1.0;
1774     dri_bo_unmap(render_state->cc.state);
1775 }
1776
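/*
 * Blending stays disabled for plain surface rendering; the logic op
 * 0xc (COPY) writes the source color through unmodified.
 */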
1777 static void
1778 gen6_render_blend_state(VADriverContextP ctx)
1779 {
1780     struct i965_driver_data *i965 = i965_driver_data(ctx);
1781     struct i965_render_state *render_state = &i965->render_state;
1782     struct gen6_blend_state *blend_state;
1783     
1784     dri_bo_map(render_state->cc.blend, 1);
1785     assert(render_state->cc.blend->virtual);
1786     blend_state = render_state->cc.blend->virtual;
1787     memset(blend_state, 0, sizeof(*blend_state));
1788     blend_state->blend1.logic_op_enable = 1;
1789     blend_state->blend1.logic_op_func = 0xc;
1790     dri_bo_unmap(render_state->cc.blend);
1791 }
1792
1793 static void
1794 gen6_render_depth_stencil_state(VADriverContextP ctx)
1795 {
1796     struct i965_driver_data *i965 = i965_driver_data(ctx);
1797     struct i965_render_state *render_state = &i965->render_state;
1798     struct gen6_depth_stencil_state *depth_stencil_state;
1799     
1800     dri_bo_map(render_state->cc.depth_stencil, 1);
1801     assert(render_state->cc.depth_stencil->virtual);
1802     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1803     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1804     dri_bo_unmap(render_state->cc.depth_stencil);
1805 }
1806
1807 static void
1808 gen6_render_setup_states(
1809     VADriverContextP   ctx,
1810     VASurfaceID        surface,
1811     const VARectangle *src_rect,
1812     const VARectangle *dst_rect,
1813     unsigned int       flags
1814 )
1815 {
1816     i965_render_dest_surface_state(ctx, 0);
1817     i965_render_src_surfaces_state(ctx, surface, flags);
1818     i965_render_sampler(ctx);
1819     i965_render_cc_viewport(ctx);
1820     gen6_render_color_calc_state(ctx);
1821     gen6_render_blend_state(ctx);
1822     gen6_render_depth_stencil_state(ctx);
1823     i965_render_upload_constants(ctx, surface);
1824     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1825 }
1826
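/*
 * States that never change for this pipeline: 3D pipeline select,
 * one sample per pixel, a sample mask of 1 and a zero system
 * instruction pointer.
 */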
1827 static void
1828 gen6_emit_invariant_states(VADriverContextP ctx)
1829 {
1830     struct i965_driver_data *i965 = i965_driver_data(ctx);
1831     struct intel_batchbuffer *batch = i965->batch;
1832
1833     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1834
1835     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1836     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1837               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1838     OUT_BATCH(batch, 0);
1839
1840     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1841     OUT_BATCH(batch, 1);
1842
1843     /* Set system instruction pointer */
1844     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1845     OUT_BATCH(batch, 0);
1846 }
1847
1848 static void
1849 gen6_emit_state_base_address(VADriverContextP ctx)
1850 {
1851     struct i965_driver_data *i965 = i965_driver_data(ctx);
1852     struct intel_batchbuffer *batch = i965->batch;
1853     struct i965_render_state *render_state = &i965->render_state;
1854
1855     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1856     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1857     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1858     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1859     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1860     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1861     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1862     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1863     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1864     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1865 }
1866
1867 static void
1868 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1869 {
1870     struct i965_driver_data *i965 = i965_driver_data(ctx);
1871     struct intel_batchbuffer *batch = i965->batch;
1872     struct i965_render_state *render_state = &i965->render_state;
1873
1874     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1875               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1876               (4 - 2));
1877     OUT_BATCH(batch, 0);
1878     OUT_BATCH(batch, 0);
1879     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1880 }
1881
1882 static void
1883 gen6_emit_urb(VADriverContextP ctx)
1884 {
1885     struct i965_driver_data *i965 = i965_driver_data(ctx);
1886     struct intel_batchbuffer *batch = i965->batch;
1887
1888     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1889     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1890               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1891     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1892               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1893 }
1894
1895 static void
1896 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1897 {
1898     struct i965_driver_data *i965 = i965_driver_data(ctx);
1899     struct intel_batchbuffer *batch = i965->batch;
1900     struct i965_render_state *render_state = &i965->render_state;
1901
1902     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1903     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1904     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1905     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1906 }
1907
1908 static void
1909 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1910 {
1911     struct i965_driver_data *i965 = i965_driver_data(ctx);
1912     struct intel_batchbuffer *batch = i965->batch;
1913     struct i965_render_state *render_state = &i965->render_state;
1914
1915     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1916               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1917               (4 - 2));
1918     OUT_BATCH(batch, 0); /* VS */
1919     OUT_BATCH(batch, 0); /* GS */
1920     OUT_RELOC(batch,render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1921 }
1922
1923 static void
1924 gen6_emit_binding_table(VADriverContextP ctx)
1925 {
1926     struct i965_driver_data *i965 = i965_driver_data(ctx);
1927     struct intel_batchbuffer *batch = i965->batch;
1928
1929     /* Binding table pointers */
1930     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1931               GEN6_BINDING_TABLE_MODIFY_PS |
1932               (4 - 2));
1933     OUT_BATCH(batch, 0);                /* vs */
1934     OUT_BATCH(batch, 0);                /* gs */
1935     /* Only the PS uses the binding table */
1936     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1937 }
1938
1939 static void
1940 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1941 {
1942     struct i965_driver_data *i965 = i965_driver_data(ctx);
1943     struct intel_batchbuffer *batch = i965->batch;
1944
1945     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1946     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1947               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1948     OUT_BATCH(batch, 0);
1949     OUT_BATCH(batch, 0);
1950     OUT_BATCH(batch, 0);
1951     OUT_BATCH(batch, 0);
1952     OUT_BATCH(batch, 0);
1953
1954     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1955     OUT_BATCH(batch, 0);
1956 }
1957
1958 static void
1959 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1960 {
1961     i965_render_drawing_rectangle(ctx);
1962 }
1963
1964 static void 
1965 gen6_emit_vs_state(VADriverContextP ctx)
1966 {
1967     struct i965_driver_data *i965 = i965_driver_data(ctx);
1968     struct intel_batchbuffer *batch = i965->batch;
1969
1970     /* disable VS constant buffer */
1971     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1972     OUT_BATCH(batch, 0);
1973     OUT_BATCH(batch, 0);
1974     OUT_BATCH(batch, 0);
1975     OUT_BATCH(batch, 0);
1976         
1977     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1978     OUT_BATCH(batch, 0); /* without VS kernel */
1979     OUT_BATCH(batch, 0);
1980     OUT_BATCH(batch, 0);
1981     OUT_BATCH(batch, 0);
1982     OUT_BATCH(batch, 0); /* pass-through */
1983 }
1984
1985 static void 
1986 gen6_emit_gs_state(VADriverContextP ctx)
1987 {
1988     struct i965_driver_data *i965 = i965_driver_data(ctx);
1989     struct intel_batchbuffer *batch = i965->batch;
1990
1991     /* disable GS constant buffer */
1992     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1993     OUT_BATCH(batch, 0);
1994     OUT_BATCH(batch, 0);
1995     OUT_BATCH(batch, 0);
1996     OUT_BATCH(batch, 0);
1997         
1998     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1999     OUT_BATCH(batch, 0); /* without GS kernel */
2000     OUT_BATCH(batch, 0);
2001     OUT_BATCH(batch, 0);
2002     OUT_BATCH(batch, 0);
2003     OUT_BATCH(batch, 0);
2004     OUT_BATCH(batch, 0); /* pass-through */
2005 }
2006
2007 static void 
2008 gen6_emit_clip_state(VADriverContextP ctx)
2009 {
2010     struct i965_driver_data *i965 = i965_driver_data(ctx);
2011     struct intel_batchbuffer *batch = i965->batch;
2012
2013     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2014     OUT_BATCH(batch, 0);
2015     OUT_BATCH(batch, 0); /* pass-through */
2016     OUT_BATCH(batch, 0);
2017 }
2018
2019 static void 
2020 gen6_emit_sf_state(VADriverContextP ctx)
2021 {
2022     struct i965_driver_data *i965 = i965_driver_data(ctx);
2023     struct intel_batchbuffer *batch = i965->batch;
2024
2025     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
2026     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
2027               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
2028               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
2029     OUT_BATCH(batch, 0);
2030     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2031     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
2032     OUT_BATCH(batch, 0);
2033     OUT_BATCH(batch, 0);
2034     OUT_BATCH(batch, 0);
2035     OUT_BATCH(batch, 0);
2036     OUT_BATCH(batch, 0); /* DW9 */
2037     OUT_BATCH(batch, 0);
2038     OUT_BATCH(batch, 0);
2039     OUT_BATCH(batch, 0);
2040     OUT_BATCH(batch, 0);
2041     OUT_BATCH(batch, 0); /* DW14 */
2042     OUT_BATCH(batch, 0);
2043     OUT_BATCH(batch, 0);
2044     OUT_BATCH(batch, 0);
2045     OUT_BATCH(batch, 0);
2046     OUT_BATCH(batch, 0); /* DW19 */
2047 }
2048
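/*
 * Program the pixel shader: push constants come from curbe.bo
 * (constant buffer 0), the kernel is selected by the caller, and
 * dispatch is SIMD16 starting at GRF 6 with perspective pixel
 * barycentrics as the only input.
 */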
2049 static void 
2050 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
2051 {
2052     struct i965_driver_data *i965 = i965_driver_data(ctx);
2053     struct intel_batchbuffer *batch = i965->batch;
2054     struct i965_render_state *render_state = &i965->render_state;
2055
2056     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
2057               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
2058               (5 - 2));
2059     OUT_RELOC(batch, 
2060               render_state->curbe.bo,
2061               I915_GEM_DOMAIN_INSTRUCTION, 0,
2062               0);
2063     OUT_BATCH(batch, 0);
2064     OUT_BATCH(batch, 0);
2065     OUT_BATCH(batch, 0);
2066
2067     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
2068     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
2069               I915_GEM_DOMAIN_INSTRUCTION, 0,
2070               0);
2071     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
2072               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2073     OUT_BATCH(batch, 0);
2074     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
2075     OUT_BATCH(batch, ((render_state->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
2076               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
2077               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
2078     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
2079               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2080     OUT_BATCH(batch, 0);
2081     OUT_BATCH(batch, 0);
2082 }
2083
2084 static void
2085 gen6_emit_vertex_element_state(VADriverContextP ctx)
2086 {
2087     struct i965_driver_data *i965 = i965_driver_data(ctx);
2088     struct intel_batchbuffer *batch = i965->batch;
2089
2090     /* Set up our vertex elements, sourced from the single vertex buffer. */
2091     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2092     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2093     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2094               GEN6_VE0_VALID |
2095               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2096               (0 << VE0_OFFSET_SHIFT));
2097     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2098               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2099               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2100               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2101     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2102     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2103               GEN6_VE0_VALID |
2104               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2105               (8 << VE0_OFFSET_SHIFT));
2106     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2107               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2108               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2109               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2110 }
2111
2112 static void
2113 gen6_emit_vertices(VADriverContextP ctx)
2114 {
2115     struct i965_driver_data *i965 = i965_driver_data(ctx);
2116     struct intel_batchbuffer *batch = i965->batch;
2117     struct i965_render_state *render_state = &i965->render_state;
2118
2119     BEGIN_BATCH(batch, 11);
2120     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
2121     OUT_BATCH(batch, 
2122               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2123               GEN6_VB0_VERTEXDATA |
2124               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2125     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2126     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2127     OUT_BATCH(batch, 0);
2128
2129     OUT_BATCH(batch, 
2130               CMD_3DPRIMITIVE |
2131               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2132               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2133               (0 << 9) |
2134               4);
2135     OUT_BATCH(batch, 3); /* vertex count per instance */
2136     OUT_BATCH(batch, 0); /* start vertex offset */
2137     OUT_BATCH(batch, 1); /* single instance */
2138     OUT_BATCH(batch, 0); /* start instance location */
2139     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2140     ADVANCE_BATCH(batch);
2141 }
2142
2143 static void
2144 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2145 {
2146     struct i965_driver_data *i965 = i965_driver_data(ctx);
2147     struct intel_batchbuffer *batch = i965->batch;
2148
2149     intel_batchbuffer_start_atomic(batch, 0x1000);
2150     intel_batchbuffer_emit_mi_flush(batch);
2151     gen6_emit_invariant_states(ctx);
2152     gen6_emit_state_base_address(ctx);
2153     gen6_emit_viewport_state_pointers(ctx);
2154     gen6_emit_urb(ctx);
2155     gen6_emit_cc_state_pointers(ctx);
2156     gen6_emit_sampler_state_pointers(ctx);
2157     gen6_emit_vs_state(ctx);
2158     gen6_emit_gs_state(ctx);
2159     gen6_emit_clip_state(ctx);
2160     gen6_emit_sf_state(ctx);
2161     gen6_emit_wm_state(ctx, kernel);
2162     gen6_emit_binding_table(ctx);
2163     gen6_emit_depth_buffer_state(ctx);
2164     gen6_emit_drawing_rectangle(ctx);
2165     gen6_emit_vertex_element_state(ctx);
2166     gen6_emit_vertices(ctx);
2167     intel_batchbuffer_end_atomic(batch);
2168 }
2169
2170 static void
2171 gen6_render_put_surface(
2172     VADriverContextP   ctx,
2173     VASurfaceID        surface,
2174     const VARectangle *src_rect,
2175     const VARectangle *dst_rect,
2176     unsigned int       flags
2177 )
2178 {
2179     struct i965_driver_data *i965 = i965_driver_data(ctx);
2180     struct intel_batchbuffer *batch = i965->batch;
2181
2182     gen6_render_initialize(ctx);
2183     gen6_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2184     i965_clear_dest_region(ctx);
2185     gen6_render_emit_states(ctx, PS_KERNEL);
2186     intel_batchbuffer_flush(batch);
2187 }
2188
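/*
 * Subpictures are alpha-blended over the destination:
 * src * alpha + dst * (1 - alpha), with pre- and post-blend
 * clamping to [0, 1].
 */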
2189 static void
2190 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2191 {
2192     struct i965_driver_data *i965 = i965_driver_data(ctx);
2193     struct i965_render_state *render_state = &i965->render_state;
2194     struct gen6_blend_state *blend_state;
2195
2197     dri_bo_map(render_state->cc.blend, 1);
2198     assert(render_state->cc.blend->virtual);
2199     blend_state = render_state->cc.blend->virtual;
2200     memset(blend_state, 0, sizeof(*blend_state));
2201     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2202     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2203     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2204     blend_state->blend0.blend_enable = 1;
2205     blend_state->blend1.post_blend_clamp_enable = 1;
2206     blend_state->blend1.pre_blend_clamp_enable = 1;
2207     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2208     dri_bo_unmap(render_state->cc.blend);
2209 }
2210
2211 static void
2212 gen6_subpicture_render_setup_states(
2213     VADriverContextP   ctx,
2214     VASurfaceID        surface,
2215     const VARectangle *src_rect,
2216     const VARectangle *dst_rect
2217 )
2218 {
2219     i965_render_dest_surface_state(ctx, 0);
2220     i965_subpic_render_src_surfaces_state(ctx, surface);
2221     i965_render_sampler(ctx);
2222     i965_render_cc_viewport(ctx);
2223     gen6_render_color_calc_state(ctx);
2224     gen6_subpicture_render_blend_state(ctx);
2225     gen6_render_depth_stencil_state(ctx);
2226     i965_subpic_render_upload_constants(ctx, surface);
2227     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2228 }
2229
2230 static void
2231 gen6_render_put_subpicture(
2232     VADriverContextP   ctx,
2233     VASurfaceID        surface,
2234     const VARectangle *src_rect,
2235     const VARectangle *dst_rect
2236 )
2237 {
2238     struct i965_driver_data *i965 = i965_driver_data(ctx);
2239     struct intel_batchbuffer *batch = i965->batch;
2240     struct object_surface *obj_surface = SURFACE(surface);
2241     unsigned int index = obj_surface->subpic_render_idx;
2242     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);
2243
2244     assert(obj_subpic);
2245     gen6_render_initialize(ctx);
2246     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2247     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2248     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2249     intel_batchbuffer_flush(batch);
2250 }
2251
2252 /*
2253  * for GEN7
2254  */
2255 static void 
2256 gen7_render_initialize(VADriverContextP ctx)
2257 {
2258     struct i965_driver_data *i965 = i965_driver_data(ctx);
2259     struct i965_render_state *render_state = &i965->render_state;
2260     dri_bo *bo;
2261
2262     /* VERTEX BUFFER */
2263     dri_bo_unreference(render_state->vb.vertex_buffer);
2264     bo = dri_bo_alloc(i965->intel.bufmgr,
2265                       "vertex buffer",
2266                       4096,
2267                       4096);
2268     assert(bo);
2269     render_state->vb.vertex_buffer = bo;
2270
2271     /* WM */
2272     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2273     bo = dri_bo_alloc(i965->intel.bufmgr,
2274                       "surface state & binding table",
2275                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2276                       4096);
2277     assert(bo);
2278     render_state->wm.surface_state_binding_table_bo = bo;
2279
2280     dri_bo_unreference(render_state->wm.sampler);
2281     bo = dri_bo_alloc(i965->intel.bufmgr,
2282                       "sampler state",
2283                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2284                       4096);
2285     assert(bo);
2286     render_state->wm.sampler = bo;
2287     render_state->wm.sampler_count = 0;
2288
2289     /* COLOR CALCULATOR */
2290     dri_bo_unreference(render_state->cc.state);
2291     bo = dri_bo_alloc(i965->intel.bufmgr,
2292                       "color calc state",
2293                       sizeof(struct gen6_color_calc_state),
2294                       4096);
2295     assert(bo);
2296     render_state->cc.state = bo;
2297
2298     /* CC VIEWPORT */
2299     dri_bo_unreference(render_state->cc.viewport);
2300     bo = dri_bo_alloc(i965->intel.bufmgr,
2301                       "cc viewport",
2302                       sizeof(struct i965_cc_viewport),
2303                       4096);
2304     assert(bo);
2305     render_state->cc.viewport = bo;
2306
2307     /* BLEND STATE */
2308     dri_bo_unreference(render_state->cc.blend);
2309     bo = dri_bo_alloc(i965->intel.bufmgr,
2310                       "blend state",
2311                       sizeof(struct gen6_blend_state),
2312                       4096);
2313     assert(bo);
2314     render_state->cc.blend = bo;
2315
2316     /* DEPTH & STENCIL STATE */
2317     dri_bo_unreference(render_state->cc.depth_stencil);
2318     bo = dri_bo_alloc(i965->intel.bufmgr,
2319                       "depth & stencil state",
2320                       sizeof(struct gen6_depth_stencil_state),
2321                       4096);
2322     assert(bo);
2323     render_state->cc.depth_stencil = bo;
2324 }
2325
2326 static void
2327 gen7_render_color_calc_state(VADriverContextP ctx)
2328 {
2329     struct i965_driver_data *i965 = i965_driver_data(ctx);
2330     struct i965_render_state *render_state = &i965->render_state;
2331     struct gen6_color_calc_state *color_calc_state;
2332     
2333     dri_bo_map(render_state->cc.state, 1);
2334     assert(render_state->cc.state->virtual);
2335     color_calc_state = render_state->cc.state->virtual;
2336     memset(color_calc_state, 0, sizeof(*color_calc_state));
2337     color_calc_state->constant_r = 1.0;
2338     color_calc_state->constant_g = 0.0;
2339     color_calc_state->constant_b = 1.0;
2340     color_calc_state->constant_a = 1.0;
2341     dri_bo_unmap(render_state->cc.state);
2342 }
2343
2344 static void
2345 gen7_render_blend_state(VADriverContextP ctx)
2346 {
2347     struct i965_driver_data *i965 = i965_driver_data(ctx);
2348     struct i965_render_state *render_state = &i965->render_state;
2349     struct gen6_blend_state *blend_state;
2350     
2351     dri_bo_map(render_state->cc.blend, 1);
2352     assert(render_state->cc.blend->virtual);
2353     blend_state = render_state->cc.blend->virtual;
2354     memset(blend_state, 0, sizeof(*blend_state));
2355     blend_state->blend1.logic_op_enable = 1;
2356     blend_state->blend1.logic_op_func = 0xc;
2357     blend_state->blend1.pre_blend_clamp_enable = 1;
2358     dri_bo_unmap(render_state->cc.blend);
2359 }
2360
2361 static void
2362 gen7_render_depth_stencil_state(VADriverContextP ctx)
2363 {
2364     struct i965_driver_data *i965 = i965_driver_data(ctx);
2365     struct i965_render_state *render_state = &i965->render_state;
2366     struct gen6_depth_stencil_state *depth_stencil_state;
2367     
2368     dri_bo_map(render_state->cc.depth_stencil, 1);
2369     assert(render_state->cc.depth_stencil->virtual);
2370     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2371     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2372     dri_bo_unmap(render_state->cc.depth_stencil);
2373 }
2374
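/*
 * GEN7 rearranged the sampler state layout (the wrap modes moved
 * into ss3), so a gen7-specific variant of i965_render_sampler() is
 * needed: bilinear min/mag filtering with all coordinates clamped.
 */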
2375 static void 
2376 gen7_render_sampler(VADriverContextP ctx)
2377 {
2378     struct i965_driver_data *i965 = i965_driver_data(ctx);
2379     struct i965_render_state *render_state = &i965->render_state;
2380     struct gen7_sampler_state *sampler_state;
2381     int i;
2382     
2383     assert(render_state->wm.sampler_count > 0);
2384     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2385
2386     dri_bo_map(render_state->wm.sampler, 1);
2387     assert(render_state->wm.sampler->virtual);
2388     sampler_state = render_state->wm.sampler->virtual;
2389     for (i = 0; i < render_state->wm.sampler_count; i++) {
2390         memset(sampler_state, 0, sizeof(*sampler_state));
2391         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2392         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2393         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2394         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2395         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2396         sampler_state++;
2397     }
2398
2399     dri_bo_unmap(render_state->wm.sampler);
2400 }
2401
2402 static void
2403 gen7_render_setup_states(
2404     VADriverContextP   ctx,
2405     VASurfaceID        surface,
2406     const VARectangle *src_rect,
2407     const VARectangle *dst_rect,
2408     unsigned int       flags
2409 )
2410 {
2411     i965_render_dest_surface_state(ctx, 0);
2412     i965_render_src_surfaces_state(ctx, surface, flags);
2413     gen7_render_sampler(ctx);
2414     i965_render_cc_viewport(ctx);
2415     gen7_render_color_calc_state(ctx);
2416     gen7_render_blend_state(ctx);
2417     gen7_render_depth_stencil_state(ctx);
2418     i965_render_upload_constants(ctx, surface);
2419     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2420 }
2421
2422 static void
2423 gen7_emit_invariant_states(VADriverContextP ctx)
2424 {
2425     struct i965_driver_data *i965 = i965_driver_data(ctx);
2426     struct intel_batchbuffer *batch = i965->batch;
2427
2428     BEGIN_BATCH(batch, 1);
2429     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2430     ADVANCE_BATCH(batch);
2431
2432     BEGIN_BATCH(batch, 4);
2433     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2434     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2435               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2436     OUT_BATCH(batch, 0);
2437     OUT_BATCH(batch, 0);
2438     ADVANCE_BATCH(batch);
2439
2440     BEGIN_BATCH(batch, 2);
2441     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2442     OUT_BATCH(batch, 1);
2443     ADVANCE_BATCH(batch);
2444
2445     /* Set system instruction pointer */
2446     BEGIN_BATCH(batch, 2);
2447     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2448     OUT_BATCH(batch, 0);
2449     ADVANCE_BATCH(batch);
2450 }
2451
2452 static void
2453 gen7_emit_state_base_address(VADriverContextP ctx)
2454 {
2455     struct i965_driver_data *i965 = i965_driver_data(ctx);
2456     struct intel_batchbuffer *batch = i965->batch;
2457     struct i965_render_state *render_state = &i965->render_state;
2458
2459     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2460     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2461     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2462     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2463     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2464     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2465     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2466     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2467     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2468     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2469 }
2470
2471 static void
2472 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2473 {
2474     struct i965_driver_data *i965 = i965_driver_data(ctx);
2475     struct intel_batchbuffer *batch = i965->batch;
2476     struct i965_render_state *render_state = &i965->render_state;
2477
2478     BEGIN_BATCH(batch, 2);
2479     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2480     OUT_RELOC(batch,
2481               render_state->cc.viewport,
2482               I915_GEM_DOMAIN_INSTRUCTION, 0,
2483               0);
2484     ADVANCE_BATCH(batch);
2485
2486     BEGIN_BATCH(batch, 2);
2487     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2488     OUT_BATCH(batch, 0);
2489     ADVANCE_BATCH(batch);
2490 }
2491
2492 /*
2493  * URB layout on GEN7 
2494  * ----------------------------------------
2495  * | PS Push Constants (8KB) | VS entries |
2496  * ----------------------------------------
2497  */
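/*
 * 3DSTATE_PUSH_CONSTANT_ALLOC_PS reserves the first 8KB for PS push
 * constants; the VS entries start right behind it (URB starting
 * address 1, counted in 8KB blocks): 32 entries, or 64 on Haswell.
 */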
2498 static void
2499 gen7_emit_urb(VADriverContextP ctx)
2500 {
2501     struct i965_driver_data *i965 = i965_driver_data(ctx);
2502     struct intel_batchbuffer *batch = i965->batch;
2503     unsigned int num_urb_entries = 32;
2504
2505     if (IS_HASWELL(i965->intel.device_id))
2506         num_urb_entries = 64;
2507
2508     BEGIN_BATCH(batch, 2);
2509     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2510     OUT_BATCH(batch, 8); /* in 1KBs */
2511     ADVANCE_BATCH(batch);
2512
2513     BEGIN_BATCH(batch, 2);
2514     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2515     OUT_BATCH(batch, 
2516               (num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
2517               ((2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
2518               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2519     ADVANCE_BATCH(batch);
2520
2521     BEGIN_BATCH(batch, 2);
2522     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2523     OUT_BATCH(batch,
2524               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2525               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2526     ADVANCE_BATCH(batch);
2527
2528     BEGIN_BATCH(batch, 2);
2529     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2530     OUT_BATCH(batch,
2531               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2532               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2533     ADVANCE_BATCH(batch);
2534
2535     BEGIN_BATCH(batch, 2);
2536     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2537     OUT_BATCH(batch,
2538               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2539               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2540     ADVANCE_BATCH(batch);
2541 }
2542
2543 static void
2544 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2545 {
2546     struct i965_driver_data *i965 = i965_driver_data(ctx);
2547     struct intel_batchbuffer *batch = i965->batch;
2548     struct i965_render_state *render_state = &i965->render_state;
2549
2550     BEGIN_BATCH(batch, 2);
2551     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2552     OUT_RELOC(batch,
2553               render_state->cc.state,
2554               I915_GEM_DOMAIN_INSTRUCTION, 0,
2555               1);
2556     ADVANCE_BATCH(batch);
2557
2558     BEGIN_BATCH(batch, 2);
2559     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2560     OUT_RELOC(batch,
2561               render_state->cc.blend,
2562               I915_GEM_DOMAIN_INSTRUCTION, 0,
2563               1);
2564     ADVANCE_BATCH(batch);
2565
2566     BEGIN_BATCH(batch, 2);
2567     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2568     OUT_RELOC(batch,
2569               render_state->cc.depth_stencil,
2570               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2571               1);
2572     ADVANCE_BATCH(batch);
2573 }
2574
2575 static void
2576 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2577 {
2578     struct i965_driver_data *i965 = i965_driver_data(ctx);
2579     struct intel_batchbuffer *batch = i965->batch;
2580     struct i965_render_state *render_state = &i965->render_state;
2581
2582     BEGIN_BATCH(batch, 2);
2583     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2584     OUT_RELOC(batch,
2585               render_state->wm.sampler,
2586               I915_GEM_DOMAIN_INSTRUCTION, 0,
2587               0);
2588     ADVANCE_BATCH(batch);
2589 }
2590
2591 static void
2592 gen7_emit_binding_table(VADriverContextP ctx)
2593 {
2594     struct i965_driver_data *i965 = i965_driver_data(ctx);
2595     struct intel_batchbuffer *batch = i965->batch;
2596
2597     BEGIN_BATCH(batch, 2);
2598     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2599     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2600     ADVANCE_BATCH(batch);
2601 }
2602
2603 static void
2604 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2605 {
2606     struct i965_driver_data *i965 = i965_driver_data(ctx);
2607     struct intel_batchbuffer *batch = i965->batch;
2608
2609     BEGIN_BATCH(batch, 7);
2610     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2611     OUT_BATCH(batch,
2612               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2613               (I965_SURFACE_NULL << 29));
2614     OUT_BATCH(batch, 0);
2615     OUT_BATCH(batch, 0);
2616     OUT_BATCH(batch, 0);
2617     OUT_BATCH(batch, 0);
2618     OUT_BATCH(batch, 0);
2619     ADVANCE_BATCH(batch);
2620
2621     BEGIN_BATCH(batch, 3);
2622     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2623     OUT_BATCH(batch, 0);
2624     OUT_BATCH(batch, 0);
2625     ADVANCE_BATCH(batch);
2626 }
2627
2628 static void
2629 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2630 {
2631     i965_render_drawing_rectangle(ctx);
2632 }
2633
2634 static void 
2635 gen7_emit_vs_state(VADriverContextP ctx)
2636 {
2637     struct i965_driver_data *i965 = i965_driver_data(ctx);
2638     struct intel_batchbuffer *batch = i965->batch;
2639
2640     /* disable VS constant buffer */
2641     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2642     OUT_BATCH(batch, 0);
2643     OUT_BATCH(batch, 0);
2644     OUT_BATCH(batch, 0);
2645     OUT_BATCH(batch, 0);
2646     OUT_BATCH(batch, 0);
2647     OUT_BATCH(batch, 0);
2648         
2649     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2650     OUT_BATCH(batch, 0); /* without VS kernel */
2651     OUT_BATCH(batch, 0);
2652     OUT_BATCH(batch, 0);
2653     OUT_BATCH(batch, 0);
2654     OUT_BATCH(batch, 0); /* pass-through */
2655 }
2656
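/*
 * GEN7 added hull/domain shaders, the tessellator and stream-out to
 * the fixed-function pipeline; none of them are used for video
 * rendering, so they are all explicitly disabled here.
 */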
2657 static void 
2658 gen7_emit_bypass_state(VADriverContextP ctx)
2659 {
2660     struct i965_driver_data *i965 = i965_driver_data(ctx);
2661     struct intel_batchbuffer *batch = i965->batch;
2662
2663     /* bypass GS */
2664     BEGIN_BATCH(batch, 7);
2665     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2666     OUT_BATCH(batch, 0);
2667     OUT_BATCH(batch, 0);
2668     OUT_BATCH(batch, 0);
2669     OUT_BATCH(batch, 0);
2670     OUT_BATCH(batch, 0);
2671     OUT_BATCH(batch, 0);
2672     ADVANCE_BATCH(batch);
2673
2674     BEGIN_BATCH(batch, 7);      
2675     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2676     OUT_BATCH(batch, 0); /* without GS kernel */
2677     OUT_BATCH(batch, 0);
2678     OUT_BATCH(batch, 0);
2679     OUT_BATCH(batch, 0);
2680     OUT_BATCH(batch, 0);
2681     OUT_BATCH(batch, 0); /* pass-through */
2682     ADVANCE_BATCH(batch);
2683
2684     BEGIN_BATCH(batch, 2);
2685     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2686     OUT_BATCH(batch, 0);
2687     ADVANCE_BATCH(batch);
2688
2689     /* disable HS */
2690     BEGIN_BATCH(batch, 7);
2691     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2692     OUT_BATCH(batch, 0);
2693     OUT_BATCH(batch, 0);
2694     OUT_BATCH(batch, 0);
2695     OUT_BATCH(batch, 0);
2696     OUT_BATCH(batch, 0);
2697     OUT_BATCH(batch, 0);
2698     ADVANCE_BATCH(batch);
2699
2700     BEGIN_BATCH(batch, 7);
2701     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2702     OUT_BATCH(batch, 0);
2703     OUT_BATCH(batch, 0);
2704     OUT_BATCH(batch, 0);
2705     OUT_BATCH(batch, 0);
2706     OUT_BATCH(batch, 0);
2707     OUT_BATCH(batch, 0);
2708     ADVANCE_BATCH(batch);
2709
2710     BEGIN_BATCH(batch, 2);
2711     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2712     OUT_BATCH(batch, 0);
2713     ADVANCE_BATCH(batch);
2714
2715     /* Disable TE */
2716     BEGIN_BATCH(batch, 4);
2717     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2718     OUT_BATCH(batch, 0);
2719     OUT_BATCH(batch, 0);
2720     OUT_BATCH(batch, 0);
2721     ADVANCE_BATCH(batch);
2722
2723     /* Disable DS */
2724     BEGIN_BATCH(batch, 7);
2725     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2726     OUT_BATCH(batch, 0);
2727     OUT_BATCH(batch, 0);
2728     OUT_BATCH(batch, 0);
2729     OUT_BATCH(batch, 0);
2730     OUT_BATCH(batch, 0);
2731     OUT_BATCH(batch, 0);
2732     ADVANCE_BATCH(batch);
2733
2734     BEGIN_BATCH(batch, 6);
2735     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2736     OUT_BATCH(batch, 0);
2737     OUT_BATCH(batch, 0);
2738     OUT_BATCH(batch, 0);
2739     OUT_BATCH(batch, 0);
2740     OUT_BATCH(batch, 0);
2741     ADVANCE_BATCH(batch);
2742
2743     BEGIN_BATCH(batch, 2);
2744     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2745     OUT_BATCH(batch, 0);
2746     ADVANCE_BATCH(batch);
2747
2748     /* Disable STREAMOUT */
2749     BEGIN_BATCH(batch, 3);
2750     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2751     OUT_BATCH(batch, 0);
2752     OUT_BATCH(batch, 0);
2753     ADVANCE_BATCH(batch);
2754 }
2755
2756 static void 
2757 gen7_emit_clip_state(VADriverContextP ctx)
2758 {
2759     struct i965_driver_data *i965 = i965_driver_data(ctx);
2760     struct intel_batchbuffer *batch = i965->batch;
2761
2762     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2763     OUT_BATCH(batch, 0);
2764     OUT_BATCH(batch, 0); /* pass-through */
2765     OUT_BATCH(batch, 0);
2766 }
2767
2768 static void 
2769 gen7_emit_sf_state(VADriverContextP ctx)
2770 {
2771     struct i965_driver_data *i965 = i965_driver_data(ctx);
2772     struct intel_batchbuffer *batch = i965->batch;
2773
2774     BEGIN_BATCH(batch, 14);
2775     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2776     OUT_BATCH(batch,
2777               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2778               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2779               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2780     OUT_BATCH(batch, 0);
2781     OUT_BATCH(batch, 0);
2782     OUT_BATCH(batch, 0); /* DW4 */
2783     OUT_BATCH(batch, 0);
2784     OUT_BATCH(batch, 0);
2785     OUT_BATCH(batch, 0);
2786     OUT_BATCH(batch, 0);
2787     OUT_BATCH(batch, 0); /* DW9 */
2788     OUT_BATCH(batch, 0);
2789     OUT_BATCH(batch, 0);
2790     OUT_BATCH(batch, 0);
2791     OUT_BATCH(batch, 0);
2792     ADVANCE_BATCH(batch);
2793
2794     BEGIN_BATCH(batch, 7);
2795     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2796     OUT_BATCH(batch, 0);
2797     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2798     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2799     OUT_BATCH(batch, 0);
2800     OUT_BATCH(batch, 0);
2801     OUT_BATCH(batch, 0);
2802     ADVANCE_BATCH(batch);
2803 }
2804
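/*
 * Haswell moved the PS max-thread field and requires an explicit
 * sample mask in 3DSTATE_PS, hence the device check below.
 */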
2805 static void 
2806 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2807 {
2808     struct i965_driver_data *i965 = i965_driver_data(ctx);
2809     struct intel_batchbuffer *batch = i965->batch;
2810     struct i965_render_state *render_state = &i965->render_state;
2811     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
2812     unsigned int num_samples = 0;
2813
2814     if (IS_HASWELL(i965->intel.device_id)) {
2815         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
2816         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
2817     }
2818
2819     BEGIN_BATCH(batch, 3);
2820     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2821     OUT_BATCH(batch,
2822               GEN7_WM_DISPATCH_ENABLE |
2823               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2824     OUT_BATCH(batch, 0);
2825     ADVANCE_BATCH(batch);
2826
2827     BEGIN_BATCH(batch, 7);
2828     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2829     OUT_BATCH(batch, 1);
2830     OUT_BATCH(batch, 0);
2831     OUT_RELOC(batch, 
2832               render_state->curbe.bo,
2833               I915_GEM_DOMAIN_INSTRUCTION, 0,
2834               0);
2835     OUT_BATCH(batch, 0);
2836     OUT_BATCH(batch, 0);
2837     OUT_BATCH(batch, 0);
2838     ADVANCE_BATCH(batch);
2839
2840     BEGIN_BATCH(batch, 8);
2841     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2842     OUT_RELOC(batch, 
2843               render_state->render_kernels[kernel].bo,
2844               I915_GEM_DOMAIN_INSTRUCTION, 0,
2845               0);
2846     OUT_BATCH(batch, 
2847               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2848               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2849     OUT_BATCH(batch, 0); /* scratch space base offset */
2850     OUT_BATCH(batch, 
2851               ((render_state->max_wm_threads - 1) << max_threads_shift) | num_samples |
2852               GEN7_PS_PUSH_CONSTANT_ENABLE |
2853               GEN7_PS_ATTRIBUTE_ENABLE |
2854               GEN7_PS_16_DISPATCH_ENABLE);
2855     OUT_BATCH(batch, 
2856               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2857     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2858     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2859     ADVANCE_BATCH(batch);
2860 }
2861
2862 static void
2863 gen7_emit_vertex_element_state(VADriverContextP ctx)
2864 {
2865     struct i965_driver_data *i965 = i965_driver_data(ctx);
2866     struct intel_batchbuffer *batch = i965->batch;
2867
2868     /* Set up our vertex elements, sourced from the single vertex buffer. */
2869     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2870     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2871     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2872               GEN6_VE0_VALID |
2873               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2874               (0 << VE0_OFFSET_SHIFT));
2875     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2876               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2877               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2878               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2879     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2880     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2881               GEN6_VE0_VALID |
2882               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2883               (8 << VE0_OFFSET_SHIFT));
2884     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2885               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2886               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2887               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2888 }
2889
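/*
 * Unlike GEN6, GEN7 requires GEN7_VB0_ADDRESS_MODIFYENABLE for the
 * buffer address to take effect, and 3DPRIMITIVE carries the
 * topology in DW1 instead of the command dword.
 */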
2890 static void
2891 gen7_emit_vertices(VADriverContextP ctx)
2892 {
2893     struct i965_driver_data *i965 = i965_driver_data(ctx);
2894     struct intel_batchbuffer *batch = i965->batch;
2895     struct i965_render_state *render_state = &i965->render_state;
2896
2897     BEGIN_BATCH(batch, 5);
2898     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2899     OUT_BATCH(batch, 
2900               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2901               GEN6_VB0_VERTEXDATA |
2902               GEN7_VB0_ADDRESS_MODIFYENABLE |
2903               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2904     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2905     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2906     OUT_BATCH(batch, 0);
2907     ADVANCE_BATCH(batch);
2908
2909     BEGIN_BATCH(batch, 7);
2910     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2911     OUT_BATCH(batch,
2912               _3DPRIM_RECTLIST |
2913               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2914     OUT_BATCH(batch, 3); /* vertex count per instance */
2915     OUT_BATCH(batch, 0); /* start vertex offset */
2916     OUT_BATCH(batch, 1); /* single instance */
2917     OUT_BATCH(batch, 0); /* start instance location */
2918     OUT_BATCH(batch, 0);
2919     ADVANCE_BATCH(batch);
2920 }
2921
2922 static void
2923 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2924 {
2925     struct i965_driver_data *i965 = i965_driver_data(ctx);
2926     struct intel_batchbuffer *batch = i965->batch;
2927
2928     intel_batchbuffer_start_atomic(batch, 0x1000);
2929     intel_batchbuffer_emit_mi_flush(batch);
2930     gen7_emit_invariant_states(ctx);
2931     gen7_emit_state_base_address(ctx);
2932     gen7_emit_viewport_state_pointers(ctx);
2933     gen7_emit_urb(ctx);
2934     gen7_emit_cc_state_pointers(ctx);
2935     gen7_emit_sampler_state_pointers(ctx);
2936     gen7_emit_bypass_state(ctx);
2937     gen7_emit_vs_state(ctx);
2938     gen7_emit_clip_state(ctx);
2939     gen7_emit_sf_state(ctx);
2940     gen7_emit_wm_state(ctx, kernel);
2941     gen7_emit_binding_table(ctx);
2942     gen7_emit_depth_buffer_state(ctx);
2943     gen7_emit_drawing_rectangle(ctx);
2944     gen7_emit_vertex_element_state(ctx);
2945     gen7_emit_vertices(ctx);
2946     intel_batchbuffer_end_atomic(batch);
2947 }
2948
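/*
 * Composite a video surface onto the current drawable on Gen7.
 * i965_clear_dest_region() is assumed to clear whatever part of the
 * destination the scaled picture will not cover, before the pass that
 * draws the picture itself is emitted.
 */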
static void
gen7_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    gen7_render_initialize(ctx);
    gen7_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
    i965_clear_dest_region(ctx);
    gen7_render_emit_states(ctx, PS_KERNEL);
    intel_batchbuffer_flush(batch);
}

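/*
 * Subpictures are blended over the video with standard "source over"
 * alpha blending,
 *
 *     dst = src * src.alpha + dst * (1 - src.alpha)
 *
 * which is exactly what the SRC_ALPHA / INV_SRC_ALPHA factors and the
 * ADD blend function below select.
 */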
static void
gen7_subpicture_render_blend_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_blend_state *blend_state;

    dri_bo_map(render_state->cc.blend, 1);
    assert(render_state->cc.blend->virtual);
    blend_state = render_state->cc.blend->virtual;
    memset(blend_state, 0, sizeof(*blend_state));
    blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
    blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
    blend_state->blend0.blend_enable = 1;
    blend_state->blend1.post_blend_clamp_enable = 1;
    blend_state->blend1.pre_blend_clamp_enable = 1;
    blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
    dri_bo_unmap(render_state->cc.blend);
}

static void
gen7_subpicture_render_setup_states(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_cc_viewport(ctx);
    gen7_render_color_calc_state(ctx);
    gen7_subpicture_render_blend_state(ctx);
    gen7_render_depth_stencil_state(ctx);
    i965_subpic_render_upload_constants(ctx, surface);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
gen7_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct object_surface *obj_surface = SURFACE(surface);
    unsigned int index;
    struct object_subpic *obj_subpic;

    assert(obj_surface);
    index = obj_surface->subpic_render_idx;
    obj_subpic = SUBPIC(obj_surface->subpic[index]);
    assert(obj_subpic);

    gen7_render_initialize(ctx);
    gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
    gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
    i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
    intel_batchbuffer_flush(batch);
}


/*
 * global functions
 */
VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);
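/*
 * Backs vaPutSurface(): run post-processing (e.g. deinterlacing or
 * scaling) into an intermediate surface when needed, composite the
 * result with the generation-specific render path, then destroy the
 * intermediate surface if one was created.
 */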
void
intel_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int has_done_scaling = 0;
    VASurfaceID in_surface_id = surface;
    VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);

    assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));

    if (out_surface_id != VA_INVALID_ID)
        in_surface_id = out_surface_id;

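    /*
     * If post-processing already scaled the picture, the intermediate
     * surface matches dst_rect, so sample from dst_rect; otherwise the
     * render pass below performs the scaling from src_rect itself.
     */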
    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else
        i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);

    if (in_surface_id != surface)
        i965_DestroySurfaces(ctx, &in_surface_id, 1);
}

void
intel_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else
        i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
}

bool
i965_render_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    int i;

    /* kernel */
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
                                 sizeof(render_kernels_gen5[0])));
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                 sizeof(render_kernels_gen6[0])));

    if (IS_GEN7(i965->intel.device_id))
        memcpy(render_state->render_kernels,
               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
               sizeof(render_state->render_kernels));
    else if (IS_GEN6(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
    else if (IS_IRONLAKE(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
    else
        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));

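    /*
     * Upload each kernel binary into its own GEM buffer object; slots
     * a generation's kernel table leaves empty (size == 0) are skipped.
     */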
    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        if (!kernel->size)
            continue;

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size, 0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    /* constant buffer */
    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
                                          "constant buffer",
                                          4096, 64);
    assert(render_state->curbe.bo);

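    /*
     * Maximum number of PS threads requested in WM/PS state, per GT
     * SKU.  These values are assumed to be the hardware dispatch
     * limits of each configuration (e.g. Ironlake: 12 EUs x 6 threads).
     */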
    if (IS_IVB_GT1(i965->intel.device_id) ||
        IS_HSW_GT1(i965->intel.device_id)) {
        render_state->max_wm_threads = 48;
    } else if (IS_IVB_GT2(i965->intel.device_id) ||
               IS_HSW_GT2(i965->intel.device_id)) {
        render_state->max_wm_threads = 172;
    } else if (IS_SNB_GT1(i965->intel.device_id)) {
        render_state->max_wm_threads = 40;
    } else if (IS_SNB_GT2(i965->intel.device_id)) {
        render_state->max_wm_threads = 80;
    } else if (IS_IRONLAKE(i965->intel.device_id)) {
        render_state->max_wm_threads = 72; /* 12 * 6 */
    } else if (IS_G4X(i965->intel.device_id)) {
        render_state->max_wm_threads = 50; /* 10 * 5 */
    } else {
        /* unknown device: the thread-count table above must be extended */
        assert(0);
    }

    return true;
}

void
i965_render_terminate(VADriverContextP ctx)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;

    dri_bo_unreference(render_state->curbe.bo);
    render_state->curbe.bo = NULL;

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    dri_bo_unreference(render_state->vb.vertex_buffer);
    render_state->vb.vertex_buffer = NULL;
    dri_bo_unreference(render_state->vs.state);
    render_state->vs.state = NULL;
    dri_bo_unreference(render_state->sf.state);
    render_state->sf.state = NULL;
    dri_bo_unreference(render_state->wm.sampler);
    render_state->wm.sampler = NULL;
    dri_bo_unreference(render_state->wm.state);
    render_state->wm.state = NULL;
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    render_state->wm.surface_state_binding_table_bo = NULL;
    dri_bo_unreference(render_state->cc.viewport);
    render_state->cc.viewport = NULL;
    dri_bo_unreference(render_state->cc.state);
    render_state->cc.state = NULL;
    dri_bo_unreference(render_state->cc.blend);
    render_state->cc.blend = NULL;
    dri_bo_unreference(render_state->cc.depth_stencil);
    render_state->cc.depth_stencil = NULL;

    if (render_state->draw_region) {
        dri_bo_unreference(render_state->draw_region->bo);
        free(render_state->draw_region);
        render_state->draw_region = NULL;
    }
}