Render: directly use the backing store buffer
src/i965_render.c (vaapi-intel-driver)
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_drmcommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)
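
/*
 * Editor's note (sketch): I965_GRF_BLOCKS() converts a GRF register count
 * into the hardware's minus-one "blocks of 16 registers" encoding used by
 * the thread0.grf_reg_count fields below, e.g.:
 *
 *     I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF)  ->  (16 + 15) / 16 - 1 == 0
 *     I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF)  ->  (32 + 15) / 16 - 1 == 1
 */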

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};
static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};
static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

/* Programs for Haswell */
static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b.haswell"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)
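
/*
 * Editor's note (sketch): the surface-state/binding-table BO is laid out as
 * MAX_RENDER_SURFACES padded surface-state slots followed by the binding
 * table itself, so slot i lives at SURFACE_STATE_OFFSET(i) and the binding
 * table entry for it simply records that same offset:
 *
 *     [ss 0][ss 1]...[ss MAX_RENDER_SURFACES-1][binding table entries]
 */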

static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}
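
/*
 * Editor's note (sketch): float_to_uint() reinterprets the IEEE-754 bit
 * pattern rather than converting the value, which is what the command
 * streamer expects for floating-point payload dwords, e.g.
 *
 *     float_to_uint(1.0f) == 0x3f800000
 *
 * as used by i965_render_constant_color() below.
 */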

enum
{
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7_haswell[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7_haswell,
        sizeof(ps_kernel_static_gen7_haswell),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

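/*
 * Editor's note (sketch): i965_render_urb_layout() below partitions the URB
 * in fixed order (VS, GS, CLIP, SF, CS), each stage sized as
 * ENTRIES * ENTRY_SIZE. With the values above the fence offsets become:
 *
 *     VS:   start 0,  end 8 * 1 = 8
 *     GS:   start 8,  end 8       (0 entries)
 *     CLIP: start 8,  end 8       (0 entries)
 *     SF:   start 8,  end 8 + 1 * 2 = 10
 *     CS:   start 10, end 10 + 1 * 1 = 11
 */
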
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
    }

    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
    }

    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0; /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;   /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(
    struct i965_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    unsigned int               width,
    unsigned int               height,
    unsigned int               pitch,
    unsigned int               format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD | I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

/* Set "Shader Channel Select" */
void
gen7_render_set_surface_scs(struct gen7_surface_state *ss)
{
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
}
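
/*
 * Editor's note: the identity R/G/B/A mapping above matches the implicit
 * pre-Haswell behavior; Haswell made the channel select explicit in
 * SURFACE_STATE, which is why callers apply gen7_render_set_surface_scs()
 * only under IS_HASWELL() in the IS_GEN7 paths below.
 */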

static void
gen7_render_set_surface_state(
    struct gen7_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    int                        width,
    int                        height,
    int                        pitch,
    int                        format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD | I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(
    VADriverContextP ctx,
    int              index,
    dri_bo          *region,
    unsigned long    offset,
    int              w,
    int              h,
    int              pitch,
    int              format,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

static void
i965_render_src_surfaces_state(
    VADriverContextP ctx,
    struct object_surface *obj_surface,
    unsigned int     flags
)
{
    int region_pitch;
    int rw, rh;
    dri_bo *region;

    region_pitch = obj_surface->width;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);

    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags); /* UV */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags);
    } else {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* U */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
        i965_render_src_surface_state(ctx, 5, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* V */
        i965_render_src_surface_state(ctx, 6, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      struct object_surface *obj_surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *subpic_region;
    unsigned int index;
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;

    /* validate the surface before dereferencing it */
    assert(obj_surface);
    assert(obj_surface->bo);

    index = obj_surface->subpic_render_idx;
    obj_subpic = SUBPIC(obj_surface->subpic[index]);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
}

static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

static void
i965_fill_vertex_buffer(
    VADriverContextP ctx,
    float tex_coords[4], /* [(u1,v1);(u2,v2)] */
    float vid_coords[4]  /* [(x1,y1);(x2,y2)] */
)
{
    struct i965_driver_data * const i965 = i965_driver_data(ctx);
    float vb[12];

    enum { X1, Y1, X2, Y2 };

    static const unsigned int g_rotation_indices[][6] = {
        [VA_ROTATION_NONE] = { X2, Y2, X1, Y2, X1, Y1 },
        [VA_ROTATION_90]   = { X2, Y1, X2, Y2, X1, Y2 },
        [VA_ROTATION_180]  = { X1, Y1, X2, Y1, X2, Y2 },
        [VA_ROTATION_270]  = { X1, Y2, X1, Y1, X2, Y1 },
    };

    const unsigned int * const rotation_indices =
        g_rotation_indices[i965->rotation_attrib->value];

    vb[0]  = tex_coords[rotation_indices[0]]; /* bottom-right corner */
    vb[1]  = tex_coords[rotation_indices[1]];
    vb[2]  = vid_coords[X2];
    vb[3]  = vid_coords[Y2];

    vb[4]  = tex_coords[rotation_indices[2]]; /* bottom-left corner */
    vb[5]  = tex_coords[rotation_indices[3]];
    vb[6]  = vid_coords[X1];
    vb[7]  = vid_coords[Y2];

    vb[8]  = tex_coords[rotation_indices[4]]; /* top-left corner */
    vb[9]  = tex_coords[rotation_indices[5]];
    vb[10] = vid_coords[X1];
    vb[11] = vid_coords[Y1];

    dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
}
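
/*
 * Editor's note (sketch): the buffer holds three vertices of four floats
 * each, (tex_s, tex_t, screen_x, screen_y), i.e. 16 bytes per vertex, which
 * matches the (4 * 4) pitch programmed by CMD_VERTEX_BUFFERS in
 * i965_render_startup(). Three corners suffice because the quad is drawn
 * as a RECTLIST primitive.
 */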

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 struct object_surface *obj_surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    unsigned int index = obj_surface->subpic_render_idx;
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic[index]);
    float tex_coords[4], vid_coords[4];
    VARectangle dst_rect;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    tex_coords[0] = (float)obj_subpic->src_rect.x / obj_subpic->width;
    tex_coords[1] = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tex_coords[2] = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    tex_coords[3] = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    vid_coords[0] = dst_rect.x;
    vid_coords[1] = dst_rect.y;
    vid_coords[2] = (float)(dst_rect.x + dst_rect.width);
    vid_coords[3] = (float)(dst_rect.y + dst_rect.height);

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    float tex_coords[4], vid_coords[4];
    int width, height;

    width  = obj_surface->orig_width;
    height = obj_surface->orig_height;

    tex_coords[0] = (float)src_rect->x / width;
    tex_coords[1] = (float)src_rect->y / height;
    tex_coords[2] = (float)(src_rect->x + src_rect->width) / width;
    tex_coords[3] = (float)(src_rect->y + src_rect->height) / height;

    vid_coords[0] = dest_region->x + dst_rect->x;
    vid_coords[1] = dest_region->y + dst_rect->y;
    vid_coords[2] = vid_coords[0] + dst_rect->width;
    vid_coords[3] = vid_coords[1] + dst_rect->height;

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_constants(VADriverContextP ctx,
                             struct object_surface *obj_surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
        *constant_buffer = 2;
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
            *constant_buffer = 1;
        else
            *constant_buffer = 0;
    }

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_subpic_render_upload_constants(VADriverContextP ctx,
                                    struct object_surface *obj_surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    float *constant_buffer;
    float global_alpha = 1.0;
    unsigned int index = obj_surface->subpic_render_idx;

    if (obj_surface->subpic[index] != VA_INVALID_ID) {
        struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);

        if (obj_subpic->flags & VA_SUBPICTURE_GLOBAL_ALPHA) {
            global_alpha = obj_subpic->global_alpha;
        }
    }

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;
    *constant_buffer = global_alpha;

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, obj_surface, flags);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, obj_surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, obj_surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_constants(ctx, obj_surface);
    i965_subpic_render_upload_vertex(ctx, obj_surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}
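
/*
 * Editor's note: only the surface state base address is relocated above; it
 * points at wm.surface_state_binding_table_bo, so the SURFACE_STATE_OFFSET()
 * and BINDING_TABLE_OFFSET values used elsewhere in this file are resolved
 * relative to this base.
 */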

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |  /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));           /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | ((dest_region->height - 1) << 16));
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: bits 0-23 hold the color, bits 24-31 the alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}
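
/*
 * Editor's note: a RECTLIST primitive consumes three vertices per rectangle
 * (the hardware derives the fourth corner), which is why only three corners
 * are stored by i965_fill_vertex_buffer() above.
 */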
1452
static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
    br13 = 0xf0 << 16;
    pitch = dest_region->pitch;

    if (dest_region->cpp == 4) {
        br13 |= BR13_8888;
        blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
    } else {
        assert(dest_region->cpp == 2);
        br13 |= BR13_565;
    }

    if (dest_region->tiling != I915_TILING_NONE) {
        blt_cmd |= XY_COLOR_BLT_DST_TILED;
        pitch /= 4;
    }

    br13 |= pitch;

    if (IS_GEN6(i965->intel.device_id) ||
        IS_GEN7(i965->intel.device_id)) {
        intel_batchbuffer_start_atomic_blt(batch, 24);
        BEGIN_BLT_BATCH(batch, 6);
    } else {
        intel_batchbuffer_start_atomic(batch, 24);
        BEGIN_BATCH(batch, 6);
    }

    OUT_BATCH(batch, blt_cmd);
    OUT_BATCH(batch, br13);
    OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
    OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
              (dest_region->x + dest_region->width));
    OUT_RELOC(batch, dest_region->bo,
              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
              0);
    OUT_BATCH(batch, 0x0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_surface_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    i965_clear_dest_region(ctx);
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_constant_buffer(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_subpic_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}


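/*
 * (Re)allocate the buffer objects backing the fixed-function unit
 * states of the original GEN4/GEN5 pipeline.  Each state lives in its
 * own bo so relocations can point at it directly; the bos are
 * recreated on every putsurface call rather than reused.
 */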
static void
i965_render_initialize(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    dri_bo *bo;

    /* VERTEX BUFFER */
    dri_bo_unreference(render_state->vb.vertex_buffer);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vertex buffer",
                      4096,
                      4096);
    assert(bo);
    render_state->vb.vertex_buffer = bo;

    /* VS */
    dri_bo_unreference(render_state->vs.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vs state",
                      sizeof(struct i965_vs_unit_state),
                      64);
    assert(bo);
    render_state->vs.state = bo;

    /* GS */
    /* CLIP */
    /* SF */
    dri_bo_unreference(render_state->sf.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "sf state",
                      sizeof(struct i965_sf_unit_state),
                      64);
    assert(bo);
    render_state->sf.state = bo;

    /* WM */
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
                      4096);
    assert(bo);
    render_state->wm.surface_state_binding_table_bo = bo;

    dri_bo_unreference(render_state->wm.sampler);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "sampler state",
                      MAX_SAMPLERS * sizeof(struct i965_sampler_state),
                      64);
    assert(bo);
    render_state->wm.sampler = bo;
    render_state->wm.sampler_count = 0;

    dri_bo_unreference(render_state->wm.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "wm state",
                      sizeof(struct i965_wm_unit_state),
                      64);
    assert(bo);
    render_state->wm.state = bo;

    /* COLOR CALCULATOR */
    dri_bo_unreference(render_state->cc.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "color calc state",
                      sizeof(struct i965_cc_unit_state),
                      64);
    assert(bo);
    render_state->cc.state = bo;

    dri_bo_unreference(render_state->cc.viewport);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "cc viewport",
                      sizeof(struct i965_cc_viewport),
                      64);
    assert(bo);
    render_state->cc.viewport = bo;
}

static void
i965_render_put_surface(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    i965_render_initialize(ctx);
    i965_surface_render_state_setup(ctx, obj_surface, src_rect, dst_rect, flags);
    i965_surface_render_pipeline_setup(ctx);
    intel_batchbuffer_flush(batch);
}

static void
i965_render_put_subpicture(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int index = obj_surface->subpic_render_idx;
    struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);

    assert(obj_subpic);

    i965_render_initialize(ctx);
    i965_subpic_render_state_setup(ctx, obj_surface, src_rect, dst_rect);
    i965_subpic_render_pipeline_setup(ctx);
    i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
    intel_batchbuffer_flush(batch);
}

/*
 * for GEN6+
 */
static void
gen6_render_initialize(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    dri_bo *bo;

    /* VERTEX BUFFER */
    dri_bo_unreference(render_state->vb.vertex_buffer);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vertex buffer",
                      4096,
                      4096);
    assert(bo);
    render_state->vb.vertex_buffer = bo;

    /* WM */
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
                      4096);
    assert(bo);
    render_state->wm.surface_state_binding_table_bo = bo;

    dri_bo_unreference(render_state->wm.sampler);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "sampler state",
                      MAX_SAMPLERS * sizeof(struct i965_sampler_state),
                      4096);
    assert(bo);
    render_state->wm.sampler = bo;
    render_state->wm.sampler_count = 0;

    /* COLOR CALCULATOR */
    dri_bo_unreference(render_state->cc.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "color calc state",
                      sizeof(struct gen6_color_calc_state),
                      4096);
    assert(bo);
    render_state->cc.state = bo;

    /* CC VIEWPORT */
    dri_bo_unreference(render_state->cc.viewport);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "cc viewport",
                      sizeof(struct i965_cc_viewport),
                      4096);
    assert(bo);
    render_state->cc.viewport = bo;

    /* BLEND STATE */
    dri_bo_unreference(render_state->cc.blend);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "blend state",
                      sizeof(struct gen6_blend_state),
                      4096);
    assert(bo);
    render_state->cc.blend = bo;

    /* DEPTH & STENCIL STATE */
    dri_bo_unreference(render_state->cc.depth_stencil);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "depth & stencil state",
                      sizeof(struct gen6_depth_stencil_state),
                      4096);
    assert(bo);
    render_state->cc.depth_stencil = bo;
}

static void
gen6_render_color_calc_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_color_calc_state *color_calc_state;

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    color_calc_state = render_state->cc.state->virtual;
    memset(color_calc_state, 0, sizeof(*color_calc_state));
    color_calc_state->constant_r = 1.0;
    color_calc_state->constant_g = 0.0;
    color_calc_state->constant_b = 1.0;
    color_calc_state->constant_a = 1.0;
    dri_bo_unmap(render_state->cc.state);
}

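/*
 * Blending proper is disabled for plain video rendering; the blend
 * state instead enables a logic op, and function 0xc (COPY) simply
 * passes the source color through unchanged.
 */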
static void
gen6_render_blend_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_blend_state *blend_state;

    dri_bo_map(render_state->cc.blend, 1);
    assert(render_state->cc.blend->virtual);
    blend_state = render_state->cc.blend->virtual;
    memset(blend_state, 0, sizeof(*blend_state));
    blend_state->blend1.logic_op_enable = 1;
    blend_state->blend1.logic_op_func = 0xc;
    dri_bo_unmap(render_state->cc.blend);
}

static void
gen6_render_depth_stencil_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_depth_stencil_state *depth_stencil_state;

    dri_bo_map(render_state->cc.depth_stencil, 1);
    assert(render_state->cc.depth_stencil->virtual);
    depth_stencil_state = render_state->cc.depth_stencil->virtual;
    memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
    dri_bo_unmap(render_state->cc.depth_stencil);
}

static void
gen6_render_setup_states(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, obj_surface, flags);
    i965_render_sampler(ctx);
    i965_render_cc_viewport(ctx);
    gen6_render_color_calc_state(ctx);
    gen6_render_blend_state(ctx);
    gen6_render_depth_stencil_state(ctx);
    i965_render_upload_constants(ctx, obj_surface);
    i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
}

static void
gen6_emit_invariant_states(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);

    OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
    OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
              GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
    OUT_BATCH(batch, 1);

    /* Set system instruction pointer */
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
}

static void
gen6_emit_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
    OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
}

static void
gen6_emit_viewport_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
              GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
              (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
}

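/*
 * Minimal URB configuration: the VS gets the hardware minimum of 24
 * entries (the size field is encoded as n - 1, so (1 - 1) means one
 * 128-byte row per entry), and the GS gets nothing since no GS kernel
 * runs.
 */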
static void
gen6_emit_urb(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
    OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
              (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
    OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
              (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
}

static void
gen6_emit_cc_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
    OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
    OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
}

static void
gen6_emit_sampler_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
              GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
              (4 - 2));
    OUT_BATCH(batch, 0); /* VS */
    OUT_BATCH(batch, 0); /* GS */
    OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
}

static void
gen6_emit_binding_table(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* Binding table pointers */
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
              GEN6_BINDING_TABLE_MODIFY_PS |
              (4 - 2));
    OUT_BATCH(batch, 0);                /* vs */
    OUT_BATCH(batch, 0);                /* gs */
    /* Only the PS uses the binding table */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
}

static void
gen6_emit_depth_buffer_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
    OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
              (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
    OUT_BATCH(batch, 0);
}

static void
gen6_emit_drawing_rectangle(VADriverContextP ctx)
{
    i965_render_drawing_rectangle(ctx);
}

static void
gen6_emit_vs_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* disable VS constant buffer */
    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
    OUT_BATCH(batch, 0); /* without VS kernel */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
}

static void
gen6_emit_gs_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* disable GS constant buffer */
    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
    OUT_BATCH(batch, 0); /* without GS kernel */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
}

static void
gen6_emit_clip_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
    OUT_BATCH(batch, 0);
}

static void
gen6_emit_sf_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
    OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
              (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
              (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
    OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* DW9 */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* DW14 */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* DW19 */
}

static void
gen6_emit_wm_state(VADriverContextP ctx, int kernel)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
              GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
              (5 - 2));
    OUT_RELOC(batch,
              render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
    OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
              (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
    OUT_BATCH(batch, ((render_state->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
              GEN6_3DSTATE_WM_DISPATCH_ENABLE |
              GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
    OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
              GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
}

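/*
 * Two vertex elements are fetched from the single vertex buffer: the
 * screen position at byte offset 0 and the texture coordinate at byte
 * offset 8.  Both are R32G32_FLOAT pairs padded out to a vec4 with 1.0
 * in the z/w components.
 */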
static void
gen6_emit_vertex_element_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* Set up our vertex elements, sourced from the single vertex buffer. */
    OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
    /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
    OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
              GEN6_VE0_VALID |
              (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
              (0 << VE0_OFFSET_SHIFT));
    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
              (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
    /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
    OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
              GEN6_VE0_VALID |
              (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
              (8 << VE0_OFFSET_SHIFT));
    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
              (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
}

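/*
 * Bind the vertex buffer and draw.  The second relocation supplies the
 * buffer end address: 12 * 4 bytes covers the three four-float vertices
 * the RECTLIST consumes.
 */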
static void
gen6_emit_vertices(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
              GEN6_VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

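/*
 * Emit the whole 3D pipeline for one draw.  Ordering matters here:
 * STATE_BASE_ADDRESS must precede anything that emits offsets relative
 * to it, and all unit state must be in place before the vertices at
 * the end trigger the draw.
 */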
static void
gen6_render_emit_states(VADriverContextP ctx, int kernel)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_emit_invariant_states(ctx);
    gen6_emit_state_base_address(ctx);
    gen6_emit_viewport_state_pointers(ctx);
    gen6_emit_urb(ctx);
    gen6_emit_cc_state_pointers(ctx);
    gen6_emit_sampler_state_pointers(ctx);
    gen6_emit_vs_state(ctx);
    gen6_emit_gs_state(ctx);
    gen6_emit_clip_state(ctx);
    gen6_emit_sf_state(ctx);
    gen6_emit_wm_state(ctx, kernel);
    gen6_emit_binding_table(ctx);
    gen6_emit_depth_buffer_state(ctx);
    gen6_emit_drawing_rectangle(ctx);
    gen6_emit_vertex_element_state(ctx);
    gen6_emit_vertices(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
gen6_render_put_surface(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    gen6_render_initialize(ctx);
    gen6_render_setup_states(ctx, obj_surface, src_rect, dst_rect, flags);
    i965_clear_dest_region(ctx);
    gen6_render_emit_states(ctx, PS_KERNEL);
    intel_batchbuffer_flush(batch);
}

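/*
 * Subpictures are composited with a classic source-over blend:
 * dst = src.a * src + (1 - src.a) * dst, with pre- and post-blend
 * clamping to [0, 1].
 */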
2176 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2177 {
2178     struct i965_driver_data *i965 = i965_driver_data(ctx);
2179     struct i965_render_state *render_state = &i965->render_state;
2180     struct gen6_blend_state *blend_state;
2181
2182     dri_bo_unmap(render_state->cc.state);    
2183     dri_bo_map(render_state->cc.blend, 1);
2184     assert(render_state->cc.blend->virtual);
2185     blend_state = render_state->cc.blend->virtual;
2186     memset(blend_state, 0, sizeof(*blend_state));
2187     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2188     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2189     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2190     blend_state->blend0.blend_enable = 1;
2191     blend_state->blend1.post_blend_clamp_enable = 1;
2192     blend_state->blend1.pre_blend_clamp_enable = 1;
2193     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2194     dri_bo_unmap(render_state->cc.blend);
2195 }
2196
2197 static void
2198 gen6_subpicture_render_setup_states(
2199     VADriverContextP   ctx,
2200     struct object_surface *obj_surface,
2201     const VARectangle *src_rect,
2202     const VARectangle *dst_rect
2203 )
2204 {
2205     i965_render_dest_surface_state(ctx, 0);
2206     i965_subpic_render_src_surfaces_state(ctx, obj_surface);
2207     i965_render_sampler(ctx);
2208     i965_render_cc_viewport(ctx);
2209     gen6_render_color_calc_state(ctx);
2210     gen6_subpicture_render_blend_state(ctx);
2211     gen6_render_depth_stencil_state(ctx);
2212     i965_subpic_render_upload_constants(ctx, obj_surface);
2213     i965_subpic_render_upload_vertex(ctx, obj_surface, dst_rect);
2214 }
2215
2216 static void
2217 gen6_render_put_subpicture(
2218     VADriverContextP   ctx,
2219     struct object_surface *obj_surface,
2220     const VARectangle *src_rect,
2221     const VARectangle *dst_rect
2222 )
2223 {
2224     struct i965_driver_data *i965 = i965_driver_data(ctx);
2225     struct intel_batchbuffer *batch = i965->batch;
2226     unsigned int index = obj_surface->subpic_render_idx;
2227     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);
2228
2229     assert(obj_subpic);
2230     gen6_render_initialize(ctx);
2231     gen6_subpicture_render_setup_states(ctx, obj_surface, src_rect, dst_rect);
2232     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2233     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2234     intel_batchbuffer_flush(batch);
2235 }
2236
2237 /*
2238  * for GEN7
2239  */
2240 static void 
2241 gen7_render_initialize(VADriverContextP ctx)
2242 {
2243     struct i965_driver_data *i965 = i965_driver_data(ctx);
2244     struct i965_render_state *render_state = &i965->render_state;
2245     dri_bo *bo;
2246
2247     /* VERTEX BUFFER */
2248     dri_bo_unreference(render_state->vb.vertex_buffer);
2249     bo = dri_bo_alloc(i965->intel.bufmgr,
2250                       "vertex buffer",
2251                       4096,
2252                       4096);
2253     assert(bo);
2254     render_state->vb.vertex_buffer = bo;
2255
2256     /* WM */
2257     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2258     bo = dri_bo_alloc(i965->intel.bufmgr,
2259                       "surface state & binding table",
2260                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2261                       4096);
2262     assert(bo);
2263     render_state->wm.surface_state_binding_table_bo = bo;
2264
2265     dri_bo_unreference(render_state->wm.sampler);
2266     bo = dri_bo_alloc(i965->intel.bufmgr,
2267                       "sampler state",
2268                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2269                       4096);
2270     assert(bo);
2271     render_state->wm.sampler = bo;
2272     render_state->wm.sampler_count = 0;
2273
2274     /* COLOR CALCULATOR */
2275     dri_bo_unreference(render_state->cc.state);
2276     bo = dri_bo_alloc(i965->intel.bufmgr,
2277                       "color calc state",
2278                       sizeof(struct gen6_color_calc_state),
2279                       4096);
2280     assert(bo);
2281     render_state->cc.state = bo;
2282
2283     /* CC VIEWPORT */
2284     dri_bo_unreference(render_state->cc.viewport);
2285     bo = dri_bo_alloc(i965->intel.bufmgr,
2286                       "cc viewport",
2287                       sizeof(struct i965_cc_viewport),
2288                       4096);
2289     assert(bo);
2290     render_state->cc.viewport = bo;
2291
2292     /* BLEND STATE */
2293     dri_bo_unreference(render_state->cc.blend);
2294     bo = dri_bo_alloc(i965->intel.bufmgr,
2295                       "blend state",
2296                       sizeof(struct gen6_blend_state),
2297                       4096);
2298     assert(bo);
2299     render_state->cc.blend = bo;
2300
2301     /* DEPTH & STENCIL STATE */
2302     dri_bo_unreference(render_state->cc.depth_stencil);
2303     bo = dri_bo_alloc(i965->intel.bufmgr,
2304                       "depth & stencil state",
2305                       sizeof(struct gen6_depth_stencil_state),
2306                       4096);
2307     assert(bo);
2308     render_state->cc.depth_stencil = bo;
2309 }
2310
2311 static void
2312 gen7_render_color_calc_state(VADriverContextP ctx)
2313 {
2314     struct i965_driver_data *i965 = i965_driver_data(ctx);
2315     struct i965_render_state *render_state = &i965->render_state;
2316     struct gen6_color_calc_state *color_calc_state;
2317     
2318     dri_bo_map(render_state->cc.state, 1);
2319     assert(render_state->cc.state->virtual);
2320     color_calc_state = render_state->cc.state->virtual;
2321     memset(color_calc_state, 0, sizeof(*color_calc_state));
2322     color_calc_state->constant_r = 1.0;
2323     color_calc_state->constant_g = 0.0;
2324     color_calc_state->constant_b = 1.0;
2325     color_calc_state->constant_a = 1.0;
2326     dri_bo_unmap(render_state->cc.state);
2327 }
2328
2329 static void
2330 gen7_render_blend_state(VADriverContextP ctx)
2331 {
2332     struct i965_driver_data *i965 = i965_driver_data(ctx);
2333     struct i965_render_state *render_state = &i965->render_state;
2334     struct gen6_blend_state *blend_state;
2335     
2336     dri_bo_map(render_state->cc.blend, 1);
2337     assert(render_state->cc.blend->virtual);
2338     blend_state = render_state->cc.blend->virtual;
2339     memset(blend_state, 0, sizeof(*blend_state));
2340     blend_state->blend1.logic_op_enable = 1;
2341     blend_state->blend1.logic_op_func = 0xc;
2342     blend_state->blend1.pre_blend_clamp_enable = 1;
2343     dri_bo_unmap(render_state->cc.blend);
2344 }
2345
2346 static void
2347 gen7_render_depth_stencil_state(VADriverContextP ctx)
2348 {
2349     struct i965_driver_data *i965 = i965_driver_data(ctx);
2350     struct i965_render_state *render_state = &i965->render_state;
2351     struct gen6_depth_stencil_state *depth_stencil_state;
2352     
2353     dri_bo_map(render_state->cc.depth_stencil, 1);
2354     assert(render_state->cc.depth_stencil->virtual);
2355     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2356     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2357     dri_bo_unmap(render_state->cc.depth_stencil);
2358 }
2359
2360 static void 
2361 gen7_render_sampler(VADriverContextP ctx)
2362 {
2363     struct i965_driver_data *i965 = i965_driver_data(ctx);
2364     struct i965_render_state *render_state = &i965->render_state;
2365     struct gen7_sampler_state *sampler_state;
2366     int i;
2367     
2368     assert(render_state->wm.sampler_count > 0);
2369     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2370
2371     dri_bo_map(render_state->wm.sampler, 1);
2372     assert(render_state->wm.sampler->virtual);
2373     sampler_state = render_state->wm.sampler->virtual;
2374     for (i = 0; i < render_state->wm.sampler_count; i++) {
2375         memset(sampler_state, 0, sizeof(*sampler_state));
2376         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2377         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2378         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2379         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2380         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2381         sampler_state++;
2382     }
2383
2384     dri_bo_unmap(render_state->wm.sampler);
2385 }
2386
2387 static void
2388 gen7_render_setup_states(
2389     VADriverContextP   ctx,
2390     struct object_surface *obj_surface,
2391     const VARectangle *src_rect,
2392     const VARectangle *dst_rect,
2393     unsigned int       flags
2394 )
2395 {
2396     i965_render_dest_surface_state(ctx, 0);
2397     i965_render_src_surfaces_state(ctx, obj_surface, flags);
2398     gen7_render_sampler(ctx);
2399     i965_render_cc_viewport(ctx);
2400     gen7_render_color_calc_state(ctx);
2401     gen7_render_blend_state(ctx);
2402     gen7_render_depth_stencil_state(ctx);
2403     i965_render_upload_constants(ctx, obj_surface);
2404     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
2405 }
2406
2407 static void
gen7_emit_invariant_states(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
    OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
              GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
    OUT_BATCH(batch, 1);
    ADVANCE_BATCH(batch);

    /* Set system instruction pointer */
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
    OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
}

static void
gen7_emit_viewport_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
    OUT_RELOC(batch,
              render_state->cc.viewport,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

/*
 * URB layout on GEN7
 * ----------------------------------------
 * | PS Push Constants (8KB) | VS entries |
 * ----------------------------------------
 */
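/*
 * The push constant allocation below is in 1KB units, so the PS gets
 * the first 8KB; the VS entry block then starts right after it (the
 * starting address fields are in 8KB units).
 */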
static void
gen7_emit_urb(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int num_urb_entries = 32;

    if (IS_HASWELL(i965->intel.device_id))
        num_urb_entries = 64;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
    OUT_BATCH(batch, 8); /* in 1KBs */
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
    OUT_BATCH(batch,
              (num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
              (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
              (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
    OUT_BATCH(batch,
              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
              (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
    OUT_BATCH(batch,
              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
              (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
    OUT_BATCH(batch,
              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
              (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_cc_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
    OUT_RELOC(batch,
              render_state->cc.state,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              1);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
    OUT_RELOC(batch,
              render_state->cc.blend,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              1);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
    OUT_RELOC(batch,
              render_state->cc.depth_stencil,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              1);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_sampler_state_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
    OUT_RELOC(batch,
              render_state->wm.sampler,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_binding_table(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_depth_buffer_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
    OUT_BATCH(batch,
              (I965_DEPTHFORMAT_D32_FLOAT << 18) |
              (I965_SURFACE_NULL << 29));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_drawing_rectangle(VADriverContextP ctx)
{
    i965_render_drawing_rectangle(ctx);
}

static void
gen7_emit_vs_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* disable VS constant buffer */
    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
    OUT_BATCH(batch, 0); /* without VS kernel */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
}

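/*
 * GEN7 adds the tessellation stages (HS, TE, DS) and stream output to
 * the fixed-function pipeline.  None of them are used for video
 * rendering, so each one is explicitly disabled with zeroed state and
 * a null binding table; the GS is bypassed the same way.
 */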
static void
gen7_emit_bypass_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* bypass GS */
    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
    OUT_BATCH(batch, 0); /* without GS kernel */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    /* disable HS */
    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    /* Disable TE */
    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    /* Disable DS */
    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    /* Disable STREAMOUT */
    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_clip_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* pass-through */
    OUT_BATCH(batch, 0);
}

static void
gen7_emit_sf_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 14);
    OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
    OUT_BATCH(batch,
              (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
              (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
              (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* DW4 */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0); /* DW9 */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
    OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

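/*
 * Program the WM/PS stage.  Unlike GEN6, the PS has its own command on
 * GEN7, and Haswell moved both the maximum-thread-count field and the
 * sample mask, hence the device-dependent shifts below.
 */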
static void
gen7_emit_wm_state(VADriverContextP ctx, int kernel)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
    unsigned int num_samples = 0;

    if (IS_HASWELL(i965->intel.device_id)) {
        max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
        num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
    }

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
    OUT_BATCH(batch,
              GEN7_WM_DISPATCH_ENABLE |
              GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
    OUT_BATCH(batch, 1);
    OUT_BATCH(batch, 0);
    OUT_RELOC(batch,
              render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 8);
    OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
    OUT_RELOC(batch,
              render_state->render_kernels[kernel].bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              0);
    OUT_BATCH(batch,
              (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
              (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
    OUT_BATCH(batch, 0); /* scratch space base offset */
    OUT_BATCH(batch,
              ((render_state->max_wm_threads - 1) << max_threads_shift) | num_samples |
              GEN7_PS_PUSH_CONSTANT_ENABLE |
              GEN7_PS_ATTRIBUTE_ENABLE |
              GEN7_PS_16_DISPATCH_ENABLE);
    OUT_BATCH(batch,
              (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
    OUT_BATCH(batch, 0); /* kernel 1 pointer */
    OUT_BATCH(batch, 0); /* kernel 2 pointer */
    ADVANCE_BATCH(batch);
}

static void
gen7_emit_vertex_element_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    /* Set up our vertex elements, sourced from the single vertex buffer. */
    OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
    /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
    OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
              GEN6_VE0_VALID |
              (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
              (0 << VE0_OFFSET_SHIFT));
    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
              (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
    /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
    OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
              GEN6_VE0_VALID |
              (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
              (8 << VE0_OFFSET_SHIFT));
    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
              (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
              (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
}

static void
gen7_emit_vertices(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
    OUT_BATCH(batch,
              (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
              GEN6_VB0_VERTEXDATA |
              GEN7_VB0_ADDRESS_MODIFYENABLE |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
    OUT_BATCH(batch,
              _3DPRIM_RECTLIST |
              GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
gen7_render_emit_states(VADriverContextP ctx, int kernel)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen7_emit_invariant_states(ctx);
2916     gen7_emit_state_base_address(ctx);
2917     gen7_emit_viewport_state_pointers(ctx);
2918     gen7_emit_urb(ctx);
2919     gen7_emit_cc_state_pointers(ctx);
2920     gen7_emit_sampler_state_pointers(ctx);
2921     gen7_emit_bypass_state(ctx);
2922     gen7_emit_vs_state(ctx);
2923     gen7_emit_clip_state(ctx);
2924     gen7_emit_sf_state(ctx);
2925     gen7_emit_wm_state(ctx, kernel);
2926     gen7_emit_binding_table(ctx);
2927     gen7_emit_depth_buffer_state(ctx);
2928     gen7_emit_drawing_rectangle(ctx);
2929     gen7_emit_vertex_element_state(ctx);
2930     gen7_emit_vertices(ctx);
2931     intel_batchbuffer_end_atomic(batch);
2932 }
2933
static void
gen7_render_put_surface(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    gen7_render_initialize(ctx);
    gen7_render_setup_states(ctx, obj_surface, src_rect, dst_rect, flags);
    i965_clear_dest_region(ctx);
    gen7_render_emit_states(ctx, PS_KERNEL);
    intel_batchbuffer_flush(batch);
}

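/*
 * Subpictures (OSD, subtitles) are alpha-blended over the video with the
 * classic "source over" equation:
 *
 *     dst = src * src.alpha + dst * (1 - src.alpha)
 *
 * i.e. ADD with SRC_ALPHA / INV_SRC_ALPHA factors, with pre- and
 * post-blend clamping to [0, 1].
 */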
static void
gen7_subpicture_render_blend_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_blend_state *blend_state;

    dri_bo_map(render_state->cc.blend, 1);
    assert(render_state->cc.blend->virtual);
    blend_state = render_state->cc.blend->virtual;
    memset(blend_state, 0, sizeof(*blend_state));
    blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
    blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
    blend_state->blend0.blend_enable = 1;
    blend_state->blend1.post_blend_clamp_enable = 1;
    blend_state->blend1.pre_blend_clamp_enable = 1;
    blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
    dri_bo_unmap(render_state->cc.blend);
}

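/*
 * Same fixed pipeline as the video path, but the source surface state
 * points at the subpicture image and the blend state above is used, so
 * the subpicture is composited over the render target rather than
 * copied.
 */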
static void
gen7_subpicture_render_setup_states(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, obj_surface);
    i965_render_sampler(ctx);
    i965_render_cc_viewport(ctx);
    gen7_render_color_calc_state(ctx);
    gen7_subpicture_render_blend_state(ctx);
    gen7_render_depth_stencil_state(ctx);
    i965_subpic_render_upload_constants(ctx, obj_surface);
    i965_subpic_render_upload_vertex(ctx, obj_surface, dst_rect);
}

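/*
 * Composite a single subpicture (selected by subpic_render_idx) on top
 * of the already-rendered surface.  For paletted subpicture formats the
 * image palette is uploaded with full (0xff) alpha before the batch is
 * submitted.
 */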
static void
gen7_render_put_subpicture(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int index = obj_surface->subpic_render_idx;
    struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic[index]);

    assert(obj_subpic);
    gen7_render_initialize(ctx);
    gen7_subpicture_render_setup_states(ctx, obj_surface, src_rect, dst_rect);
    gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
    i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
    intel_batchbuffer_flush(batch);
}


/*
 * global functions
 */
/* forward declaration, implemented in i965_drv_video.c */
VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);
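/*
 * Device-independent entry point used by the driver's vaPutSurface()
 * implementation.  The surface is first run through post-processing
 * (i965_post_processing() may scale or deinterlace into a temporary
 * surface), then handed to the per-generation render path.  A caller is
 * expected to do roughly the following (illustrative sketch only; the
 * real call site lives in the driver's output code, and dest_x/dest_y/
 * dest_w/dest_h stand in for the caller's destination rectangle):
 *
 *     struct object_surface *obj_surface = SURFACE(surface);
 *     VARectangle src = { 0, 0, obj_surface->orig_width, obj_surface->orig_height };
 *     VARectangle dst = { dest_x, dest_y, dest_w, dest_h };
 *
 *     intel_render_put_surface(ctx, surface, obj_surface, &src, &dst, flags);
 */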
void
intel_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int has_done_scaling = 0;
    VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);

    assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));

    if (out_surface_id != VA_INVALID_ID) {
        struct object_surface *new_obj_surface = SURFACE(out_surface_id);

        /* render from the post-processed surface instead of the original */
        if (new_obj_surface && new_obj_surface->bo)
            obj_surface = new_obj_surface;

        /* post-processing already scaled into the destination rectangle,
         * so source and destination now coincide
         */
        if (has_done_scaling)
            src_rect = dst_rect;
    }

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
    else
        i965_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);

    /* the post-processing output was only a temporary; release it */
    if (out_surface_id != VA_INVALID_ID)
        i965_DestroySurfaces(ctx, &out_surface_id, 1);
}

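/*
 * Device-independent subpicture entry point: no post-processing is
 * involved, so this just dispatches to the per-generation path.
 */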
void
intel_render_put_subpicture(
    VADriverContextP   ctx,
    struct object_surface *obj_surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
    else
        i965_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
}

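/*
 * Allocate the resources the render paths above depend on: upload the
 * kernel binaries selected for this generation into GEM buffer objects,
 * allocate the constant (CURBE) buffer, and record the device's maximum
 * number of WM/pixel-shader threads for later state emission.
 */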
bool
i965_render_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    int i;

    /* kernel */
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
                                 sizeof(render_kernels_gen5[0])));
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                 sizeof(render_kernels_gen6[0])));

    if (IS_GEN7(i965->intel.device_id))
        memcpy(render_state->render_kernels,
               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
               sizeof(render_state->render_kernels));
    else if (IS_GEN6(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
    else if (IS_IRONLAKE(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
    else
        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        if (!kernel->size)
            continue;

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size, 0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    /* constant buffer */
    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
                                          "constant buffer",
                                          4096, 64);
    assert(render_state->curbe.bo);

    /* maximum number of WM threads supported by the device, per GT variant */
    if (IS_IVB_GT1(i965->intel.device_id) ||
        IS_HSW_GT1(i965->intel.device_id)) {
        render_state->max_wm_threads = 48;
    } else if (IS_IVB_GT2(i965->intel.device_id) ||
               IS_HSW_GT2(i965->intel.device_id)) {
        render_state->max_wm_threads = 172;
    } else if (IS_SNB_GT1(i965->intel.device_id)) {
        render_state->max_wm_threads = 40;
    } else if (IS_SNB_GT2(i965->intel.device_id)) {
        render_state->max_wm_threads = 80;
    } else if (IS_IRONLAKE(i965->intel.device_id)) {
        render_state->max_wm_threads = 72; /* 12 * 6 */
    } else if (IS_G4X(i965->intel.device_id)) {
        render_state->max_wm_threads = 50; /* 12 * 5 */
    } else {
        /* should never get here !!! */
        assert(0);
    }

    return true;
}

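/*
 * Release every buffer object owned by the render state.  Unreferencing
 * is safe even for buffers that were never allocated, since
 * dri_bo_unreference() ignores NULL.
 */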
void
i965_render_terminate(VADriverContextP ctx)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;

    dri_bo_unreference(render_state->curbe.bo);
    render_state->curbe.bo = NULL;

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    dri_bo_unreference(render_state->vb.vertex_buffer);
    render_state->vb.vertex_buffer = NULL;
    dri_bo_unreference(render_state->vs.state);
    render_state->vs.state = NULL;
    dri_bo_unreference(render_state->sf.state);
    render_state->sf.state = NULL;
    dri_bo_unreference(render_state->wm.sampler);
    render_state->wm.sampler = NULL;
    dri_bo_unreference(render_state->wm.state);
    render_state->wm.state = NULL;
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    render_state->wm.surface_state_binding_table_bo = NULL;
    dri_bo_unreference(render_state->cc.viewport);
    render_state->cc.viewport = NULL;
    dri_bo_unreference(render_state->cc.state);
    render_state->cc.state = NULL;
    dri_bo_unreference(render_state->cc.blend);
    render_state->cc.blend = NULL;
    dri_bo_unreference(render_state->cc.depth_stencil);
    render_state->cc.depth_stencil = NULL;

    if (render_state->draw_region) {
        dri_bo_unreference(render_state->draw_region->bo);
        free(render_state->draw_region);
        render_state->draw_region = NULL;
    }
}
