/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_backend.h>
#include <va/va_dricommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

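/*
 * The .g4b/.g6b/.g7b files included below are precompiled GEN shader
 * binaries (one 128-bit instruction per row of four uint32_t words),
 * generated from the assembly sources under shaders/render/.
 */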
#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   (((nreg) + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
    /* intentionally empty: the SF stage runs no kernel on Gen6+ */
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
    /* intentionally empty: see the Gen6 note above */
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

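/*
 * Layout of wm.surface_state_binding_table_bo: MAX_RENDER_SURFACES padded
 * SURFACE_STATE entries, followed by the binding table, one 32-bit entry
 * per surface pointing back at the matching SURFACE_STATE offset.
 */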
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

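/* Reinterpret the bits of a float as a uint32_t (for OUT_BATCH of
 * floating-point payloads); the union avoids strict-aliasing problems. */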
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum {
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

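/*
 * Static URB partition for the fixed-function pipeline.  Only VS, SF and
 * CS (CURBE constant) entries are allocated; GS and CLIP get none and are
 * disabled.  The hardware fields encode entry sizes as (size - 1).
 */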
#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

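/*
 * Set up the VS unit state.  The VS itself is disabled (vertices pass
 * through unmodified), so only the URB allocation fields matter; Ironlake
 * expects nr_urb_entries divided by four.
 */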
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

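/*
 * Set up the SF unit state: a single-threaded SF kernel, one URB entry
 * read per vertex, no viewport transform, no culling or scissoring.
 */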
static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

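/*
 * Program every sampler with bilinear min/mag filtering and coordinate
 * clamping, which is all that video scaling needs.
 */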
static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}
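
/*
 * Set up the WM (pixel shader) unit for subpicture blending: points at the
 * PS_SUBPIC kernel and the shared sampler state.  Ironlake requires the
 * sampler count and binding table count fields here to be zero.
 */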
static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

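/*
 * Same as above, but for the main video path: points at the planar-YUV PS
 * kernel and reads one CURBE constant (the interleaved-UV flag) from the URB.
 */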
static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

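/*
 * Depth is unused here, so the CC viewport just opens the depth range wide
 * enough that nothing is ever clipped by it.
 */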
static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

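/*
 * CC unit for subpicture blending: standard "over" compositing, i.e.
 * final = src * src_alpha + dst * (1 - src_alpha), with stencil, depth,
 * alpha test and dithering all disabled.
 */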
static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0;        /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;          /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* COPY */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

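/*
 * CC unit for the main video path: blending and all tests are off; color
 * writes pass straight through via logic op COPY.
 */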
static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* COPY */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

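/* Translate an i915 tiling mode into SURFACE_STATE tiling fields. */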
static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(struct i965_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_render_set_surface_state(struct gen7_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

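/*
 * Write the SURFACE_STATE for source surface 'index' into the combined
 * surface-state/binding-table BO, emit the relocation for its base address
 * and point the binding table entry at it.
 */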
static void
i965_render_src_surface_state(VADriverContextP ctx,
                              int index,
                              dri_bo *region,
                              unsigned long offset,
                              int w, int h,
                              int pitch, int format)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

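/*
 * Bind the source planes.  Each plane is bound twice (the PS kernel samples
 * every plane through two binding slots).  For a w x h surface the planes
 * sit at fixed offsets in the BO:
 *
 *   Y             at 0,                     pitch w
 *   U/V (planar)  at w*h and w*h + w*h/4,   pitch w/2
 *   UV (NV12)     at w*h,                   pitch w
 *
 * For YV12 the V plane precedes U in memory, so the binding indices are
 * swapped relative to the I420 case.
 */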
static void
i965_render_src_surfaces_state(VADriverContextP ctx,
                               VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int w, h;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    w = obj_surface->width;
    h = obj_surface->height;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);

    if (obj_surface->fourcc == VA_FOURCC('Y','V','1','2')) {
        int u3 = 5, u4 = 6, v5 = 3, v6 = 4;

        i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U */
        i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);     /* V */
        i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N','V','1','2')) {
            i965_render_src_surface_state(ctx, 3, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM); /* UV */
            i965_render_src_surface_state(ctx, 4, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM);
        } else {
            int u3 = 3, u4 = 4, v5 = 5, v6 = 6;

            i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U */
            i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
            i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);     /* V */
            i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        }
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;
    dri_bo *subpic_region;

    assert(obj_surface);
    assert(obj_surface->bo);

    obj_subpic = SUBPIC(obj_surface->subpic);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
}

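/*
 * SURFACE_STATE for the render target: B5G6R5 for 16-bpp drawables,
 * B8G8R8A8 otherwise, with a render-target write relocation.
 */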
static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

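/*
 * Upload one RECTLIST triangle for the subpicture.  Each vertex is four
 * floats, texture coordinates (tx, ty) followed by screen coordinates
 * (x, y); the hardware derives the fourth corner of the rectangle from
 * the three vertices emitted here.
 */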
static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic);
    VARectangle dst_rect;
    float *vb, tx1, tx2, ty1, ty2, x1, x2, y1, y2;
    int i = 0;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

    tx1 = (float)obj_subpic->src_rect.x / obj_subpic->width;
    ty1 = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tx2 = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    ty2 = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    x1 = (float)dst_rect.x;
    y1 = (float)dst_rect.y;
    x2 = (float)(dst_rect.x + dst_rect.width);
    y2 = (float)(dst_rect.y + dst_rect.height);

    vb[i++] = tx2;
    vb[i++] = ty2;
    vb[i++] = x2;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty2;
    vb[i++] = x1;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty1;
    vb[i++] = x1;
    vb[i++] = y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float *vb;

    float u1, v1, u2, v2;
    int i, width, height;
    int box_x1 = dest_region->x + dst_rect->x;
    int box_y1 = dest_region->y + dst_rect->y;
    int box_x2 = box_x1 + dst_rect->width;
    int box_y2 = box_y1 + dst_rect->height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);
    width = obj_surface->orig_width;
    height = obj_surface->orig_height;

    u1 = (float)src_rect->x / width;
    v1 = (float)src_rect->y / height;
    u2 = (float)(src_rect->x + src_rect->width) / width;
    v2 = (float)(src_rect->y + src_rect->height) / height;

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

    i = 0;
    vb[i++] = u2;
    vb[i++] = v2;
    vb[i++] = (float)box_x2;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v2;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v1;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

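/*
 * Upload the single CURBE constant read by the planar PS kernel: a flag
 * telling it whether chroma is interleaved (NV12) or in separate planes.
 */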
static void
i965_render_upload_constants(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;

    if (render_state->curbe.upload)
        return;

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (render_state->interleaved_uv)
        *constant_buffer = 1;
    else
        *constant_buffer = 0;

    dri_bo_unmap(render_state->curbe.bo);
    render_state->curbe.upload = 1;
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

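/*
 * Program the URB fences.  With the sizes defined above the partition works
 * out to (in URB rows): VS [0,8), GS [8,8), CLIP [8,8), SF [8,10),
 * CS [10,11); the empty GS and CLIP ranges match the disabled stages.
 */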
static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |  /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));           /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | ((dest_region->height - 1) << 16));
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: bits 0-23 hold the color, bits 24-31 the alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

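/*
 * Emit the vertex buffer state and the 3DPRIMITIVE: a RECTLIST drawn from
 * three sequential vertices of 16 bytes each (see the vertex layout above).
 */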
static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

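/*
 * Clear the destination region to black with an XY_COLOR_BLT before
 * rendering; on Gen6+ the blit has to go to the separate BLT ring.
 */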
static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
    br13 = 0xf0 << 16;
    pitch = dest_region->pitch;

    if (dest_region->cpp == 4) {
        br13 |= BR13_8888;
        blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
    } else {
        assert(dest_region->cpp == 2);
        br13 |= BR13_565;
    }

    if (dest_region->tiling != I915_TILING_NONE) {
        blt_cmd |= XY_COLOR_BLT_DST_TILED;
        pitch /= 4;
    }

    br13 |= pitch;

    if (IS_GEN6(i965->intel.device_id) ||
        IS_GEN7(i965->intel.device_id)) {
        intel_batchbuffer_start_atomic_blt(batch, 24);
        BEGIN_BLT_BATCH(batch, 6);
    } else {
        intel_batchbuffer_start_atomic(batch, 24);
        BEGIN_BATCH(batch, 6);
    }

    OUT_BATCH(batch, blt_cmd);
    OUT_BATCH(batch, br13);
    OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
    OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
              (dest_region->x + dest_region->width));
    OUT_RELOC(batch, dest_region->bo,
              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
              0);
    OUT_BATCH(batch, 0x0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_surface_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    i965_clear_dest_region(ctx);
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_constant_buffer(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_subpic_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

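/*
 * (Re)allocate all indirect-state buffer objects for the Gen4/5 path, so
 * each rendering pass starts from fresh, correctly aligned BOs.
 */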
1442 static void 
1443 i965_render_initialize(VADriverContextP ctx)
1444 {
1445     struct i965_driver_data *i965 = i965_driver_data(ctx);
1446     struct i965_render_state *render_state = &i965->render_state;
1447     dri_bo *bo;
1448
1449     /* VERTEX BUFFER */
1450     dri_bo_unreference(render_state->vb.vertex_buffer);
1451     bo = dri_bo_alloc(i965->intel.bufmgr,
1452                       "vertex buffer",
1453                       4096,
1454                       4096);
1455     assert(bo);
1456     render_state->vb.vertex_buffer = bo;
1457
1458     /* VS */
1459     dri_bo_unreference(render_state->vs.state);
1460     bo = dri_bo_alloc(i965->intel.bufmgr,
1461                       "vs state",
1462                       sizeof(struct i965_vs_unit_state),
1463                       64);
1464     assert(bo);
1465     render_state->vs.state = bo;
1466
1467     /* GS */
1468     /* CLIP */
1469     /* SF */
1470     dri_bo_unreference(render_state->sf.state);
1471     bo = dri_bo_alloc(i965->intel.bufmgr,
1472                       "sf state",
1473                       sizeof(struct i965_sf_unit_state),
1474                       64);
1475     assert(bo);
1476     render_state->sf.state = bo;
1477
1478     /* WM */
1479     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1480     bo = dri_bo_alloc(i965->intel.bufmgr,
1481                       "surface state & binding table",
1482                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1483                       4096);
1484     assert(bo);
1485     render_state->wm.surface_state_binding_table_bo = bo;
1486
1487     dri_bo_unreference(render_state->wm.sampler);
1488     bo = dri_bo_alloc(i965->intel.bufmgr,
1489                       "sampler state",
1490                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1491                       64);
1492     assert(bo);
1493     render_state->wm.sampler = bo;
1494     render_state->wm.sampler_count = 0;
1495
1496     dri_bo_unreference(render_state->wm.state);
1497     bo = dri_bo_alloc(i965->intel.bufmgr,
1498                       "wm state",
1499                       sizeof(struct i965_wm_unit_state),
1500                       64);
1501     assert(bo);
1502     render_state->wm.state = bo;
1503
1504     /* COLOR CALCULATOR */
1505     dri_bo_unreference(render_state->cc.state);
1506     bo = dri_bo_alloc(i965->intel.bufmgr,
1507                       "color calc state",
1508                       sizeof(struct i965_cc_unit_state),
1509                       64);
1510     assert(bo);
1511     render_state->cc.state = bo;
1512
1513     dri_bo_unreference(render_state->cc.viewport);
1514     bo = dri_bo_alloc(i965->intel.bufmgr,
1515                       "cc viewport",
1516                       sizeof(struct i965_cc_viewport),
1517                       64);
1518     assert(bo);
1519     render_state->cc.viewport = bo;
1520 }
1521
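     /*
      * Top-level pre-GEN6 PutSurface: allocate fresh state buffers, fill in
      * the surface, sampler and vertex data for the src_rect -> dst_rect
      * blit, emit the pipeline, then flush the batch so the blit is
      * submitted before returning to the caller.
      */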
1522 static void
1523 i965_render_put_surface(
1524     VADriverContextP   ctx,
1525     VASurfaceID        surface,
1526     const VARectangle *src_rect,
1527     const VARectangle *dst_rect,
1528     unsigned int       flags
1529 )
1530 {
1531     struct i965_driver_data *i965 = i965_driver_data(ctx);
1532     struct intel_batchbuffer *batch = i965->batch;
1533
1534     i965_render_initialize(ctx);
1535     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect);
1536     i965_surface_render_pipeline_setup(ctx);
1537     intel_batchbuffer_flush(batch);
1538 }
1539
1540 static void
1541 i965_render_put_subpicture(
1542     VADriverContextP   ctx,
1543     VASurfaceID        surface,
1544     const VARectangle *src_rect,
1545     const VARectangle *dst_rect
1546 )
1547 {
1548     struct i965_driver_data *i965 = i965_driver_data(ctx);
1549     struct intel_batchbuffer *batch = i965->batch;
1550     struct object_surface *obj_surface = SURFACE(surface);
1551     struct object_subpic *obj_subpic;
1552
1553     assert(obj_surface);
         obj_subpic = SUBPIC(obj_surface->subpic);
         assert(obj_subpic);
1554
1555     i965_render_initialize(ctx);
1556     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1557     i965_subpic_render_pipeline_setup(ctx);
1558     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1559     intel_batchbuffer_flush(batch);
1560 }
1561
1562 /*
1563  * for GEN6+
1564  */
1565 static void 
1566 gen6_render_initialize(VADriverContextP ctx)
1567 {
1568     struct i965_driver_data *i965 = i965_driver_data(ctx);
1569     struct i965_render_state *render_state = &i965->render_state;
1570     dri_bo *bo;
1571
1572     /* VERTEX BUFFER */
1573     dri_bo_unreference(render_state->vb.vertex_buffer);
1574     bo = dri_bo_alloc(i965->intel.bufmgr,
1575                       "vertex buffer",
1576                       4096,
1577                       4096);
1578     assert(bo);
1579     render_state->vb.vertex_buffer = bo;
1580
1581     /* WM */
1582     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1583     bo = dri_bo_alloc(i965->intel.bufmgr,
1584                       "surface state & binding table",
1585                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1586                       4096);
1587     assert(bo);
1588     render_state->wm.surface_state_binding_table_bo = bo;
1589
1590     dri_bo_unreference(render_state->wm.sampler);
1591     bo = dri_bo_alloc(i965->intel.bufmgr,
1592                       "sampler state",
1593                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1594                       4096);
1595     assert(bo);
1596     render_state->wm.sampler = bo;
1597     render_state->wm.sampler_count = 0;
1598
1599     /* COLOR CALCULATOR */
1600     dri_bo_unreference(render_state->cc.state);
1601     bo = dri_bo_alloc(i965->intel.bufmgr,
1602                       "color calc state",
1603                       sizeof(struct gen6_color_calc_state),
1604                       4096);
1605     assert(bo);
1606     render_state->cc.state = bo;
1607
1608     /* CC VIEWPORT */
1609     dri_bo_unreference(render_state->cc.viewport);
1610     bo = dri_bo_alloc(i965->intel.bufmgr,
1611                       "cc viewport",
1612                       sizeof(struct i965_cc_viewport),
1613                       4096);
1614     assert(bo);
1615     render_state->cc.viewport = bo;
1616
1617     /* BLEND STATE */
1618     dri_bo_unreference(render_state->cc.blend);
1619     bo = dri_bo_alloc(i965->intel.bufmgr,
1620                       "blend state",
1621                       sizeof(struct gen6_blend_state),
1622                       4096);
1623     assert(bo);
1624     render_state->cc.blend = bo;
1625
1626     /* DEPTH & STENCIL STATE */
1627     dri_bo_unreference(render_state->cc.depth_stencil);
1628     bo = dri_bo_alloc(i965->intel.bufmgr,
1629                       "depth & stencil state",
1630                       sizeof(struct gen6_depth_stencil_state),
1631                       4096);
1632     assert(bo);
1633     render_state->cc.depth_stencil = bo;
1634 }
1635
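     /*
      * From GEN6 on, color-calc/blend/depth-stencil state lives in small
      * heap objects filled through a CPU mapping instead of being emitted
      * inline.  The helpers below all follow the same pattern:
      *
      *     dri_bo_map(bo, 1);                  (1 = map for writing)
      *     state = bo->virtual;
      *     memset(state, 0, sizeof(*state));
      *     ... set fields ...
      *     dri_bo_unmap(bo);
      *
      * The constant color programmed here (R=1, G=0, B=1, A=1, magenta) is
      * only read if a blend factor references it; none of the blend setups
      * in this file do, so it effectively acts as a debugging marker.
      */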
1636 static void
1637 gen6_render_color_calc_state(VADriverContextP ctx)
1638 {
1639     struct i965_driver_data *i965 = i965_driver_data(ctx);
1640     struct i965_render_state *render_state = &i965->render_state;
1641     struct gen6_color_calc_state *color_calc_state;
1642     
1643     dri_bo_map(render_state->cc.state, 1);
1644     assert(render_state->cc.state->virtual);
1645     color_calc_state = render_state->cc.state->virtual;
1646     memset(color_calc_state, 0, sizeof(*color_calc_state));
1647     color_calc_state->constant_r = 1.0;
1648     color_calc_state->constant_g = 0.0;
1649     color_calc_state->constant_b = 1.0;
1650     color_calc_state->constant_a = 1.0;
1651     dri_bo_unmap(render_state->cc.state);
1652 }
1653
1654 static void
1655 gen6_render_blend_state(VADriverContextP ctx)
1656 {
1657     struct i965_driver_data *i965 = i965_driver_data(ctx);
1658     struct i965_render_state *render_state = &i965->render_state;
1659     struct gen6_blend_state *blend_state;
1660     
1661     dri_bo_map(render_state->cc.blend, 1);
1662     assert(render_state->cc.blend->virtual);
1663     blend_state = render_state->cc.blend->virtual;
1664     memset(blend_state, 0, sizeof(*blend_state));
1665     blend_state->blend1.logic_op_enable = 1;
1666     blend_state->blend1.logic_op_func = 0xc;
1667     dri_bo_unmap(render_state->cc.blend);
1668 }
1669
1670 static void
1671 gen6_render_depth_stencil_state(VADriverContextP ctx)
1672 {
1673     struct i965_driver_data *i965 = i965_driver_data(ctx);
1674     struct i965_render_state *render_state = &i965->render_state;
1675     struct gen6_depth_stencil_state *depth_stencil_state;
1676     
1677     dri_bo_map(render_state->cc.depth_stencil, 1);
1678     assert(render_state->cc.depth_stencil->virtual);
1679     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1680     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1681     dri_bo_unmap(render_state->cc.depth_stencil);
1682 }
1683
1684 static void
1685 gen6_render_setup_states(
1686     VADriverContextP   ctx,
1687     VASurfaceID        surface,
1688     const VARectangle *src_rect,
1689     const VARectangle *dst_rect
1690 )
1691 {
1692     i965_render_dest_surface_state(ctx, 0);
1693     i965_render_src_surfaces_state(ctx, surface);
1694     i965_render_sampler(ctx);
1695     i965_render_cc_viewport(ctx);
1696     gen6_render_color_calc_state(ctx);
1697     gen6_render_blend_state(ctx);
1698     gen6_render_depth_stencil_state(ctx);
1699     i965_render_upload_constants(ctx);
1700     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1701 }
1702
1703 static void
1704 gen6_emit_invariant_states(VADriverContextP ctx)
1705 {
1706     struct i965_driver_data *i965 = i965_driver_data(ctx);
1707     struct intel_batchbuffer *batch = i965->batch;
1708
1709     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1710
1711     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1712     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1713               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1714     OUT_BATCH(batch, 0);
1715
1716     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1717     OUT_BATCH(batch, 1);
1718
1719     /* Set system instruction pointer */
1720     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1721     OUT_BATCH(batch, 0);
1722 }
1723
1724 static void
1725 gen6_emit_state_base_address(VADriverContextP ctx)
1726 {
1727     struct i965_driver_data *i965 = i965_driver_data(ctx);
1728     struct intel_batchbuffer *batch = i965->batch;
1729     struct i965_render_state *render_state = &i965->render_state;
1730
1731     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1732     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1733     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1734     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1735     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1736     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1737     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1738     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1739     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1740     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1741 }
1742
1743 static void
1744 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1745 {
1746     struct i965_driver_data *i965 = i965_driver_data(ctx);
1747     struct intel_batchbuffer *batch = i965->batch;
1748     struct i965_render_state *render_state = &i965->render_state;
1749
1750     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1751               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1752               (4 - 2));
1753     OUT_BATCH(batch, 0);
1754     OUT_BATCH(batch, 0);
1755     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1756 }
1757
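     /*
      * Minimal URB layout for GEN6: only the VS section is allocated (24
      * entries is the hardware minimum noted below), and GS gets no entries
      * since the GS stage is not used.
      */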
1758 static void
1759 gen6_emit_urb(VADriverContextP ctx)
1760 {
1761     struct i965_driver_data *i965 = i965_driver_data(ctx);
1762     struct intel_batchbuffer *batch = i965->batch;
1763
1764     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1765     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1766               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1767     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1768               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1769 }
1770
1771 static void
1772 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1773 {
1774     struct i965_driver_data *i965 = i965_driver_data(ctx);
1775     struct intel_batchbuffer *batch = i965->batch;
1776     struct i965_render_state *render_state = &i965->render_state;
1777
1778     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1779     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1780     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1781     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1782 }
1783
1784 static void
1785 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1786 {
1787     struct i965_driver_data *i965 = i965_driver_data(ctx);
1788     struct intel_batchbuffer *batch = i965->batch;
1789     struct i965_render_state *render_state = &i965->render_state;
1790
1791     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1792               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1793               (4 - 2));
1794     OUT_BATCH(batch, 0); /* VS */
1795     OUT_BATCH(batch, 0); /* GS */
1796     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1797 }
1798
1799 static void
1800 gen6_emit_binding_table(VADriverContextP ctx)
1801 {
1802     struct i965_driver_data *i965 = i965_driver_data(ctx);
1803     struct intel_batchbuffer *batch = i965->batch;
1804
1805     /* Binding table pointers */
1806     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1807               GEN6_BINDING_TABLE_MODIFY_PS |
1808               (4 - 2));
1809     OUT_BATCH(batch, 0);                /* vs */
1810     OUT_BATCH(batch, 0);                /* gs */
1811     /* Only the PS uses the binding table */
1812     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1813 }
1814
1815 static void
1816 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1817 {
1818     struct i965_driver_data *i965 = i965_driver_data(ctx);
1819     struct intel_batchbuffer *batch = i965->batch;
1820
1821     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1822     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1823               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1824     OUT_BATCH(batch, 0);
1825     OUT_BATCH(batch, 0);
1826     OUT_BATCH(batch, 0);
1827     OUT_BATCH(batch, 0);
1828     OUT_BATCH(batch, 0);
1829
1830     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1831     OUT_BATCH(batch, 0);
1832 }
1833
1834 static void
1835 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1836 {
1837     i965_render_drawing_rectangle(ctx);
1838 }
1839
1840 static void 
1841 gen6_emit_vs_state(VADriverContextP ctx)
1842 {
1843     struct i965_driver_data *i965 = i965_driver_data(ctx);
1844     struct intel_batchbuffer *batch = i965->batch;
1845
1846     /* disable VS constant buffer */
1847     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1848     OUT_BATCH(batch, 0);
1849     OUT_BATCH(batch, 0);
1850     OUT_BATCH(batch, 0);
1851     OUT_BATCH(batch, 0);
1852         
1853     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1854     OUT_BATCH(batch, 0); /* without VS kernel */
1855     OUT_BATCH(batch, 0);
1856     OUT_BATCH(batch, 0);
1857     OUT_BATCH(batch, 0);
1858     OUT_BATCH(batch, 0); /* pass-through */
1859 }
1860
1861 static void 
1862 gen6_emit_gs_state(VADriverContextP ctx)
1863 {
1864     struct i965_driver_data *i965 = i965_driver_data(ctx);
1865     struct intel_batchbuffer *batch = i965->batch;
1866
1867     /* disable GS constant buffer */
1868     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1869     OUT_BATCH(batch, 0);
1870     OUT_BATCH(batch, 0);
1871     OUT_BATCH(batch, 0);
1872     OUT_BATCH(batch, 0);
1873         
1874     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1875     OUT_BATCH(batch, 0); /* without GS kernel */
1876     OUT_BATCH(batch, 0);
1877     OUT_BATCH(batch, 0);
1878     OUT_BATCH(batch, 0);
1879     OUT_BATCH(batch, 0);
1880     OUT_BATCH(batch, 0); /* pass-through */
1881 }
1882
1883 static void 
1884 gen6_emit_clip_state(VADriverContextP ctx)
1885 {
1886     struct i965_driver_data *i965 = i965_driver_data(ctx);
1887     struct intel_batchbuffer *batch = i965->batch;
1888
1889     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1890     OUT_BATCH(batch, 0);
1891     OUT_BATCH(batch, 0); /* pass-through */
1892     OUT_BATCH(batch, 0);
1893 }
1894
1895 static void 
1896 gen6_emit_sf_state(VADriverContextP ctx)
1897 {
1898     struct i965_driver_data *i965 = i965_driver_data(ctx);
1899     struct intel_batchbuffer *batch = i965->batch;
1900
1901     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
1902     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
1903               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
1904               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
1905     OUT_BATCH(batch, 0);
1906     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
1907     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
1908     OUT_BATCH(batch, 0);
1909     OUT_BATCH(batch, 0);
1910     OUT_BATCH(batch, 0);
1911     OUT_BATCH(batch, 0);
1912     OUT_BATCH(batch, 0); /* DW9 */
1913     OUT_BATCH(batch, 0);
1914     OUT_BATCH(batch, 0);
1915     OUT_BATCH(batch, 0);
1916     OUT_BATCH(batch, 0);
1917     OUT_BATCH(batch, 0); /* DW14 */
1918     OUT_BATCH(batch, 0);
1919     OUT_BATCH(batch, 0);
1920     OUT_BATCH(batch, 0);
1921     OUT_BATCH(batch, 0);
1922     OUT_BATCH(batch, 0); /* DW19 */
1923 }
1924
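     /*
      * WM setup: constant buffer 0 points at the CURBE bo filled by
      * i965_render_upload_constants(), and the selected pixel shader kernel
      * is bound.  The dispatch parameters mirror the kernel binaries: the
      * payload starts at GRF 6, only SIMD16 dispatch is enabled, and the
      * thread count is capped at 40, which fits the smallest GEN6 part.
      */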
1925 static void 
1926 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
1927 {
1928     struct i965_driver_data *i965 = i965_driver_data(ctx);
1929     struct intel_batchbuffer *batch = i965->batch;
1930     struct i965_render_state *render_state = &i965->render_state;
1931
1932     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
1933               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
1934               (5 - 2));
1935     OUT_RELOC(batch, 
1936               render_state->curbe.bo,
1937               I915_GEM_DOMAIN_INSTRUCTION, 0,
1938               0);
1939     OUT_BATCH(batch, 0);
1940     OUT_BATCH(batch, 0);
1941     OUT_BATCH(batch, 0);
1942
1943     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
1944     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
1945               I915_GEM_DOMAIN_INSTRUCTION, 0,
1946               0);
1947     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
1948               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
1949     OUT_BATCH(batch, 0);
1950     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
1951     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
1952               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
1953               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
1954     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
1955               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
1956     OUT_BATCH(batch, 0);
1957     OUT_BATCH(batch, 0);
1958 }
1959
1960 static void
1961 gen6_emit_vertex_element_state(VADriverContextP ctx)
1962 {
1963     struct i965_driver_data *i965 = i965_driver_data(ctx);
1964     struct intel_batchbuffer *batch = i965->batch;
1965
1966     /* Set up our vertex elements, sourced from the single vertex buffer. */
1967     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
1968     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
1969     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1970               GEN6_VE0_VALID |
1971               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1972               (0 << VE0_OFFSET_SHIFT));
1973     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
1974               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1975               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1976               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1977     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
1978     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1979               GEN6_VE0_VALID |
1980               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1981               (8 << VE0_OFFSET_SHIFT));
1982     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
1983               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1984               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1985               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1986 }
1987
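     /*
      * Bind the vertex buffer and emit the RECTLIST primitive.  A RECTLIST
      * takes three vertices (the fourth corner is implicit), read
      * sequentially with the (x, y, s, t) layout declared above, so the
      * buffer filled by i965_render_upload_vertex() conceptually looks like
      *
      *     float vb[3][4] = {
      *         { x2, y2, s1, t1 },      bottom-right
      *         { x1, y2, s0, t1 },      bottom-left
      *         { x1, y1, s0, t0 },      top-left
      *     };
      *
      * (illustrative only; see the upload code for the exact order).  The
      * second OUT_RELOC supplies the buffer end address, 12 floats in.
      */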
1988 static void
1989 gen6_emit_vertices(VADriverContextP ctx)
1990 {
1991     struct i965_driver_data *i965 = i965_driver_data(ctx);
1992     struct intel_batchbuffer *batch = i965->batch;
1993     struct i965_render_state *render_state = &i965->render_state;
1994
1995     BEGIN_BATCH(batch, 11);
1996     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
1997     OUT_BATCH(batch, 
1998               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
1999               GEN6_VB0_VERTEXDATA |
2000               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2001     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2002     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2003     OUT_BATCH(batch, 0);
2004
2005     OUT_BATCH(batch, 
2006               CMD_3DPRIMITIVE |
2007               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2008               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2009               (0 << 9) |
2010               (6 - 2));
2011     OUT_BATCH(batch, 3); /* vertex count per instance */
2012     OUT_BATCH(batch, 0); /* start vertex offset */
2013     OUT_BATCH(batch, 1); /* single instance */
2014     OUT_BATCH(batch, 0); /* start instance location */
2015     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2016     ADVANCE_BATCH(batch);
2017 }
2018
2019 static void
2020 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2021 {
2022     struct i965_driver_data *i965 = i965_driver_data(ctx);
2023     struct intel_batchbuffer *batch = i965->batch;
2024
2025     intel_batchbuffer_start_atomic(batch, 0x1000);
2026     intel_batchbuffer_emit_mi_flush(batch);
2027     gen6_emit_invariant_states(ctx);
2028     gen6_emit_state_base_address(ctx);
2029     gen6_emit_viewport_state_pointers(ctx);
2030     gen6_emit_urb(ctx);
2031     gen6_emit_cc_state_pointers(ctx);
2032     gen6_emit_sampler_state_pointers(ctx);
2033     gen6_emit_vs_state(ctx);
2034     gen6_emit_gs_state(ctx);
2035     gen6_emit_clip_state(ctx);
2036     gen6_emit_sf_state(ctx);
2037     gen6_emit_wm_state(ctx, kernel);
2038     gen6_emit_binding_table(ctx);
2039     gen6_emit_depth_buffer_state(ctx);
2040     gen6_emit_drawing_rectangle(ctx);
2041     gen6_emit_vertex_element_state(ctx);
2042     gen6_emit_vertices(ctx);
2043     intel_batchbuffer_end_atomic(batch);
2044 }
2045
2046 static void
2047 gen6_render_put_surface(
2048     VADriverContextP   ctx,
2049     VASurfaceID        surface,
2050     const VARectangle *src_rect,
2051     const VARectangle *dst_rect,
2052     unsigned int       flags
2053 )
2054 {
2055     struct i965_driver_data *i965 = i965_driver_data(ctx);
2056     struct intel_batchbuffer *batch = i965->batch;
2057
2058     gen6_render_initialize(ctx);
2059     gen6_render_setup_states(ctx, surface, src_rect, dst_rect);
2060     i965_clear_dest_region(ctx);
2061     gen6_render_emit_states(ctx, PS_KERNEL);
2062     intel_batchbuffer_flush(batch);
2063 }
2064
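     /*
      * Subpictures are composited with standard "over" alpha blending,
      *
      *     dst = src * src_alpha + dst * (1 - src_alpha)
      *
      * rather than the logic-op COPY used on the main surface path, with
      * pre- and post-blend clamping to keep results in [0, 1].
      */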
2065 static void
2066 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2067 {
2068     struct i965_driver_data *i965 = i965_driver_data(ctx);
2069     struct i965_render_state *render_state = &i965->render_state;
2070     struct gen6_blend_state *blend_state;
2071
2073     dri_bo_map(render_state->cc.blend, 1);
2074     assert(render_state->cc.blend->virtual);
2075     blend_state = render_state->cc.blend->virtual;
2076     memset(blend_state, 0, sizeof(*blend_state));
2077     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2078     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2079     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2080     blend_state->blend0.blend_enable = 1;
2081     blend_state->blend1.post_blend_clamp_enable = 1;
2082     blend_state->blend1.pre_blend_clamp_enable = 1;
2083     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2084     dri_bo_unmap(render_state->cc.blend);
2085 }
2086
2087 static void
2088 gen6_subpicture_render_setup_states(
2089     VADriverContextP   ctx,
2090     VASurfaceID        surface,
2091     const VARectangle *src_rect,
2092     const VARectangle *dst_rect
2093 )
2094 {
2095     i965_render_dest_surface_state(ctx, 0);
2096     i965_subpic_render_src_surfaces_state(ctx, surface);
2097     i965_render_sampler(ctx);
2098     i965_render_cc_viewport(ctx);
2099     gen6_render_color_calc_state(ctx);
2100     gen6_subpicture_render_blend_state(ctx);
2101     gen6_render_depth_stencil_state(ctx);
2102     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2103 }
2104
2105 static void
2106 gen6_render_put_subpicture(
2107     VADriverContextP   ctx,
2108     VASurfaceID        surface,
2109     const VARectangle *src_rect,
2110     const VARectangle *dst_rect
2111 )
2112 {
2113     struct i965_driver_data *i965 = i965_driver_data(ctx);
2114     struct intel_batchbuffer *batch = i965->batch;
2115     struct object_surface *obj_surface = SURFACE(surface);
2116     struct object_subpic *obj_subpic;
2117
2118     assert(obj_surface);
         obj_subpic = SUBPIC(obj_surface->subpic);
         assert(obj_subpic);
2119     gen6_render_initialize(ctx);
2120     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2121     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2122     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2123     intel_batchbuffer_flush(batch);
2124 }
2125
2126 /*
2127  * for GEN7
2128  */
2129 static void 
2130 gen7_render_initialize(VADriverContextP ctx)
2131 {
2132     struct i965_driver_data *i965 = i965_driver_data(ctx);
2133     struct i965_render_state *render_state = &i965->render_state;
2134     dri_bo *bo;
2135
2136     /* VERTEX BUFFER */
2137     dri_bo_unreference(render_state->vb.vertex_buffer);
2138     bo = dri_bo_alloc(i965->intel.bufmgr,
2139                       "vertex buffer",
2140                       4096,
2141                       4096);
2142     assert(bo);
2143     render_state->vb.vertex_buffer = bo;
2144
2145     /* WM */
2146     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2147     bo = dri_bo_alloc(i965->intel.bufmgr,
2148                       "surface state & binding table",
2149                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2150                       4096);
2151     assert(bo);
2152     render_state->wm.surface_state_binding_table_bo = bo;
2153
2154     dri_bo_unreference(render_state->wm.sampler);
2155     bo = dri_bo_alloc(i965->intel.bufmgr,
2156                       "sampler state",
2157                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2158                       4096);
2159     assert(bo);
2160     render_state->wm.sampler = bo;
2161     render_state->wm.sampler_count = 0;
2162
2163     /* COLOR CALCULATOR */
2164     dri_bo_unreference(render_state->cc.state);
2165     bo = dri_bo_alloc(i965->intel.bufmgr,
2166                       "color calc state",
2167                       sizeof(struct gen6_color_calc_state),
2168                       4096);
2169     assert(bo);
2170     render_state->cc.state = bo;
2171
2172     /* CC VIEWPORT */
2173     dri_bo_unreference(render_state->cc.viewport);
2174     bo = dri_bo_alloc(i965->intel.bufmgr,
2175                       "cc viewport",
2176                       sizeof(struct i965_cc_viewport),
2177                       4096);
2178     assert(bo);
2179     render_state->cc.viewport = bo;
2180
2181     /* BLEND STATE */
2182     dri_bo_unreference(render_state->cc.blend);
2183     bo = dri_bo_alloc(i965->intel.bufmgr,
2184                       "blend state",
2185                       sizeof(struct gen6_blend_state),
2186                       4096);
2187     assert(bo);
2188     render_state->cc.blend = bo;
2189
2190     /* DEPTH & STENCIL STATE */
2191     dri_bo_unreference(render_state->cc.depth_stencil);
2192     bo = dri_bo_alloc(i965->intel.bufmgr,
2193                       "depth & stencil state",
2194                       sizeof(struct gen6_depth_stencil_state),
2195                       4096);
2196     assert(bo);
2197     render_state->cc.depth_stencil = bo;
2198 }
2199
2200 static void
2201 gen7_render_color_calc_state(VADriverContextP ctx)
2202 {
2203     struct i965_driver_data *i965 = i965_driver_data(ctx);
2204     struct i965_render_state *render_state = &i965->render_state;
2205     struct gen6_color_calc_state *color_calc_state;
2206     
2207     dri_bo_map(render_state->cc.state, 1);
2208     assert(render_state->cc.state->virtual);
2209     color_calc_state = render_state->cc.state->virtual;
2210     memset(color_calc_state, 0, sizeof(*color_calc_state));
2211     color_calc_state->constant_r = 1.0;
2212     color_calc_state->constant_g = 0.0;
2213     color_calc_state->constant_b = 1.0;
2214     color_calc_state->constant_a = 1.0;
2215     dri_bo_unmap(render_state->cc.state);
2216 }
2217
2218 static void
2219 gen7_render_blend_state(VADriverContextP ctx)
2220 {
2221     struct i965_driver_data *i965 = i965_driver_data(ctx);
2222     struct i965_render_state *render_state = &i965->render_state;
2223     struct gen6_blend_state *blend_state;
2224     
2225     dri_bo_map(render_state->cc.blend, 1);
2226     assert(render_state->cc.blend->virtual);
2227     blend_state = render_state->cc.blend->virtual;
2228     memset(blend_state, 0, sizeof(*blend_state));
2229     blend_state->blend1.logic_op_enable = 1;
2230     blend_state->blend1.logic_op_func = 0xc;
2231     blend_state->blend1.pre_blend_clamp_enable = 1;
2232     dri_bo_unmap(render_state->cc.blend);
2233 }
2234
2235 static void
2236 gen7_render_depth_stencil_state(VADriverContextP ctx)
2237 {
2238     struct i965_driver_data *i965 = i965_driver_data(ctx);
2239     struct i965_render_state *render_state = &i965->render_state;
2240     struct gen6_depth_stencil_state *depth_stencil_state;
2241     
2242     dri_bo_map(render_state->cc.depth_stencil, 1);
2243     assert(render_state->cc.depth_stencil->virtual);
2244     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2245     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2246     dri_bo_unmap(render_state->cc.depth_stencil);
2247 }
2248
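     /*
      * GEN7 rearranges the sampler state layout (struct gen7_sampler_state),
      * so i965_render_sampler() cannot be reused here; the settings are the
      * same bilinear filtering with coordinate clamping as on earlier
      * generations.
      */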
2249 static void 
2250 gen7_render_sampler(VADriverContextP ctx)
2251 {
2252     struct i965_driver_data *i965 = i965_driver_data(ctx);
2253     struct i965_render_state *render_state = &i965->render_state;
2254     struct gen7_sampler_state *sampler_state;
2255     int i;
2256     
2257     assert(render_state->wm.sampler_count > 0);
2258     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2259
2260     dri_bo_map(render_state->wm.sampler, 1);
2261     assert(render_state->wm.sampler->virtual);
2262     sampler_state = render_state->wm.sampler->virtual;
2263     for (i = 0; i < render_state->wm.sampler_count; i++) {
2264         memset(sampler_state, 0, sizeof(*sampler_state));
2265         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2266         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2267         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2268         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2269         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2270         sampler_state++;
2271     }
2272
2273     dri_bo_unmap(render_state->wm.sampler);
2274 }
2275
2276 static void
2277 gen7_render_setup_states(
2278     VADriverContextP   ctx,
2279     VASurfaceID        surface,
2280     const VARectangle *src_rect,
2281     const VARectangle *dst_rect
2282 )
2283 {
2284     i965_render_dest_surface_state(ctx, 0);
2285     i965_render_src_surfaces_state(ctx, surface);
2286     gen7_render_sampler(ctx);
2287     i965_render_cc_viewport(ctx);
2288     gen7_render_color_calc_state(ctx);
2289     gen7_render_blend_state(ctx);
2290     gen7_render_depth_stencil_state(ctx);
2291     i965_render_upload_constants(ctx);
2292     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2293 }
2294
2295 static void
2296 gen7_emit_invariant_states(VADriverContextP ctx)
2297 {
2298     struct i965_driver_data *i965 = i965_driver_data(ctx);
2299     struct intel_batchbuffer *batch = i965->batch;
2300
2301     BEGIN_BATCH(batch, 1);
2302     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2303     ADVANCE_BATCH(batch);
2304
2305     BEGIN_BATCH(batch, 4);
2306     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2307     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2308               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2309     OUT_BATCH(batch, 0);
2310     OUT_BATCH(batch, 0);
2311     ADVANCE_BATCH(batch);
2312
2313     BEGIN_BATCH(batch, 2);
2314     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2315     OUT_BATCH(batch, 1);
2316     ADVANCE_BATCH(batch);
2317
2318     /* Set system instruction pointer */
2319     BEGIN_BATCH(batch, 2);
2320     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2321     OUT_BATCH(batch, 0);
2322     ADVANCE_BATCH(batch);
2323 }
2324
2325 static void
2326 gen7_emit_state_base_address(VADriverContextP ctx)
2327 {
2328     struct i965_driver_data *i965 = i965_driver_data(ctx);
2329     struct intel_batchbuffer *batch = i965->batch;
2330     struct i965_render_state *render_state = &i965->render_state;
2331
2332     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2333     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2334     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2335     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2336     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2337     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2338     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2339     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2340     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2341     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2342 }
2343
2344 static void
2345 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2346 {
2347     struct i965_driver_data *i965 = i965_driver_data(ctx);
2348     struct intel_batchbuffer *batch = i965->batch;
2349     struct i965_render_state *render_state = &i965->render_state;
2350
2351     BEGIN_BATCH(batch, 2);
2352     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2353     OUT_RELOC(batch,
2354               render_state->cc.viewport,
2355               I915_GEM_DOMAIN_INSTRUCTION, 0,
2356               0);
2357     ADVANCE_BATCH(batch);
2358
2359     BEGIN_BATCH(batch, 2);
2360     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2361     OUT_BATCH(batch, 0);
2362     ADVANCE_BATCH(batch);
2363 }
2364
2365 /*
2366  * URB layout on GEN7 
2367  * ----------------------------------------
2368  * | PS Push Constants (8KB) | VS entries |
2369  * ----------------------------------------
2370  */
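     /*
      * The PS push-constant allocation below is programmed in 1KB units,
      * and the URB_{VS,GS,HS,DS} starting addresses appear to be in 8KB
      * blocks, so a starting address of 1 places the VS entries right after
      * the 8KB push-constant region in the diagram above.
      */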
2371 static void
2372 gen7_emit_urb(VADriverContextP ctx)
2373 {
2374     struct i965_driver_data *i965 = i965_driver_data(ctx);
2375     struct intel_batchbuffer *batch = i965->batch;
2376
2377     BEGIN_BATCH(batch, 2);
2378     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2379     OUT_BATCH(batch, 8); /* in 1KBs */
2380     ADVANCE_BATCH(batch);
2381
2382     BEGIN_BATCH(batch, 2);
2383     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2384     OUT_BATCH(batch, 
2385               (32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
2386               (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
2387               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2388     ADVANCE_BATCH(batch);
2389
2390     BEGIN_BATCH(batch, 2);
2391     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2392     OUT_BATCH(batch,
2393               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2394               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2395     ADVANCE_BATCH(batch);
2396
2397     BEGIN_BATCH(batch, 2);
2398     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2399     OUT_BATCH(batch,
2400               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2401               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2402     ADVANCE_BATCH(batch);
2403
2404     BEGIN_BATCH(batch, 2);
2405     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2406     OUT_BATCH(batch,
2407               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2408               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2409     ADVANCE_BATCH(batch);
2410 }
2411
2412 static void
2413 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2414 {
2415     struct i965_driver_data *i965 = i965_driver_data(ctx);
2416     struct intel_batchbuffer *batch = i965->batch;
2417     struct i965_render_state *render_state = &i965->render_state;
2418
2419     BEGIN_BATCH(batch, 2);
2420     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2421     OUT_RELOC(batch,
2422               render_state->cc.state,
2423               I915_GEM_DOMAIN_INSTRUCTION, 0,
2424               1);
2425     ADVANCE_BATCH(batch);
2426
2427     BEGIN_BATCH(batch, 2);
2428     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2429     OUT_RELOC(batch,
2430               render_state->cc.blend,
2431               I915_GEM_DOMAIN_INSTRUCTION, 0,
2432               1);
2433     ADVANCE_BATCH(batch);
2434
2435     BEGIN_BATCH(batch, 2);
2436     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2437     OUT_RELOC(batch,
2438               render_state->cc.depth_stencil,
2439               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2440               1);
2441     ADVANCE_BATCH(batch);
2442 }
2443
2444 static void
2445 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2446 {
2447     struct i965_driver_data *i965 = i965_driver_data(ctx);
2448     struct intel_batchbuffer *batch = i965->batch;
2449     struct i965_render_state *render_state = &i965->render_state;
2450
2451     BEGIN_BATCH(batch, 2);
2452     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2453     OUT_RELOC(batch,
2454               render_state->wm.sampler,
2455               I915_GEM_DOMAIN_INSTRUCTION, 0,
2456               0);
2457     ADVANCE_BATCH(batch);
2458 }
2459
2460 static void
2461 gen7_emit_binding_table(VADriverContextP ctx)
2462 {
2463     struct i965_driver_data *i965 = i965_driver_data(ctx);
2464     struct intel_batchbuffer *batch = i965->batch;
2465
2466     BEGIN_BATCH(batch, 2);
2467     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2468     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2469     ADVANCE_BATCH(batch);
2470 }
2471
2472 static void
2473 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2474 {
2475     struct i965_driver_data *i965 = i965_driver_data(ctx);
2476     struct intel_batchbuffer *batch = i965->batch;
2477
2478     BEGIN_BATCH(batch, 7);
2479     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2480     OUT_BATCH(batch,
2481               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2482               (I965_SURFACE_NULL << 29));
2483     OUT_BATCH(batch, 0);
2484     OUT_BATCH(batch, 0);
2485     OUT_BATCH(batch, 0);
2486     OUT_BATCH(batch, 0);
2487     OUT_BATCH(batch, 0);
2488     ADVANCE_BATCH(batch);
2489
2490     BEGIN_BATCH(batch, 3);
2491     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2492     OUT_BATCH(batch, 0);
2493     OUT_BATCH(batch, 0);
2494     ADVANCE_BATCH(batch);
2495 }
2496
2497 static void
2498 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2499 {
2500     i965_render_drawing_rectangle(ctx);
2501 }
2502
2503 static void 
2504 gen7_emit_vs_state(VADriverContextP ctx)
2505 {
2506     struct i965_driver_data *i965 = i965_driver_data(ctx);
2507     struct intel_batchbuffer *batch = i965->batch;
2508
2509     /* disable VS constant buffer */
2510     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2511     OUT_BATCH(batch, 0);
2512     OUT_BATCH(batch, 0);
2513     OUT_BATCH(batch, 0);
2514     OUT_BATCH(batch, 0);
2515     OUT_BATCH(batch, 0);
2516     OUT_BATCH(batch, 0);
2517         
2518     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2519     OUT_BATCH(batch, 0); /* without VS kernel */
2520     OUT_BATCH(batch, 0);
2521     OUT_BATCH(batch, 0);
2522     OUT_BATCH(batch, 0);
2523     OUT_BATCH(batch, 0); /* pass-through */
2524 }
2525
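     /*
      * GEN7 adds tessellation (HS/TE/DS) and stream-out stages that this
      * driver never uses, so each of them, along with the GS, is explicitly
      * disabled with zeroed constant and unit packets (plus NULL binding
      * tables where applicable) before drawing.
      */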
2526 static void 
2527 gen7_emit_bypass_state(VADriverContextP ctx)
2528 {
2529     struct i965_driver_data *i965 = i965_driver_data(ctx);
2530     struct intel_batchbuffer *batch = i965->batch;
2531
2532     /* bypass GS */
2533     BEGIN_BATCH(batch, 7);
2534     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2535     OUT_BATCH(batch, 0);
2536     OUT_BATCH(batch, 0);
2537     OUT_BATCH(batch, 0);
2538     OUT_BATCH(batch, 0);
2539     OUT_BATCH(batch, 0);
2540     OUT_BATCH(batch, 0);
2541     ADVANCE_BATCH(batch);
2542
2543     BEGIN_BATCH(batch, 7);      
2544     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2545     OUT_BATCH(batch, 0); /* without GS kernel */
2546     OUT_BATCH(batch, 0);
2547     OUT_BATCH(batch, 0);
2548     OUT_BATCH(batch, 0);
2549     OUT_BATCH(batch, 0);
2550     OUT_BATCH(batch, 0); /* pass-through */
2551     ADVANCE_BATCH(batch);
2552
2553     BEGIN_BATCH(batch, 2);
2554     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2555     OUT_BATCH(batch, 0);
2556     ADVANCE_BATCH(batch);
2557
2558     /* disable HS */
2559     BEGIN_BATCH(batch, 7);
2560     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2561     OUT_BATCH(batch, 0);
2562     OUT_BATCH(batch, 0);
2563     OUT_BATCH(batch, 0);
2564     OUT_BATCH(batch, 0);
2565     OUT_BATCH(batch, 0);
2566     OUT_BATCH(batch, 0);
2567     ADVANCE_BATCH(batch);
2568
2569     BEGIN_BATCH(batch, 7);
2570     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2571     OUT_BATCH(batch, 0);
2572     OUT_BATCH(batch, 0);
2573     OUT_BATCH(batch, 0);
2574     OUT_BATCH(batch, 0);
2575     OUT_BATCH(batch, 0);
2576     OUT_BATCH(batch, 0);
2577     ADVANCE_BATCH(batch);
2578
2579     BEGIN_BATCH(batch, 2);
2580     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2581     OUT_BATCH(batch, 0);
2582     ADVANCE_BATCH(batch);
2583
2584     /* Disable TE */
2585     BEGIN_BATCH(batch, 4);
2586     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2587     OUT_BATCH(batch, 0);
2588     OUT_BATCH(batch, 0);
2589     OUT_BATCH(batch, 0);
2590     ADVANCE_BATCH(batch);
2591
2592     /* Disable DS */
2593     BEGIN_BATCH(batch, 7);
2594     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2595     OUT_BATCH(batch, 0);
2596     OUT_BATCH(batch, 0);
2597     OUT_BATCH(batch, 0);
2598     OUT_BATCH(batch, 0);
2599     OUT_BATCH(batch, 0);
2600     OUT_BATCH(batch, 0);
2601     ADVANCE_BATCH(batch);
2602
2603     BEGIN_BATCH(batch, 6);
2604     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2605     OUT_BATCH(batch, 0);
2606     OUT_BATCH(batch, 0);
2607     OUT_BATCH(batch, 0);
2608     OUT_BATCH(batch, 0);
2609     OUT_BATCH(batch, 0);
2610     ADVANCE_BATCH(batch);
2611
2612     BEGIN_BATCH(batch, 2);
2613     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2614     OUT_BATCH(batch, 0);
2615     ADVANCE_BATCH(batch);
2616
2617     /* Disable STREAMOUT */
2618     BEGIN_BATCH(batch, 3);
2619     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2620     OUT_BATCH(batch, 0);
2621     OUT_BATCH(batch, 0);
2622     ADVANCE_BATCH(batch);
2623 }
2624
2625 static void 
2626 gen7_emit_clip_state(VADriverContextP ctx)
2627 {
2628     struct i965_driver_data *i965 = i965_driver_data(ctx);
2629     struct intel_batchbuffer *batch = i965->batch;
2630
2631     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2632     OUT_BATCH(batch, 0);
2633     OUT_BATCH(batch, 0); /* pass-through */
2634     OUT_BATCH(batch, 0);
2635 }
2636
2637 static void 
2638 gen7_emit_sf_state(VADriverContextP ctx)
2639 {
2640     struct i965_driver_data *i965 = i965_driver_data(ctx);
2641     struct intel_batchbuffer *batch = i965->batch;
2642
2643     BEGIN_BATCH(batch, 14);
2644     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2645     OUT_BATCH(batch,
2646               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2647               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2648               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2649     OUT_BATCH(batch, 0);
2650     OUT_BATCH(batch, 0);
2651     OUT_BATCH(batch, 0); /* DW4 */
2652     OUT_BATCH(batch, 0);
2653     OUT_BATCH(batch, 0);
2654     OUT_BATCH(batch, 0);
2655     OUT_BATCH(batch, 0);
2656     OUT_BATCH(batch, 0); /* DW9 */
2657     OUT_BATCH(batch, 0);
2658     OUT_BATCH(batch, 0);
2659     OUT_BATCH(batch, 0);
2660     OUT_BATCH(batch, 0);
2661     ADVANCE_BATCH(batch);
2662
2663     BEGIN_BATCH(batch, 7);
2664     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2665     OUT_BATCH(batch, 0);
2666     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2667     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2668     OUT_BATCH(batch, 0);
2669     OUT_BATCH(batch, 0);
2670     OUT_BATCH(batch, 0);
2671     ADVANCE_BATCH(batch);
2672 }
2673
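     /*
      * On GEN7 the old WM packet is split in two: 3DSTATE_WM keeps the
      * rasterizer controls while 3DSTATE_PS carries the kernel pointer and
      * dispatch parameters.  As on GEN6, constant buffer 0 points at the
      * CURBE bo and the payload starts at GRF 6, with SIMD16 dispatch only
      * and a cap of 86 threads.
      */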
2674 static void 
2675 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2676 {
2677     struct i965_driver_data *i965 = i965_driver_data(ctx);
2678     struct intel_batchbuffer *batch = i965->batch;
2679     struct i965_render_state *render_state = &i965->render_state;
2680
2681     BEGIN_BATCH(batch, 3);
2682     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2683     OUT_BATCH(batch,
2684               GEN7_WM_DISPATCH_ENABLE |
2685               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2686     OUT_BATCH(batch, 0);
2687     ADVANCE_BATCH(batch);
2688
2689     BEGIN_BATCH(batch, 7);
2690     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2691     OUT_BATCH(batch, 1);
2692     OUT_BATCH(batch, 0);
2693     OUT_RELOC(batch, 
2694               render_state->curbe.bo,
2695               I915_GEM_DOMAIN_INSTRUCTION, 0,
2696               0);
2697     OUT_BATCH(batch, 0);
2698     OUT_BATCH(batch, 0);
2699     OUT_BATCH(batch, 0);
2700     ADVANCE_BATCH(batch);
2701
2702     BEGIN_BATCH(batch, 8);
2703     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2704     OUT_RELOC(batch, 
2705               render_state->render_kernels[kernel].bo,
2706               I915_GEM_DOMAIN_INSTRUCTION, 0,
2707               0);
2708     OUT_BATCH(batch, 
2709               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2710               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2711     OUT_BATCH(batch, 0); /* scratch space base offset */
2712     OUT_BATCH(batch, 
2713               ((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
2714               GEN7_PS_PUSH_CONSTANT_ENABLE |
2715               GEN7_PS_ATTRIBUTE_ENABLE |
2716               GEN7_PS_16_DISPATCH_ENABLE);
2717     OUT_BATCH(batch, 
2718               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2719     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2720     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2721     ADVANCE_BATCH(batch);
2722 }
2723
2724 static void
2725 gen7_emit_vertex_element_state(VADriverContextP ctx)
2726 {
2727     struct i965_driver_data *i965 = i965_driver_data(ctx);
2728     struct intel_batchbuffer *batch = i965->batch;
2729
2730     /* Set up our vertex elements, sourced from the single vertex buffer. */
2731     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2732     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2733     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2734               GEN6_VE0_VALID |
2735               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2736               (0 << VE0_OFFSET_SHIFT));
2737     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2738               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2739               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2740               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2741     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2742     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2743               GEN6_VE0_VALID |
2744               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2745               (8 << VE0_OFFSET_SHIFT));
2746     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2747               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2748               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2749               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2750 }
2751
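     /*
      * Same RECTLIST emission as on GEN6 (see the vertex layout sketch
      * there).  GEN7 additionally requires GEN7_VB0_ADDRESS_MODIFYENABLE
      * for the buffer start/end addresses in this packet to take effect,
      * and the primitive topology moves out of the 3DPRIMITIVE header into
      * DW1.
      */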
2752 static void
2753 gen7_emit_vertices(VADriverContextP ctx)
2754 {
2755     struct i965_driver_data *i965 = i965_driver_data(ctx);
2756     struct intel_batchbuffer *batch = i965->batch;
2757     struct i965_render_state *render_state = &i965->render_state;
2758
2759     BEGIN_BATCH(batch, 5);
2760     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2761     OUT_BATCH(batch, 
2762               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2763               GEN6_VB0_VERTEXDATA |
2764               GEN7_VB0_ADDRESS_MODIFYENABLE |
2765               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2766     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2767     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2768     OUT_BATCH(batch, 0);
2769     ADVANCE_BATCH(batch);
2770
2771     BEGIN_BATCH(batch, 7);
2772     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2773     OUT_BATCH(batch,
2774               _3DPRIM_RECTLIST |
2775               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2776     OUT_BATCH(batch, 3); /* vertex count per instance */
2777     OUT_BATCH(batch, 0); /* start vertex offset */
2778     OUT_BATCH(batch, 1); /* single instance */
2779     OUT_BATCH(batch, 0); /* start instance location */
2780     OUT_BATCH(batch, 0);
2781     ADVANCE_BATCH(batch);
2782 }
2783
2784 static void
2785 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2786 {
2787     struct i965_driver_data *i965 = i965_driver_data(ctx);
2788     struct intel_batchbuffer *batch = i965->batch;
2789
2790     intel_batchbuffer_start_atomic(batch, 0x1000);
2791     intel_batchbuffer_emit_mi_flush(batch);
2792     gen7_emit_invariant_states(ctx);
2793     gen7_emit_state_base_address(ctx);
2794     gen7_emit_viewport_state_pointers(ctx);
2795     gen7_emit_urb(ctx);
2796     gen7_emit_cc_state_pointers(ctx);
2797     gen7_emit_sampler_state_pointers(ctx);
2798     gen7_emit_bypass_state(ctx);
2799     gen7_emit_vs_state(ctx);
2800     gen7_emit_clip_state(ctx);
2801     gen7_emit_sf_state(ctx);
2802     gen7_emit_wm_state(ctx, kernel);
2803     gen7_emit_binding_table(ctx);
2804     gen7_emit_depth_buffer_state(ctx);
2805     gen7_emit_drawing_rectangle(ctx);
2806     gen7_emit_vertex_element_state(ctx);
2807     gen7_emit_vertices(ctx);
2808     intel_batchbuffer_end_atomic(batch);
2809 }
2810
2811 static void
2812 gen7_render_put_surface(
2813     VADriverContextP   ctx,
2814     VASurfaceID        surface,
2815     const VARectangle *src_rect,
2816     const VARectangle *dst_rect,
2817     unsigned int       flags
2818 )
2819 {
2820     struct i965_driver_data *i965 = i965_driver_data(ctx);
2821     struct intel_batchbuffer *batch = i965->batch;
2822
2823     gen7_render_initialize(ctx);
2824     gen7_render_setup_states(ctx, surface, src_rect, dst_rect);
2825     i965_clear_dest_region(ctx);
2826     gen7_render_emit_states(ctx, PS_KERNEL);
2827     intel_batchbuffer_flush(batch);
2828 }
2829
2830 static void
2831 gen7_subpicture_render_blend_state(VADriverContextP ctx)
2832 {
2833     struct i965_driver_data *i965 = i965_driver_data(ctx);
2834     struct i965_render_state *render_state = &i965->render_state;
2835     struct gen6_blend_state *blend_state;
2836
2838     dri_bo_map(render_state->cc.blend, 1);
2839     assert(render_state->cc.blend->virtual);
2840     blend_state = render_state->cc.blend->virtual;
2841     memset(blend_state, 0, sizeof(*blend_state));
2842     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2843     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2844     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2845     blend_state->blend0.blend_enable = 1;
2846     blend_state->blend1.post_blend_clamp_enable = 1;
2847     blend_state->blend1.pre_blend_clamp_enable = 1;
2848     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2849     dri_bo_unmap(render_state->cc.blend);
2850 }
2851
2852 static void
2853 gen7_subpicture_render_setup_states(
2854     VADriverContextP   ctx,
2855     VASurfaceID        surface,
2856     const VARectangle *src_rect,
2857     const VARectangle *dst_rect
2858 )
2859 {
2860     i965_render_dest_surface_state(ctx, 0);
2861     i965_subpic_render_src_surfaces_state(ctx, surface);
2862     i965_render_sampler(ctx);
2863     i965_render_cc_viewport(ctx);
2864     gen7_render_color_calc_state(ctx);
2865     gen7_subpicture_render_blend_state(ctx);
2866     gen7_render_depth_stencil_state(ctx);
2867     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2868 }
2869
2870 static void
2871 gen7_render_put_subpicture(
2872     VADriverContextP   ctx,
2873     VASurfaceID        surface,
2874     const VARectangle *src_rect,
2875     const VARectangle *dst_rect
2876 )
2877 {
2878     struct i965_driver_data *i965 = i965_driver_data(ctx);
2879     struct intel_batchbuffer *batch = i965->batch;
2880     struct object_surface *obj_surface = SURFACE(surface);
2881     struct object_subpic *obj_subpic;
2882
2883     assert(obj_surface);
         obj_subpic = SUBPIC(obj_surface->subpic);
         assert(obj_subpic);
2884     gen7_render_initialize(ctx);
2885     gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2886     gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2887     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2888     intel_batchbuffer_flush(batch);
2889 }
2890
2891
2892 /*
2893  * global functions
2894  */
2895 VAStatus 
2896 i965_DestroySurfaces(VADriverContextP ctx,
2897                      VASurfaceID *surface_list,
2898                      int num_surfaces);
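     /*
      * Generation dispatch for VA PutSurface.  Post-processing runs first
      * and may hand back a new, possibly already scaled, surface; in that
      * case the render step reads from the temporary surface (using
      * dst_rect as the source rectangle when scaling has already been
      * done) and the temporary is destroyed after the blit is emitted.
      */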
2899 void
2900 intel_render_put_surface(
2901     VADriverContextP   ctx,
2902     VASurfaceID        surface,
2903     const VARectangle *src_rect,
2904     const VARectangle *dst_rect,
2905     unsigned int       flags
2906 )
2907 {
2908     struct i965_driver_data *i965 = i965_driver_data(ctx);
2909     int has_done_scaling = 0;
2910     VASurfaceID in_surface_id = surface;
2911     VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);
2912
2913     assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));
2914
2915     if (out_surface_id != VA_INVALID_ID)
2916         in_surface_id = out_surface_id;
2917
2918     if (IS_GEN7(i965->intel.device_id))
2919         gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2920     else if (IS_GEN6(i965->intel.device_id))
2921         gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2922     else
2923         i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2924
2925     if (in_surface_id != surface)
2926         i965_DestroySurfaces(ctx, &in_surface_id, 1);
2927 }
2928
2929 void
2930 intel_render_put_subpicture(
2931     VADriverContextP   ctx,
2932     VASurfaceID        surface,
2933     const VARectangle *src_rect,
2934     const VARectangle *dst_rect
2935 )
2936 {
2937     struct i965_driver_data *i965 = i965_driver_data(ctx);
2938
2939     if (IS_GEN7(i965->intel.device_id))
2940         gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2941     else if (IS_GEN6(i965->intel.device_id))
2942         gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2943     else
2944         i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2945 }
2946
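     /*
      * Driver entry point: pick the kernel table for the detected
      * generation (the render_kernels_gen* arrays are defined earlier in
      * this file from the included shader binaries), upload each kernel
      * into its own bo, and allocate the CURBE buffer used for push
      * constants.
      */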
2947 Bool 
2948 i965_render_init(VADriverContextP ctx)
2949 {
2950     struct i965_driver_data *i965 = i965_driver_data(ctx);
2951     struct i965_render_state *render_state = &i965->render_state;
2952     int i;
2953
2954     /* kernel */
2955     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) / 
2956                                  sizeof(render_kernels_gen5[0])));
2957     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) / 
2958                                  sizeof(render_kernels_gen6[0])));
2959
2960     if (IS_GEN7(i965->intel.device_id))
2961         memcpy(render_state->render_kernels, render_kernels_gen7, sizeof(render_state->render_kernels));
2962     else if (IS_GEN6(i965->intel.device_id))
2963         memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
2964     else if (IS_IRONLAKE(i965->intel.device_id))
2965         memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
2966     else
2967         memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
2968
2969     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
2970         struct i965_kernel *kernel = &render_state->render_kernels[i];
2971
2972         if (!kernel->size)
2973             continue;
2974
2975         kernel->bo = dri_bo_alloc(i965->intel.bufmgr, 
2976                                   kernel->name, 
2977                                   kernel->size, 0x1000);
2978         assert(kernel->bo);
2979         dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
2980     }
2981
2982     /* constant buffer */
2983     render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
2984                       "constant buffer",
2985                       4096, 64);
2986     assert(render_state->curbe.bo);
2987     render_state->curbe.upload = 0;
2988
2989     return True;
2990 }
2991
2992 Bool 
2993 i965_render_terminate(VADriverContextP ctx)
2994 {
2995     int i;
2996     struct i965_driver_data *i965 = i965_driver_data(ctx);
2997     struct i965_render_state *render_state = &i965->render_state;
2998
2999     dri_bo_unreference(render_state->curbe.bo);
3000     render_state->curbe.bo = NULL;
3001
3002     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
3003         struct i965_kernel *kernel = &render_state->render_kernels[i];
3004         
3005         dri_bo_unreference(kernel->bo);
3006         kernel->bo = NULL;
3007     }
3008
3009     dri_bo_unreference(render_state->vb.vertex_buffer);
3010     render_state->vb.vertex_buffer = NULL;
3011     dri_bo_unreference(render_state->vs.state);
3012     render_state->vs.state = NULL;
3013     dri_bo_unreference(render_state->sf.state);
3014     render_state->sf.state = NULL;
3015     dri_bo_unreference(render_state->wm.sampler);
3016     render_state->wm.sampler = NULL;
3017     dri_bo_unreference(render_state->wm.state);
3018     render_state->wm.state = NULL;
3019     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
         render_state->wm.surface_state_binding_table_bo = NULL;
3020     dri_bo_unreference(render_state->cc.viewport);
3021     render_state->cc.viewport = NULL;
3022     dri_bo_unreference(render_state->cc.state);
3023     render_state->cc.state = NULL;
3024     dri_bo_unreference(render_state->cc.blend);
3025     render_state->cc.blend = NULL;
3026     dri_bo_unreference(render_state->cc.depth_stencil);
3027     render_state->cc.depth_stencil = NULL;
3028
3029     if (render_state->draw_region) {
3030         dri_bo_unreference(render_state->draw_region->bo);
3031         free(render_state->draw_region);
3032         render_state->draw_region = NULL;
3033     }
3034
3035     return True;
3036 }
3037