/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_backend.h>
#include "va/x11/va_dricommon.h"

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   (((nreg) + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* Programs for Ironlake */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* Programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* Programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

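/*
 * Each surface-state slot is padded to the larger of the Gen4-6 and Gen7
 * layouts so that SURFACE_STATE_OFFSET() works for either generation; the
 * binding table lives in the same bo, right after the last surface state.
 */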
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

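/*
 * Type-pun a float to its IEEE-754 bit pattern so that floating-point
 * constants can be emitted into the command stream with OUT_BATCH().
 */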
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum
{
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

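/*
 * Partitioning of the URB between the fixed-function stages.  Only the VS,
 * SF and CS entries are actually used; GS and CLIP are disabled for this
 * textured-rectangle pipeline.
 */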
#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;

    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0; /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0; /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0;        /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;          /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(struct i965_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_render_set_surface_state(struct gen7_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(VADriverContextP ctx,
                              int index,
                              dri_bo *region,
                              unsigned long offset,
                              int w, int h,
                              int pitch, int format)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

static void
i965_render_src_surfaces_state(VADriverContextP ctx,
                               VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct object_surface *obj_surface;
    int w, h;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    if (obj_surface->pp_out_bo) {
        w = obj_surface->pp_out_width;
        h = obj_surface->pp_out_height;
        rw = obj_surface->orig_pp_out_width;
        rh = obj_surface->orig_pp_out_height;
        region = obj_surface->pp_out_bo;
    } else {
        w = obj_surface->width;
        h = obj_surface->height;
        rw = obj_surface->orig_width;
        rh = obj_surface->orig_height;
        region = obj_surface->bo;
    }

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);

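    /*
     * The planar PS kernel samples U from binding-table entries 3/4 and V
     * from entries 5/6.  YV12 stores its V plane before its U plane, so the
     * entry indices are swapped below to compensate.
     */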
    if (obj_surface->fourcc == VA_FOURCC('Y','V','1','2')) {
        int u3 = 5, u4 = 6, v5 = 3, v6 = 4;

        i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* V plane in memory */
        i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U plane in memory */
        i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N','V','1','2')) {
            i965_render_src_surface_state(ctx, 3, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM); /* UV */
            i965_render_src_surface_state(ctx, 4, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM);
        } else {
            int u3 = 3, u4 = 4, v5 = 5, v6 = 6;

            i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U */
            i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
            i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* V */
            i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        }
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;
    dri_bo *subpic_region;

    assert(obj_surface);
    assert(obj_surface->bo);

    obj_subpic = SUBPIC(obj_surface->subpic);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
}

static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic);

    const float sx = (float)output_rect->width  / (float)obj_surface->orig_width;
    const float sy = (float)output_rect->height / (float)obj_surface->orig_height;
    float *vb, tx1, tx2, ty1, ty2, x1, x2, y1, y2;
    int i = 0;

    VARectangle dst_rect;
    dst_rect.x      = output_rect->x + sx * (float)obj_subpic->dst_rect.x;
    dst_rect.y      = output_rect->y + sy * (float)obj_subpic->dst_rect.y;
    dst_rect.width  = sx * (float)obj_subpic->dst_rect.width;
    dst_rect.height = sy * (float)obj_subpic->dst_rect.height;

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

    tx1 = (float)obj_subpic->src_rect.x / (float)obj_subpic->width;
    ty1 = (float)obj_subpic->src_rect.y / (float)obj_subpic->height;
    tx2 = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / (float)obj_subpic->width;
    ty2 = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / (float)obj_subpic->height;

    x1 = (float)dst_rect.x;
    y1 = (float)dst_rect.y;
    x2 = (float)(dst_rect.x + dst_rect.width);
    y2 = (float)(dst_rect.y + dst_rect.height);

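    /* Three (u, v, x, y) vertices; a RECTLIST primitive derives the fourth
     * corner of the rectangle from them. */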
    vb[i++] = tx2;
    vb[i++] = ty2;
    vb[i++] = x2;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty2;
    vb[i++] = x1;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty1;
    vb[i++] = x1;
    vb[i++] = y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float *vb;

    float u1, v1, u2, v2;
    int i, width, height;
    int box_x1 = dest_region->x + dst_rect->x;
    int box_y1 = dest_region->y + dst_rect->y;
    int box_x2 = box_x1 + dst_rect->width;
    int box_y2 = box_y1 + dst_rect->height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);
    width = obj_surface->orig_width;
    height = obj_surface->orig_height;

    u1 = (float)src_rect->x / width;
    v1 = (float)src_rect->y / height;
    u2 = (float)(src_rect->x + src_rect->width) / width;
    v2 = (float)(src_rect->y + src_rect->height) / height;

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

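    /* Three (u, v, x, y) vertices per RECTLIST rectangle; the hardware
     * infers the fourth corner. */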
    i = 0;
    vb[i++] = u2;
    vb[i++] = v2;
    vb[i++] = (float)box_x2;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v2;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v1;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

static void
i965_render_upload_constants(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;

    if (render_state->curbe.upload)
        return;

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

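    /* A single CURBE constant tells the planar PS kernel whether chroma is
     * interleaved in one plane (NV12-style) or split across U and V planes. */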
    if (render_state->interleaved_uv)
        *constant_buffer = 1;
    else
        *constant_buffer = 0;

    dri_bo_unmap(render_state->curbe.bo);
    render_state->curbe.upload = 1;
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

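/*
 * PIPELINE_SELECT: route subsequent commands to the 3D pipeline rather
 * than the media pipeline.
 */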
static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

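    /* Ironlake's STATE_BASE_ADDRESS takes 8 dwords (it adds an instruction
     * base address and upper bound); earlier generations use 6 dwords. */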
    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0); /* disable GS */
    OUT_BATCH(batch, 0); /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |  /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));           /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | ((dest_region->height - 1) << 16));
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));

    /* fill palette: bits 0-23 carry the color, bits 24-31 the alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);

    ADVANCE_BATCH(batch);
}

static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

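    /* Draw one RECTLIST primitive from the three vertices uploaded by
     * i965_render_upload_vertex(). */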
    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
    br13 = 0xf0 << 16;
    pitch = dest_region->pitch;

    if (dest_region->cpp == 4) {
        br13 |= BR13_8888;
        blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
    } else {
        assert(dest_region->cpp == 2);
        br13 |= BR13_565;
    }

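    /* For a tiled destination the BLT pitch field is specified in dwords. */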
    if (dest_region->tiling != I915_TILING_NONE) {
        blt_cmd |= XY_COLOR_BLT_DST_TILED;
        pitch /= 4;
    }

    br13 |= pitch;

    if (IS_GEN6(i965->intel.device_id) ||
        IS_GEN7(i965->intel.device_id)) {
        intel_batchbuffer_start_atomic_blt(batch, 24);
        BEGIN_BLT_BATCH(batch, 6);
    } else {
        intel_batchbuffer_start_atomic(batch, 24);
        BEGIN_BATCH(batch, 6);
    }

    OUT_BATCH(batch, blt_cmd);
    OUT_BATCH(batch, br13);
    OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
    OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
              (dest_region->x + dest_region->width));
    OUT_RELOC(batch, dest_region->bo,
              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
              0);
    OUT_BATCH(batch, 0x0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_surface_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    i965_clear_dest_region(ctx);
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_constant_buffer(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_subpic_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_render_initialize(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    dri_bo *bo;

    /* VERTEX BUFFER */
    dri_bo_unreference(render_state->vb.vertex_buffer);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vertex buffer",
                      4096,
                      4096);
    assert(bo);
    render_state->vb.vertex_buffer = bo;

    /* VS */
    dri_bo_unreference(render_state->vs.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vs state",
                      sizeof(struct i965_vs_unit_state),
                      64);
    assert(bo);
    render_state->vs.state = bo;

    /* GS */
    /* CLIP */
    /* SF */
    dri_bo_unreference(render_state->sf.state);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "sf state",
                      sizeof(struct i965_sf_unit_state),
                      64);
    assert(bo);
    render_state->sf.state = bo;

    /* WM */
1485     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1486     bo = dri_bo_alloc(i965->intel.bufmgr,
1487                       "surface state & binding table",
1488                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1489                       4096);
1490     assert(bo);
1491     render_state->wm.surface_state_binding_table_bo = bo;
1492
1493     dri_bo_unreference(render_state->wm.sampler);
1494     bo = dri_bo_alloc(i965->intel.bufmgr,
1495                       "sampler state",
1496                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1497                       64);
1498     assert(bo);
1499     render_state->wm.sampler = bo;
1500     render_state->wm.sampler_count = 0;
1501
1502     dri_bo_unreference(render_state->wm.state);
1503     bo = dri_bo_alloc(i965->intel.bufmgr,
1504                       "wm state",
1505                       sizeof(struct i965_wm_unit_state),
1506                       64);
1507     assert(bo);
1508     render_state->wm.state = bo;
1509
1510     /* COLOR CALCULATOR */
1511     dri_bo_unreference(render_state->cc.state);
1512     bo = dri_bo_alloc(i965->intel.bufmgr,
1513                       "color calc state",
1514                       sizeof(struct i965_cc_unit_state),
1515                       64);
1516     assert(bo);
1517     render_state->cc.state = bo;
1518
1519     dri_bo_unreference(render_state->cc.viewport);
1520     bo = dri_bo_alloc(i965->intel.bufmgr,
1521                       "cc viewport",
1522                       sizeof(struct i965_cc_viewport),
1523                       64);
1524     assert(bo);
1525     render_state->cc.viewport = bo;
1526 }
1527
1528 static void
1529 i965_render_put_surface(
1530     VADriverContextP   ctx,
1531     VASurfaceID        surface,
1532     const VARectangle *src_rect,
1533     const VARectangle *dst_rect,
1534     unsigned int       flags
1535 )
1536 {
1537     struct i965_driver_data *i965 = i965_driver_data(ctx);
1538     struct intel_batchbuffer *batch = i965->batch;
1539
1540     i965_render_initialize(ctx);
1541     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect);
1542     i965_surface_render_pipeline_setup(ctx);
1543     intel_batchbuffer_flush(batch);
1544 }
1545
1546 static void
1547 i965_render_put_subpicture(
1548     VADriverContextP   ctx,
1549     VASurfaceID        surface,
1550     const VARectangle *src_rect,
1551     const VARectangle *dst_rect
1552 )
1553 {
1554     struct i965_driver_data *i965 = i965_driver_data(ctx);
1555     struct intel_batchbuffer *batch = i965->batch;
1556     struct object_surface *obj_surface = SURFACE(surface);
1557     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
1558
1559     assert(obj_subpic);
1560
1561     i965_render_initialize(ctx);
1562     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1563     i965_subpic_render_pipeline_setup(ctx);
1564     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1565     intel_batchbuffer_flush(batch);
1566 }
1567
1568 /*
1569  * for GEN6+
1570  */
1571 static void 
1572 gen6_render_initialize(VADriverContextP ctx)
1573 {
1574     struct i965_driver_data *i965 = i965_driver_data(ctx);
1575     struct i965_render_state *render_state = &i965->render_state;
1576     dri_bo *bo;
1577
1578     /* VERTEX BUFFER */
1579     dri_bo_unreference(render_state->vb.vertex_buffer);
1580     bo = dri_bo_alloc(i965->intel.bufmgr,
1581                       "vertex buffer",
1582                       4096,
1583                       4096);
1584     assert(bo);
1585     render_state->vb.vertex_buffer = bo;
1586
1587     /* WM */
1588     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1589     bo = dri_bo_alloc(i965->intel.bufmgr,
1590                       "surface state & binding table",
1591                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1592                       4096);
1593     assert(bo);
1594     render_state->wm.surface_state_binding_table_bo = bo;
1595
1596     dri_bo_unreference(render_state->wm.sampler);
1597     bo = dri_bo_alloc(i965->intel.bufmgr,
1598                       "sampler state",
1599                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1600                       4096);
1601     assert(bo);
1602     render_state->wm.sampler = bo;
1603     render_state->wm.sampler_count = 0;
1604
1605     /* COLOR CALCULATOR */
1606     dri_bo_unreference(render_state->cc.state);
1607     bo = dri_bo_alloc(i965->intel.bufmgr,
1608                       "color calc state",
1609                       sizeof(struct gen6_color_calc_state),
1610                       4096);
1611     assert(bo);
1612     render_state->cc.state = bo;
1613
1614     /* CC VIEWPORT */
1615     dri_bo_unreference(render_state->cc.viewport);
1616     bo = dri_bo_alloc(i965->intel.bufmgr,
1617                       "cc viewport",
1618                       sizeof(struct i965_cc_viewport),
1619                       4096);
1620     assert(bo);
1621     render_state->cc.viewport = bo;
1622
1623     /* BLEND STATE */
1624     dri_bo_unreference(render_state->cc.blend);
1625     bo = dri_bo_alloc(i965->intel.bufmgr,
1626                       "blend state",
1627                       sizeof(struct gen6_blend_state),
1628                       4096);
1629     assert(bo);
1630     render_state->cc.blend = bo;
1631
1632     /* DEPTH & STENCIL STATE */
1633     dri_bo_unreference(render_state->cc.depth_stencil);
1634     bo = dri_bo_alloc(i965->intel.bufmgr,
1635                       "depth & stencil state",
1636                       sizeof(struct gen6_depth_stencil_state),
1637                       4096);
1638     assert(bo);
1639     render_state->cc.depth_stencil = bo;
1640 }
1641
1642 static void
1643 gen6_render_color_calc_state(VADriverContextP ctx)
1644 {
1645     struct i965_driver_data *i965 = i965_driver_data(ctx);
1646     struct i965_render_state *render_state = &i965->render_state;
1647     struct gen6_color_calc_state *color_calc_state;
1648     
1649     dri_bo_map(render_state->cc.state, 1);
1650     assert(render_state->cc.state->virtual);
1651     color_calc_state = render_state->cc.state->virtual;
1652     memset(color_calc_state, 0, sizeof(*color_calc_state));
1653     color_calc_state->constant_r = 1.0;
1654     color_calc_state->constant_g = 0.0;
1655     color_calc_state->constant_b = 1.0;
1656     color_calc_state->constant_a = 1.0;
1657     dri_bo_unmap(render_state->cc.state);
1658 }
1659
1660 static void
1661 gen6_render_blend_state(VADriverContextP ctx)
1662 {
1663     struct i965_driver_data *i965 = i965_driver_data(ctx);
1664     struct i965_render_state *render_state = &i965->render_state;
1665     struct gen6_blend_state *blend_state;
1666     
1667     dri_bo_map(render_state->cc.blend, 1);
1668     assert(render_state->cc.blend->virtual);
1669     blend_state = render_state->cc.blend->virtual;
1670     memset(blend_state, 0, sizeof(*blend_state));
1671     blend_state->blend1.logic_op_enable = 1;
1672     blend_state->blend1.logic_op_func = 0xc;
1673     dri_bo_unmap(render_state->cc.blend);
1674 }
1675
1676 static void
1677 gen6_render_depth_stencil_state(VADriverContextP ctx)
1678 {
1679     struct i965_driver_data *i965 = i965_driver_data(ctx);
1680     struct i965_render_state *render_state = &i965->render_state;
1681     struct gen6_depth_stencil_state *depth_stencil_state;
1682     
1683     dri_bo_map(render_state->cc.depth_stencil, 1);
1684     assert(render_state->cc.depth_stencil->virtual);
1685     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1686     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1687     dri_bo_unmap(render_state->cc.depth_stencil);
1688 }
1689
1690 static void
1691 gen6_render_setup_states(
1692     VADriverContextP   ctx,
1693     VASurfaceID        surface,
1694     const VARectangle *src_rect,
1695     const VARectangle *dst_rect
1696 )
1697 {
1698     i965_render_dest_surface_state(ctx, 0);
1699     i965_render_src_surfaces_state(ctx, surface);
1700     i965_render_sampler(ctx);
1701     i965_render_cc_viewport(ctx);
1702     gen6_render_color_calc_state(ctx);
1703     gen6_render_blend_state(ctx);
1704     gen6_render_depth_stencil_state(ctx);
1705     i965_render_upload_constants(ctx);
1706     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1707 }
1708
1709 static void
1710 gen6_emit_invariant_states(VADriverContextP ctx)
1711 {
1712     struct i965_driver_data *i965 = i965_driver_data(ctx);
1713     struct intel_batchbuffer *batch = i965->batch;
1714
1715     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1716
1717     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1718     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1719               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1720     OUT_BATCH(batch, 0);
1721
1722     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1723     OUT_BATCH(batch, 1);
1724
1725     /* Set system instruction pointer */
1726     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1727     OUT_BATCH(batch, 0);
1728 }
1729
1730 static void
1731 gen6_emit_state_base_address(VADriverContextP ctx)
1732 {
1733     struct i965_driver_data *i965 = i965_driver_data(ctx);
1734     struct intel_batchbuffer *batch = i965->batch;
1735     struct i965_render_state *render_state = &i965->render_state;
1736
1737     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1738     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1739     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1740     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1741     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1742     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1743     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1744     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1745     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1746     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1747 }
1748
1749 static void
1750 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1751 {
1752     struct i965_driver_data *i965 = i965_driver_data(ctx);
1753     struct intel_batchbuffer *batch = i965->batch;
1754     struct i965_render_state *render_state = &i965->render_state;
1755
1756     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1757               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1758               (4 - 2));
1759     OUT_BATCH(batch, 0);
1760     OUT_BATCH(batch, 0);
1761     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1762 }
1763
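/*
 * Minimal URB split for GEN6: the VS receives the documented minimum of
 * 24 entries and the GS receives none, since both stages run in
 * pass-through mode here.
 */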
1764 static void
1765 gen6_emit_urb(VADriverContextP ctx)
1766 {
1767     struct i965_driver_data *i965 = i965_driver_data(ctx);
1768     struct intel_batchbuffer *batch = i965->batch;
1769
1770     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1771     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1772               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1773     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1774               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1775 }
1776
1777 static void
1778 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1779 {
1780     struct i965_driver_data *i965 = i965_driver_data(ctx);
1781     struct intel_batchbuffer *batch = i965->batch;
1782     struct i965_render_state *render_state = &i965->render_state;
1783
1784     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1785     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1786     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1787     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1788 }
1789
1790 static void
1791 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1792 {
1793     struct i965_driver_data *i965 = i965_driver_data(ctx);
1794     struct intel_batchbuffer *batch = i965->batch;
1795     struct i965_render_state *render_state = &i965->render_state;
1796
1797     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1798               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1799               (4 - 2));
1800     OUT_BATCH(batch, 0); /* VS */
1801     OUT_BATCH(batch, 0); /* GS */
1802     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1803 }
1804
1805 static void
1806 gen6_emit_binding_table(VADriverContextP ctx)
1807 {
1808     struct i965_driver_data *i965 = i965_driver_data(ctx);
1809     struct intel_batchbuffer *batch = i965->batch;
1810
1811     /* Binding table pointers */
1812     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1813               GEN6_BINDING_TABLE_MODIFY_PS |
1814               (4 - 2));
1815     OUT_BATCH(batch, 0);                /* vs */
1816     OUT_BATCH(batch, 0);                /* gs */
1817     /* Only the PS uses the binding table */
1818     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1819 }
1820
1821 static void
1822 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1823 {
1824     struct i965_driver_data *i965 = i965_driver_data(ctx);
1825     struct intel_batchbuffer *batch = i965->batch;
1826
1827     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1828     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1829               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1830     OUT_BATCH(batch, 0);
1831     OUT_BATCH(batch, 0);
1832     OUT_BATCH(batch, 0);
1833     OUT_BATCH(batch, 0);
1834     OUT_BATCH(batch, 0);
1835
1836     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1837     OUT_BATCH(batch, 0);
1838 }
1839
1840 static void
1841 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1842 {
1843     i965_render_drawing_rectangle(ctx);
1844 }
1845
1846 static void 
1847 gen6_emit_vs_state(VADriverContextP ctx)
1848 {
1849     struct i965_driver_data *i965 = i965_driver_data(ctx);
1850     struct intel_batchbuffer *batch = i965->batch;
1851
1852     /* disable VS constant buffer */
1853     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1854     OUT_BATCH(batch, 0);
1855     OUT_BATCH(batch, 0);
1856     OUT_BATCH(batch, 0);
1857     OUT_BATCH(batch, 0);
1858         
1859     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1860     OUT_BATCH(batch, 0); /* without VS kernel */
1861     OUT_BATCH(batch, 0);
1862     OUT_BATCH(batch, 0);
1863     OUT_BATCH(batch, 0);
1864     OUT_BATCH(batch, 0); /* pass-through */
1865 }
1866
1867 static void 
1868 gen6_emit_gs_state(VADriverContextP ctx)
1869 {
1870     struct i965_driver_data *i965 = i965_driver_data(ctx);
1871     struct intel_batchbuffer *batch = i965->batch;
1872
1873     /* disable GS constant buffer */
1874     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1875     OUT_BATCH(batch, 0);
1876     OUT_BATCH(batch, 0);
1877     OUT_BATCH(batch, 0);
1878     OUT_BATCH(batch, 0);
1879         
1880     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1881     OUT_BATCH(batch, 0); /* without GS kernel */
1882     OUT_BATCH(batch, 0);
1883     OUT_BATCH(batch, 0);
1884     OUT_BATCH(batch, 0);
1885     OUT_BATCH(batch, 0);
1886     OUT_BATCH(batch, 0); /* pass-through */
1887 }
1888
1889 static void 
1890 gen6_emit_clip_state(VADriverContextP ctx)
1891 {
1892     struct i965_driver_data *i965 = i965_driver_data(ctx);
1893     struct intel_batchbuffer *batch = i965->batch;
1894
1895     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1896     OUT_BATCH(batch, 0);
1897     OUT_BATCH(batch, 0); /* pass-through */
1898     OUT_BATCH(batch, 0);
1899 }
1900
1901 static void 
1902 gen6_emit_sf_state(VADriverContextP ctx)
1903 {
1904     struct i965_driver_data *i965 = i965_driver_data(ctx);
1905     struct intel_batchbuffer *batch = i965->batch;
1906
1907     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
1908     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
1909               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
1910               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
1911     OUT_BATCH(batch, 0);
1912     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
1913     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
1914     OUT_BATCH(batch, 0);
1915     OUT_BATCH(batch, 0);
1916     OUT_BATCH(batch, 0);
1917     OUT_BATCH(batch, 0);
1918     OUT_BATCH(batch, 0); /* DW9 */
1919     OUT_BATCH(batch, 0);
1920     OUT_BATCH(batch, 0);
1921     OUT_BATCH(batch, 0);
1922     OUT_BATCH(batch, 0);
1923     OUT_BATCH(batch, 0); /* DW14 */
1924     OUT_BATCH(batch, 0);
1925     OUT_BATCH(batch, 0);
1926     OUT_BATCH(batch, 0);
1927     OUT_BATCH(batch, 0);
1928     OUT_BATCH(batch, 0); /* DW19 */
1929 }
1930
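/*
 * Bind the pixel-shader push constants and kernel. curbe.bo carries the
 * constants filled in earlier by i965_render_upload_constants(); the
 * kernel index selects either the planar YUV->RGB composite shader or
 * the ARGB subpicture shader.
 */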
1931 static void 
1932 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
1933 {
1934     struct i965_driver_data *i965 = i965_driver_data(ctx);
1935     struct intel_batchbuffer *batch = i965->batch;
1936     struct i965_render_state *render_state = &i965->render_state;
1937
1938     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
1939               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
1940               (5 - 2));
1941     OUT_RELOC(batch, 
1942               render_state->curbe.bo,
1943               I915_GEM_DOMAIN_INSTRUCTION, 0,
1944               0);
1945     OUT_BATCH(batch, 0);
1946     OUT_BATCH(batch, 0);
1947     OUT_BATCH(batch, 0);
1948
1949     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
1950     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
1951               I915_GEM_DOMAIN_INSTRUCTION, 0,
1952               0);
1953     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
1954               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
1955     OUT_BATCH(batch, 0);
1956     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
1957     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
1958               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
1959               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
1960     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
1961               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
1962     OUT_BATCH(batch, 0);
1963     OUT_BATCH(batch, 0);
1964 }
1965
1966 static void
1967 gen6_emit_vertex_element_state(VADriverContextP ctx)
1968 {
1969     struct i965_driver_data *i965 = i965_driver_data(ctx);
1970     struct intel_batchbuffer *batch = i965->batch;
1971
1972     /* Set up our vertex elements, sourced from the single vertex buffer. */
1973     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
1974     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
1975     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1976               GEN6_VE0_VALID |
1977               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1978               (0 << VE0_OFFSET_SHIFT));
1979     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
1980               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1981               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1982               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1983     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
1984     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1985               GEN6_VE0_VALID |
1986               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1987               (8 << VE0_OFFSET_SHIFT));
1988     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
1989               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1990               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1991               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1992 }
1993
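/*
 * Point the vertex fetcher at the vertex buffer and draw. Each vertex
 * is four floats (x, y, s, t), hence the 16-byte pitch, and the second
 * relocation (byte offset 12 * 4) marks the end of the three vertices.
 * A RECTLIST takes only three corners; the hardware infers the fourth.
 */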
1994 static void
1995 gen6_emit_vertices(VADriverContextP ctx)
1996 {
1997     struct i965_driver_data *i965 = i965_driver_data(ctx);
1998     struct intel_batchbuffer *batch = i965->batch;
1999     struct i965_render_state *render_state = &i965->render_state;
2000
2001     BEGIN_BATCH(batch, 11);
2002     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
2003     OUT_BATCH(batch, 
2004               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2005               GEN6_VB0_VERTEXDATA |
2006               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2007     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2008     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2009     OUT_BATCH(batch, 0);
2010
2011     OUT_BATCH(batch, 
2012               CMD_3DPRIMITIVE |
2013               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2014               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2015               (0 << 9) |
2016               4);
2017     OUT_BATCH(batch, 3); /* vertex count per instance */
2018     OUT_BATCH(batch, 0); /* start vertex offset */
2019     OUT_BATCH(batch, 1); /* single instance */
2020     OUT_BATCH(batch, 0); /* start instance location */
2021     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2022     ADVANCE_BATCH(batch);
2023 }
2024
2025 static void
2026 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2027 {
2028     struct i965_driver_data *i965 = i965_driver_data(ctx);
2029     struct intel_batchbuffer *batch = i965->batch;
2030
2031     intel_batchbuffer_start_atomic(batch, 0x1000);
2032     intel_batchbuffer_emit_mi_flush(batch);
2033     gen6_emit_invariant_states(ctx);
2034     gen6_emit_state_base_address(ctx);
2035     gen6_emit_viewport_state_pointers(ctx);
2036     gen6_emit_urb(ctx);
2037     gen6_emit_cc_state_pointers(ctx);
2038     gen6_emit_sampler_state_pointers(ctx);
2039     gen6_emit_vs_state(ctx);
2040     gen6_emit_gs_state(ctx);
2041     gen6_emit_clip_state(ctx);
2042     gen6_emit_sf_state(ctx);
2043     gen6_emit_wm_state(ctx, kernel);
2044     gen6_emit_binding_table(ctx);
2045     gen6_emit_depth_buffer_state(ctx);
2046     gen6_emit_drawing_rectangle(ctx);
2047     gen6_emit_vertex_element_state(ctx);
2048     gen6_emit_vertices(ctx);
2049     intel_batchbuffer_end_atomic(batch);
2050 }
2051
2052 static void
2053 gen6_render_put_surface(
2054     VADriverContextP   ctx,
2055     VASurfaceID        surface,
2056     const VARectangle *src_rect,
2057     const VARectangle *dst_rect,
2058     unsigned int       flags
2059 )
2060 {
2061     struct i965_driver_data *i965 = i965_driver_data(ctx);
2062     struct intel_batchbuffer *batch = i965->batch;
2063
2064     gen6_render_initialize(ctx);
2065     gen6_render_setup_states(ctx, surface, src_rect, dst_rect);
2066     i965_clear_dest_region(ctx);
2067     gen6_render_emit_states(ctx, PS_KERNEL);
2068     intel_batchbuffer_flush(batch);
2069 }
2070
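/*
 * Subpictures are composited with standard "source over" blending:
 * dst = src.alpha * src + (1 - src.alpha) * dst, clamped to [0, 1].
 */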
2071 static void
2072 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2073 {
2074     struct i965_driver_data *i965 = i965_driver_data(ctx);
2075     struct i965_render_state *render_state = &i965->render_state;
2076     struct gen6_blend_state *blend_state;
2077
2079     dri_bo_map(render_state->cc.blend, 1);
2080     assert(render_state->cc.blend->virtual);
2081     blend_state = render_state->cc.blend->virtual;
2082     memset(blend_state, 0, sizeof(*blend_state));
2083     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2084     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2085     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2086     blend_state->blend0.blend_enable = 1;
2087     blend_state->blend1.post_blend_clamp_enable = 1;
2088     blend_state->blend1.pre_blend_clamp_enable = 1;
2089     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2090     dri_bo_unmap(render_state->cc.blend);
2091 }
2092
2093 static void
2094 gen6_subpicture_render_setup_states(
2095     VADriverContextP   ctx,
2096     VASurfaceID        surface,
2097     const VARectangle *src_rect,
2098     const VARectangle *dst_rect
2099 )
2100 {
2101     i965_render_dest_surface_state(ctx, 0);
2102     i965_subpic_render_src_surfaces_state(ctx, surface);
2103     i965_render_sampler(ctx);
2104     i965_render_cc_viewport(ctx);
2105     gen6_render_color_calc_state(ctx);
2106     gen6_subpicture_render_blend_state(ctx);
2107     gen6_render_depth_stencil_state(ctx);
2108     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2109 }
2110
2111 static void
2112 gen6_render_put_subpicture(
2113     VADriverContextP   ctx,
2114     VASurfaceID        surface,
2115     const VARectangle *src_rect,
2116     const VARectangle *dst_rect
2117 )
2118 {
2119     struct i965_driver_data *i965 = i965_driver_data(ctx);
2120     struct intel_batchbuffer *batch = i965->batch;
2121     struct object_surface *obj_surface = SURFACE(surface);
2122     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2123
2124     assert(obj_subpic);
2125     gen6_render_initialize(ctx);
2126     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2127     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2128     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2129     intel_batchbuffer_flush(batch);
2130 }
2131
2132 /*
2133  * for GEN7
2134  */
2135 static void 
2136 gen7_render_initialize(VADriverContextP ctx)
2137 {
2138     struct i965_driver_data *i965 = i965_driver_data(ctx);
2139     struct i965_render_state *render_state = &i965->render_state;
2140     dri_bo *bo;
2141
2142     /* VERTEX BUFFER */
2143     dri_bo_unreference(render_state->vb.vertex_buffer);
2144     bo = dri_bo_alloc(i965->intel.bufmgr,
2145                       "vertex buffer",
2146                       4096,
2147                       4096);
2148     assert(bo);
2149     render_state->vb.vertex_buffer = bo;
2150
2151     /* WM */
2152     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2153     bo = dri_bo_alloc(i965->intel.bufmgr,
2154                       "surface state & binding table",
2155                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2156                       4096);
2157     assert(bo);
2158     render_state->wm.surface_state_binding_table_bo = bo;
2159
2160     dri_bo_unreference(render_state->wm.sampler);
2161     bo = dri_bo_alloc(i965->intel.bufmgr,
2162                       "sampler state",
2163                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2164                       4096);
2165     assert(bo);
2166     render_state->wm.sampler = bo;
2167     render_state->wm.sampler_count = 0;
2168
2169     /* COLOR CALCULATOR */
2170     dri_bo_unreference(render_state->cc.state);
2171     bo = dri_bo_alloc(i965->intel.bufmgr,
2172                       "color calc state",
2173                       sizeof(struct gen6_color_calc_state),
2174                       4096);
2175     assert(bo);
2176     render_state->cc.state = bo;
2177
2178     /* CC VIEWPORT */
2179     dri_bo_unreference(render_state->cc.viewport);
2180     bo = dri_bo_alloc(i965->intel.bufmgr,
2181                       "cc viewport",
2182                       sizeof(struct i965_cc_viewport),
2183                       4096);
2184     assert(bo);
2185     render_state->cc.viewport = bo;
2186
2187     /* BLEND STATE */
2188     dri_bo_unreference(render_state->cc.blend);
2189     bo = dri_bo_alloc(i965->intel.bufmgr,
2190                       "blend state",
2191                       sizeof(struct gen6_blend_state),
2192                       4096);
2193     assert(bo);
2194     render_state->cc.blend = bo;
2195
2196     /* DEPTH & STENCIL STATE */
2197     dri_bo_unreference(render_state->cc.depth_stencil);
2198     bo = dri_bo_alloc(i965->intel.bufmgr,
2199                       "depth & stencil state",
2200                       sizeof(struct gen6_depth_stencil_state),
2201                       4096);
2202     assert(bo);
2203     render_state->cc.depth_stencil = bo;
2204 }
2205
2206 static void
2207 gen7_render_color_calc_state(VADriverContextP ctx)
2208 {
2209     struct i965_driver_data *i965 = i965_driver_data(ctx);
2210     struct i965_render_state *render_state = &i965->render_state;
2211     struct gen6_color_calc_state *color_calc_state;
2212     
2213     dri_bo_map(render_state->cc.state, 1);
2214     assert(render_state->cc.state->virtual);
2215     color_calc_state = render_state->cc.state->virtual;
2216     memset(color_calc_state, 0, sizeof(*color_calc_state));
2217     color_calc_state->constant_r = 1.0;
2218     color_calc_state->constant_g = 0.0;
2219     color_calc_state->constant_b = 1.0;
2220     color_calc_state->constant_a = 1.0;
2221     dri_bo_unmap(render_state->cc.state);
2222 }
2223
2224 static void
2225 gen7_render_blend_state(VADriverContextP ctx)
2226 {
2227     struct i965_driver_data *i965 = i965_driver_data(ctx);
2228     struct i965_render_state *render_state = &i965->render_state;
2229     struct gen6_blend_state *blend_state;
2230     
2231     dri_bo_map(render_state->cc.blend, 1);
2232     assert(render_state->cc.blend->virtual);
2233     blend_state = render_state->cc.blend->virtual;
2234     memset(blend_state, 0, sizeof(*blend_state));
2235     blend_state->blend1.logic_op_enable = 1;
2236     blend_state->blend1.logic_op_func = 0xc;
2237     blend_state->blend1.pre_blend_clamp_enable = 1;
2238     dri_bo_unmap(render_state->cc.blend);
2239 }
2240
2241 static void
2242 gen7_render_depth_stencil_state(VADriverContextP ctx)
2243 {
2244     struct i965_driver_data *i965 = i965_driver_data(ctx);
2245     struct i965_render_state *render_state = &i965->render_state;
2246     struct gen6_depth_stencil_state *depth_stencil_state;
2247     
2248     dri_bo_map(render_state->cc.depth_stencil, 1);
2249     assert(render_state->cc.depth_stencil->virtual);
2250     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2251     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2252     dri_bo_unmap(render_state->cc.depth_stencil);
2253 }
2254
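/*
 * GEN7 uses its own sampler-state layout. Every sampler is programmed
 * for bilinear filtering with clamp-to-edge addressing; sampler_count
 * is expected to have been set alongside the source surface states
 * (one sampler per plane for planar YUV).
 */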
2255 static void 
2256 gen7_render_sampler(VADriverContextP ctx)
2257 {
2258     struct i965_driver_data *i965 = i965_driver_data(ctx);
2259     struct i965_render_state *render_state = &i965->render_state;
2260     struct gen7_sampler_state *sampler_state;
2261     int i;
2262     
2263     assert(render_state->wm.sampler_count > 0);
2264     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2265
2266     dri_bo_map(render_state->wm.sampler, 1);
2267     assert(render_state->wm.sampler->virtual);
2268     sampler_state = render_state->wm.sampler->virtual;
2269     for (i = 0; i < render_state->wm.sampler_count; i++) {
2270         memset(sampler_state, 0, sizeof(*sampler_state));
2271         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2272         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2273         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2274         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2275         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2276         sampler_state++;
2277     }
2278
2279     dri_bo_unmap(render_state->wm.sampler);
2280 }
2281
2282 static void
2283 gen7_render_setup_states(
2284     VADriverContextP   ctx,
2285     VASurfaceID        surface,
2286     const VARectangle *src_rect,
2287     const VARectangle *dst_rect
2288 )
2289 {
2290     i965_render_dest_surface_state(ctx, 0);
2291     i965_render_src_surfaces_state(ctx, surface);
2292     gen7_render_sampler(ctx);
2293     i965_render_cc_viewport(ctx);
2294     gen7_render_color_calc_state(ctx);
2295     gen7_render_blend_state(ctx);
2296     gen7_render_depth_stencil_state(ctx);
2297     i965_render_upload_constants(ctx);
2298     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2299 }
2300
2301 static void
2302 gen7_emit_invariant_states(VADriverContextP ctx)
2303 {
2304     struct i965_driver_data *i965 = i965_driver_data(ctx);
2305     struct intel_batchbuffer *batch = i965->batch;
2306
2307     BEGIN_BATCH(batch, 1);
2308     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2309     ADVANCE_BATCH(batch);
2310
2311     BEGIN_BATCH(batch, 4);
2312     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2313     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2314               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2315     OUT_BATCH(batch, 0);
2316     OUT_BATCH(batch, 0);
2317     ADVANCE_BATCH(batch);
2318
2319     BEGIN_BATCH(batch, 2);
2320     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2321     OUT_BATCH(batch, 1);
2322     ADVANCE_BATCH(batch);
2323
2324     /* Set system instruction pointer */
2325     BEGIN_BATCH(batch, 2);
2326     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2327     OUT_BATCH(batch, 0);
2328     ADVANCE_BATCH(batch);
2329 }
2330
2331 static void
2332 gen7_emit_state_base_address(VADriverContextP ctx)
2333 {
2334     struct i965_driver_data *i965 = i965_driver_data(ctx);
2335     struct intel_batchbuffer *batch = i965->batch;
2336     struct i965_render_state *render_state = &i965->render_state;
2337
2338     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2339     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2340     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2341     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2342     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2343     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2344     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2345     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2346     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2347     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2348 }
2349
2350 static void
2351 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2352 {
2353     struct i965_driver_data *i965 = i965_driver_data(ctx);
2354     struct intel_batchbuffer *batch = i965->batch;
2355     struct i965_render_state *render_state = &i965->render_state;
2356
2357     BEGIN_BATCH(batch, 2);
2358     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2359     OUT_RELOC(batch,
2360               render_state->cc.viewport,
2361               I915_GEM_DOMAIN_INSTRUCTION, 0,
2362               0);
2363     ADVANCE_BATCH(batch);
2364
2365     BEGIN_BATCH(batch, 2);
2366     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2367     OUT_BATCH(batch, 0);
2368     ADVANCE_BATCH(batch);
2369 }
2370
2371 /*
2372  * URB layout on GEN7 
2373  * ----------------------------------------
2374  * | PS Push Constants (8KB) | VS entries |
2375  * ----------------------------------------
2376  */
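/*
 * Note that 3DSTATE_PUSH_CONSTANT_ALLOC_PS takes its size in 1KB units
 * while the URB_{VS,GS,HS,DS} starting addresses below are in 8KB
 * units, so a starting address of 1 places the VS entries immediately
 * after the 8KB push-constant block.
 */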
2377 static void
2378 gen7_emit_urb(VADriverContextP ctx)
2379 {
2380     struct i965_driver_data *i965 = i965_driver_data(ctx);
2381     struct intel_batchbuffer *batch = i965->batch;
2382
2383     BEGIN_BATCH(batch, 2);
2384     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2385     OUT_BATCH(batch, 8); /* in 1KBs */
2386     ADVANCE_BATCH(batch);
2387
2388     BEGIN_BATCH(batch, 2);
2389     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2390     OUT_BATCH(batch, 
2391               (32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
2392               ((2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
2393               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2394     ADVANCE_BATCH(batch);
2395
2396     BEGIN_BATCH(batch, 2);
2397     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2398     OUT_BATCH(batch,
2399               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2400               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2401     ADVANCE_BATCH(batch);
2402
2403     BEGIN_BATCH(batch, 2);
2404     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2405     OUT_BATCH(batch,
2406               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2407               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2408     ADVANCE_BATCH(batch);
2409
2410     BEGIN_BATCH(batch, 2);
2411     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2412     OUT_BATCH(batch,
2413               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2414               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2415     ADVANCE_BATCH(batch);
2416 }
2417
2418 static void
2419 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2420 {
2421     struct i965_driver_data *i965 = i965_driver_data(ctx);
2422     struct intel_batchbuffer *batch = i965->batch;
2423     struct i965_render_state *render_state = &i965->render_state;
2424
2425     BEGIN_BATCH(batch, 2);
2426     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2427     OUT_RELOC(batch,
2428               render_state->cc.state,
2429               I915_GEM_DOMAIN_INSTRUCTION, 0,
2430               1);
2431     ADVANCE_BATCH(batch);
2432
2433     BEGIN_BATCH(batch, 2);
2434     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2435     OUT_RELOC(batch,
2436               render_state->cc.blend,
2437               I915_GEM_DOMAIN_INSTRUCTION, 0,
2438               1);
2439     ADVANCE_BATCH(batch);
2440
2441     BEGIN_BATCH(batch, 2);
2442     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2443     OUT_RELOC(batch,
2444               render_state->cc.depth_stencil,
2445               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2446               1);
2447     ADVANCE_BATCH(batch);
2448 }
2449
2450 static void
2451 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2452 {
2453     struct i965_driver_data *i965 = i965_driver_data(ctx);
2454     struct intel_batchbuffer *batch = i965->batch;
2455     struct i965_render_state *render_state = &i965->render_state;
2456
2457     BEGIN_BATCH(batch, 2);
2458     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2459     OUT_RELOC(batch,
2460               render_state->wm.sampler,
2461               I915_GEM_DOMAIN_INSTRUCTION, 0,
2462               0);
2463     ADVANCE_BATCH(batch);
2464 }
2465
2466 static void
2467 gen7_emit_binding_table(VADriverContextP ctx)
2468 {
2469     struct i965_driver_data *i965 = i965_driver_data(ctx);
2470     struct intel_batchbuffer *batch = i965->batch;
2471
2472     BEGIN_BATCH(batch, 2);
2473     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2474     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2475     ADVANCE_BATCH(batch);
2476 }
2477
2478 static void
2479 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2480 {
2481     struct i965_driver_data *i965 = i965_driver_data(ctx);
2482     struct intel_batchbuffer *batch = i965->batch;
2483
2484     BEGIN_BATCH(batch, 7);
2485     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2486     OUT_BATCH(batch,
2487               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2488               (I965_SURFACE_NULL << 29));
2489     OUT_BATCH(batch, 0);
2490     OUT_BATCH(batch, 0);
2491     OUT_BATCH(batch, 0);
2492     OUT_BATCH(batch, 0);
2493     OUT_BATCH(batch, 0);
2494     ADVANCE_BATCH(batch);
2495
2496     BEGIN_BATCH(batch, 3);
2497     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2498     OUT_BATCH(batch, 0);
2499     OUT_BATCH(batch, 0);
2500     ADVANCE_BATCH(batch);
2501 }
2502
2503 static void
2504 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2505 {
2506     i965_render_drawing_rectangle(ctx);
2507 }
2508
2509 static void 
2510 gen7_emit_vs_state(VADriverContextP ctx)
2511 {
2512     struct i965_driver_data *i965 = i965_driver_data(ctx);
2513     struct intel_batchbuffer *batch = i965->batch;
2514
2515     /* disable VS constant buffer */
2516     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2517     OUT_BATCH(batch, 0);
2518     OUT_BATCH(batch, 0);
2519     OUT_BATCH(batch, 0);
2520     OUT_BATCH(batch, 0);
2521     OUT_BATCH(batch, 0);
2522     OUT_BATCH(batch, 0);
2523         
2524     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2525     OUT_BATCH(batch, 0); /* without VS kernel */
2526     OUT_BATCH(batch, 0);
2527     OUT_BATCH(batch, 0);
2528     OUT_BATCH(batch, 0);
2529     OUT_BATCH(batch, 0); /* pass-through */
2530 }
2531
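/*
 * GEN7 added hull/domain shaders, the tessellator and stream output as
 * programmable stages; the composite path uses none of them, so each is
 * explicitly nulled out along with its binding table.
 */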
2532 static void 
2533 gen7_emit_bypass_state(VADriverContextP ctx)
2534 {
2535     struct i965_driver_data *i965 = i965_driver_data(ctx);
2536     struct intel_batchbuffer *batch = i965->batch;
2537
2538     /* bypass GS */
2539     BEGIN_BATCH(batch, 7);
2540     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2541     OUT_BATCH(batch, 0);
2542     OUT_BATCH(batch, 0);
2543     OUT_BATCH(batch, 0);
2544     OUT_BATCH(batch, 0);
2545     OUT_BATCH(batch, 0);
2546     OUT_BATCH(batch, 0);
2547     ADVANCE_BATCH(batch);
2548
2549     BEGIN_BATCH(batch, 7);      
2550     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2551     OUT_BATCH(batch, 0); /* without GS kernel */
2552     OUT_BATCH(batch, 0);
2553     OUT_BATCH(batch, 0);
2554     OUT_BATCH(batch, 0);
2555     OUT_BATCH(batch, 0);
2556     OUT_BATCH(batch, 0); /* pass-through */
2557     ADVANCE_BATCH(batch);
2558
2559     BEGIN_BATCH(batch, 2);
2560     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2561     OUT_BATCH(batch, 0);
2562     ADVANCE_BATCH(batch);
2563
2564     /* disable HS */
2565     BEGIN_BATCH(batch, 7);
2566     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2567     OUT_BATCH(batch, 0);
2568     OUT_BATCH(batch, 0);
2569     OUT_BATCH(batch, 0);
2570     OUT_BATCH(batch, 0);
2571     OUT_BATCH(batch, 0);
2572     OUT_BATCH(batch, 0);
2573     ADVANCE_BATCH(batch);
2574
2575     BEGIN_BATCH(batch, 7);
2576     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2577     OUT_BATCH(batch, 0);
2578     OUT_BATCH(batch, 0);
2579     OUT_BATCH(batch, 0);
2580     OUT_BATCH(batch, 0);
2581     OUT_BATCH(batch, 0);
2582     OUT_BATCH(batch, 0);
2583     ADVANCE_BATCH(batch);
2584
2585     BEGIN_BATCH(batch, 2);
2586     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2587     OUT_BATCH(batch, 0);
2588     ADVANCE_BATCH(batch);
2589
2590     /* Disable TE */
2591     BEGIN_BATCH(batch, 4);
2592     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2593     OUT_BATCH(batch, 0);
2594     OUT_BATCH(batch, 0);
2595     OUT_BATCH(batch, 0);
2596     ADVANCE_BATCH(batch);
2597
2598     /* Disable DS */
2599     BEGIN_BATCH(batch, 7);
2600     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2601     OUT_BATCH(batch, 0);
2602     OUT_BATCH(batch, 0);
2603     OUT_BATCH(batch, 0);
2604     OUT_BATCH(batch, 0);
2605     OUT_BATCH(batch, 0);
2606     OUT_BATCH(batch, 0);
2607     ADVANCE_BATCH(batch);
2608
2609     BEGIN_BATCH(batch, 6);
2610     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2611     OUT_BATCH(batch, 0);
2612     OUT_BATCH(batch, 0);
2613     OUT_BATCH(batch, 0);
2614     OUT_BATCH(batch, 0);
2615     OUT_BATCH(batch, 0);
2616     ADVANCE_BATCH(batch);
2617
2618     BEGIN_BATCH(batch, 2);
2619     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2620     OUT_BATCH(batch, 0);
2621     ADVANCE_BATCH(batch);
2622
2623     /* Disable STREAMOUT */
2624     BEGIN_BATCH(batch, 3);
2625     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2626     OUT_BATCH(batch, 0);
2627     OUT_BATCH(batch, 0);
2628     ADVANCE_BATCH(batch);
2629 }
2630
2631 static void 
2632 gen7_emit_clip_state(VADriverContextP ctx)
2633 {
2634     struct i965_driver_data *i965 = i965_driver_data(ctx);
2635     struct intel_batchbuffer *batch = i965->batch;
2636
2637     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2638     OUT_BATCH(batch, 0);
2639     OUT_BATCH(batch, 0); /* pass-through */
2640     OUT_BATCH(batch, 0);
2641 }
2642
2643 static void 
2644 gen7_emit_sf_state(VADriverContextP ctx)
2645 {
2646     struct i965_driver_data *i965 = i965_driver_data(ctx);
2647     struct intel_batchbuffer *batch = i965->batch;
2648
2649     BEGIN_BATCH(batch, 14);
2650     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2651     OUT_BATCH(batch,
2652               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2653               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2654               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2655     OUT_BATCH(batch, 0);
2656     OUT_BATCH(batch, 0);
2657     OUT_BATCH(batch, 0); /* DW4 */
2658     OUT_BATCH(batch, 0);
2659     OUT_BATCH(batch, 0);
2660     OUT_BATCH(batch, 0);
2661     OUT_BATCH(batch, 0);
2662     OUT_BATCH(batch, 0); /* DW9 */
2663     OUT_BATCH(batch, 0);
2664     OUT_BATCH(batch, 0);
2665     OUT_BATCH(batch, 0);
2666     OUT_BATCH(batch, 0);
2667     ADVANCE_BATCH(batch);
2668
2669     BEGIN_BATCH(batch, 7);
2670     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2671     OUT_BATCH(batch, 0);
2672     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2673     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2674     OUT_BATCH(batch, 0);
2675     OUT_BATCH(batch, 0);
2676     OUT_BATCH(batch, 0);
2677     ADVANCE_BATCH(batch);
2678 }
2679
2680 static void 
2681 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2682 {
2683     struct i965_driver_data *i965 = i965_driver_data(ctx);
2684     struct intel_batchbuffer *batch = i965->batch;
2685     struct i965_render_state *render_state = &i965->render_state;
2686
2687     BEGIN_BATCH(batch, 3);
2688     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2689     OUT_BATCH(batch,
2690               GEN7_WM_DISPATCH_ENABLE |
2691               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2692     OUT_BATCH(batch, 0);
2693     ADVANCE_BATCH(batch);
2694
2695     BEGIN_BATCH(batch, 7);
2696     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2697     OUT_BATCH(batch, 1);
2698     OUT_BATCH(batch, 0);
2699     OUT_RELOC(batch, 
2700               render_state->curbe.bo,
2701               I915_GEM_DOMAIN_INSTRUCTION, 0,
2702               0);
2703     OUT_BATCH(batch, 0);
2704     OUT_BATCH(batch, 0);
2705     OUT_BATCH(batch, 0);
2706     ADVANCE_BATCH(batch);
2707
2708     BEGIN_BATCH(batch, 8);
2709     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2710     OUT_RELOC(batch, 
2711               render_state->render_kernels[kernel].bo,
2712               I915_GEM_DOMAIN_INSTRUCTION, 0,
2713               0);
2714     OUT_BATCH(batch, 
2715               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2716               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2717     OUT_BATCH(batch, 0); /* scratch space base offset */
2718     OUT_BATCH(batch, 
2719               ((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
2720               GEN7_PS_PUSH_CONSTANT_ENABLE |
2721               GEN7_PS_ATTRIBUTE_ENABLE |
2722               GEN7_PS_16_DISPATCH_ENABLE);
2723     OUT_BATCH(batch, 
2724               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2725     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2726     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2727     ADVANCE_BATCH(batch);
2728 }
2729
2730 static void
2731 gen7_emit_vertex_element_state(VADriverContextP ctx)
2732 {
2733     struct i965_driver_data *i965 = i965_driver_data(ctx);
2734     struct intel_batchbuffer *batch = i965->batch;
2735
2736     /* Set up our vertex elements, sourced from the single vertex buffer. */
2737     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2738     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2739     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2740               GEN6_VE0_VALID |
2741               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2742               (0 << VE0_OFFSET_SHIFT));
2743     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2744               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2745               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2746               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2747     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2748     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2749               GEN6_VE0_VALID |
2750               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2751               (8 << VE0_OFFSET_SHIFT));
2752     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2753               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2754               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2755               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2756 }
2757
2758 static void
2759 gen7_emit_vertices(VADriverContextP ctx)
2760 {
2761     struct i965_driver_data *i965 = i965_driver_data(ctx);
2762     struct intel_batchbuffer *batch = i965->batch;
2763     struct i965_render_state *render_state = &i965->render_state;
2764
2765     BEGIN_BATCH(batch, 5);
2766     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2767     OUT_BATCH(batch, 
2768               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2769               GEN6_VB0_VERTEXDATA |
2770               GEN7_VB0_ADDRESS_MODIFYENABLE |
2771               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2772     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2773     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2774     OUT_BATCH(batch, 0);
2775     ADVANCE_BATCH(batch);
2776
2777     BEGIN_BATCH(batch, 7);
2778     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2779     OUT_BATCH(batch,
2780               _3DPRIM_RECTLIST |
2781               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2782     OUT_BATCH(batch, 3); /* vertex count per instance */
2783     OUT_BATCH(batch, 0); /* start vertex offset */
2784     OUT_BATCH(batch, 1); /* single instance */
2785     OUT_BATCH(batch, 0); /* start instance location */
2786     OUT_BATCH(batch, 0);
2787     ADVANCE_BATCH(batch);
2788 }
2789
2790 static void
2791 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2792 {
2793     struct i965_driver_data *i965 = i965_driver_data(ctx);
2794     struct intel_batchbuffer *batch = i965->batch;
2795
2796     intel_batchbuffer_start_atomic(batch, 0x1000);
2797     intel_batchbuffer_emit_mi_flush(batch);
2798     gen7_emit_invariant_states(ctx);
2799     gen7_emit_state_base_address(ctx);
2800     gen7_emit_viewport_state_pointers(ctx);
2801     gen7_emit_urb(ctx);
2802     gen7_emit_cc_state_pointers(ctx);
2803     gen7_emit_sampler_state_pointers(ctx);
2804     gen7_emit_bypass_state(ctx);
2805     gen7_emit_vs_state(ctx);
2806     gen7_emit_clip_state(ctx);
2807     gen7_emit_sf_state(ctx);
2808     gen7_emit_wm_state(ctx, kernel);
2809     gen7_emit_binding_table(ctx);
2810     gen7_emit_depth_buffer_state(ctx);
2811     gen7_emit_drawing_rectangle(ctx);
2812     gen7_emit_vertex_element_state(ctx);
2813     gen7_emit_vertices(ctx);
2814     intel_batchbuffer_end_atomic(batch);
2815 }
2816
2817 static void
2818 gen7_render_put_surface(
2819     VADriverContextP   ctx,
2820     VASurfaceID        surface,
2821     const VARectangle *src_rect,
2822     const VARectangle *dst_rect,
2823     unsigned int       flags
2824 )
2825 {
2826     struct i965_driver_data *i965 = i965_driver_data(ctx);
2827     struct intel_batchbuffer *batch = i965->batch;
2828
2829     gen7_render_initialize(ctx);
2830     gen7_render_setup_states(ctx, surface, src_rect, dst_rect);
2831     i965_clear_dest_region(ctx);
2832     gen7_render_emit_states(ctx, PS_KERNEL);
2833     intel_batchbuffer_flush(batch);
2834 }
2835
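/* Same "source over" blend as the GEN6 subpicture path above. */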
2836 static void
2837 gen7_subpicture_render_blend_state(VADriverContextP ctx)
2838 {
2839     struct i965_driver_data *i965 = i965_driver_data(ctx);
2840     struct i965_render_state *render_state = &i965->render_state;
2841     struct gen6_blend_state *blend_state;
2842
2844     dri_bo_map(render_state->cc.blend, 1);
2845     assert(render_state->cc.blend->virtual);
2846     blend_state = render_state->cc.blend->virtual;
2847     memset(blend_state, 0, sizeof(*blend_state));
2848     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2849     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2850     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2851     blend_state->blend0.blend_enable = 1;
2852     blend_state->blend1.post_blend_clamp_enable = 1;
2853     blend_state->blend1.pre_blend_clamp_enable = 1;
2854     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2855     dri_bo_unmap(render_state->cc.blend);
2856 }
2857
2858 static void
2859 gen7_subpicture_render_setup_states(
2860     VADriverContextP   ctx,
2861     VASurfaceID        surface,
2862     const VARectangle *src_rect,
2863     const VARectangle *dst_rect
2864 )
2865 {
2866     i965_render_dest_surface_state(ctx, 0);
2867     i965_subpic_render_src_surfaces_state(ctx, surface);
2868     i965_render_sampler(ctx);
2869     i965_render_cc_viewport(ctx);
2870     gen7_render_color_calc_state(ctx);
2871     gen7_subpicture_render_blend_state(ctx);
2872     gen7_render_depth_stencil_state(ctx);
2873     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2874 }
2875
2876 static void
2877 gen7_render_put_subpicture(
2878     VADriverContextP   ctx,
2879     VASurfaceID        surface,
2880     const VARectangle *src_rect,
2881     const VARectangle *dst_rect
2882 )
2883 {
2884     struct i965_driver_data *i965 = i965_driver_data(ctx);
2885     struct intel_batchbuffer *batch = i965->batch;
2886     struct object_surface *obj_surface = SURFACE(surface);
2887     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2888
2889     assert(obj_subpic);
2890     gen7_render_initialize(ctx);
2891     gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2892     gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2893     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2894     intel_batchbuffer_flush(batch);
2895 }
2896
2897
2898 /*
2899  * global functions
2900  */
2901
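/*
 * These are the entry points used by the rest of the driver: a
 * PutSurface first runs post-processing, then composites with the
 * generation-specific 3D path. A minimal client-side sketch of the call
 * that ends up here (hypothetical va_dpy/drawable variables; error
 * handling omitted):
 *
 *     vaPutSurface(va_dpy, surface, drawable,
 *                  0, 0, src_w, src_h,
 *                  0, 0, dst_w, dst_h,
 *                  NULL, 0, VA_FRAME_PICTURE);
 */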
2902 void
2903 intel_render_put_surface(
2904     VADriverContextP   ctx,
2905     VASurfaceID        surface,
2906     const VARectangle *src_rect,
2907     const VARectangle *dst_rect,
2908     unsigned int       flags
2909 )
2910 {
2911     struct i965_driver_data *i965 = i965_driver_data(ctx);
2912
2913     i965_post_processing(ctx, surface, src_rect, dst_rect, flags);
2914
2915     if (IS_GEN7(i965->intel.device_id))
2916         gen7_render_put_surface(ctx, surface, src_rect, dst_rect, flags);
2917     else if (IS_GEN6(i965->intel.device_id))
2918         gen6_render_put_surface(ctx, surface, src_rect, dst_rect, flags);
2919     else
2920         i965_render_put_surface(ctx, surface, src_rect, dst_rect, flags);
2921 }
2922
2923 void
2924 intel_render_put_subpicture(
2925     VADriverContextP   ctx,
2926     VASurfaceID        surface,
2927     const VARectangle *src_rect,
2928     const VARectangle *dst_rect
2929 )
2930 {
2931     struct i965_driver_data *i965 = i965_driver_data(ctx);
2932
2933     if (IS_GEN7(i965->intel.device_id))
2934         gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2935     else if (IS_GEN6(i965->intel.device_id))
2936         gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2937     else
2938         i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2939 }
2940
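/*
 * One-time initialization: choose the kernel set for the detected GPU
 * generation, upload each shader binary into its own bo, and allocate
 * the shared constant (CURBE) buffer.
 */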
2941 Bool 
2942 i965_render_init(VADriverContextP ctx)
2943 {
2944     struct i965_driver_data *i965 = i965_driver_data(ctx);
2945     struct i965_render_state *render_state = &i965->render_state;
2946     int i;
2947
2948     /* kernel */
2949     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) / 
2950                                  sizeof(render_kernels_gen5[0])));
2951     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) / 
2952                                  sizeof(render_kernels_gen6[0])));
2953
2954     if (IS_GEN7(i965->intel.device_id))
2955         memcpy(render_state->render_kernels, render_kernels_gen7, sizeof(render_state->render_kernels));
2956     else if (IS_GEN6(i965->intel.device_id))
2957         memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
2958     else if (IS_IRONLAKE(i965->intel.device_id))
2959         memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
2960     else
2961         memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
2962
2963     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
2964         struct i965_kernel *kernel = &render_state->render_kernels[i];
2965
2966         if (!kernel->size)
2967             continue;
2968
2969         kernel->bo = dri_bo_alloc(i965->intel.bufmgr, 
2970                                   kernel->name, 
2971                                   kernel->size, 0x1000);
2972         assert(kernel->bo);
2973         dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
2974     }
2975
2976     /* constant buffer */
2977     render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
2978                       "constant buffer",
2979                       4096, 64);
2980     assert(render_state->curbe.bo);
2981     render_state->curbe.upload = 0;
2982
2983     return True;
2984 }
2985
2986 Bool 
2987 i965_render_terminate(VADriverContextP ctx)
2988 {
2989     int i;
2990     struct i965_driver_data *i965 = i965_driver_data(ctx);
2991     struct i965_render_state *render_state = &i965->render_state;
2992
2993     dri_bo_unreference(render_state->curbe.bo);
2994     render_state->curbe.bo = NULL;
2995
2996     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
2997         struct i965_kernel *kernel = &render_state->render_kernels[i];
2998         
2999         dri_bo_unreference(kernel->bo);
3000         kernel->bo = NULL;
3001     }
3002
3003     dri_bo_unreference(render_state->vb.vertex_buffer);
3004     render_state->vb.vertex_buffer = NULL;
3005     dri_bo_unreference(render_state->vs.state);
3006     render_state->vs.state = NULL;
3007     dri_bo_unreference(render_state->sf.state);
3008     render_state->sf.state = NULL;
3009     dri_bo_unreference(render_state->wm.sampler);
3010     render_state->wm.sampler = NULL;
3011     dri_bo_unreference(render_state->wm.state);
3012     render_state->wm.state = NULL;
3013     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
         render_state->wm.surface_state_binding_table_bo = NULL;
3014     dri_bo_unreference(render_state->cc.viewport);
3015     render_state->cc.viewport = NULL;
3016     dri_bo_unreference(render_state->cc.state);
3017     render_state->cc.state = NULL;
3018     dri_bo_unreference(render_state->cc.blend);
3019     render_state->cc.blend = NULL;
3020     dri_bo_unreference(render_state->cc.depth_stencil);
3021     render_state->cc.depth_stencil = NULL;
3022
3023     if (render_state->draw_region) {
3024         dri_bo_unreference(render_state->draw_region->bo);
3025         free(render_state->draw_region);
3026         render_state->draw_region = NULL;
3027     }
3028
3029     return True;
3030 }
3031