i965_drv_video: check the internal format of a surface before rendering
[platform/upstream/libva-intel-driver.git] / src / i965_render.c
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_backend.h>
#include <va/va_dricommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"
#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   (((nreg) + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

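/*
 * A single BO holds all surface states followed by the binding table:
 * MAX_RENDER_SURFACES padded surface state entries come first, then one
 * 32-bit binding table entry per surface (see the allocation in
 * i965_render_initialize()).
 */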
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

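/* Type-pun a float through a union so it can be emitted into the batch
 * buffer as a raw 32-bit word (used by i965_render_constant_color()). */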
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum {
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

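/*
 * Static URB partitioning for the fixed-function pipeline: only VS, SF
 * and CS entries are allocated; GS and CLIP are disabled. The fence
 * values programmed in i965_render_urb_layout() follow directly from
 * these sizes.
 */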
#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

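/*
 * The VS unit is left disabled: vertices pass straight through to the
 * SF stage, which runs the exa_sf kernel.
 */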
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0; /* ALPHATEST_UNORM8: store alpha value with UNORM8 */
    cc_state->cc3.alpha_test_func = 5;   /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(struct i965_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_render_set_surface_state(struct gen7_surface_state *ss,
                              dri_bo *bo, unsigned long offset,
                              int width, int height,
                              int pitch, int format)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(VADriverContextP ctx,
                              int index,
                              dri_bo *region,
                              unsigned long offset,
                              int w, int h,
                              int pitch, int format)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

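/*
 * Bind the source planes for the PS kernel: the Y plane goes to binding
 * table entries 1/2, followed by the chroma planes (interleaved UV for
 * NV12). For YV12 the binding indices of the two chroma planes are
 * swapped relative to the I420 case to match the plane order in memory.
 */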
static void
i965_render_src_surfaces_state(VADriverContextP ctx,
                               VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int w, h;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    w = obj_surface->width;
    h = obj_surface->height;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, w, I965_SURFACEFORMAT_R8_UNORM);

    if (obj_surface->fourcc == VA_FOURCC('Y','V','1','2')) {
        int u3 = 5, u4 = 6, v5 = 3, v6 = 4;

        i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U */
        i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);     /* V */
        i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N','V','1','2')) {
            i965_render_src_surface_state(ctx, 3, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM); /* UV */
            i965_render_src_surface_state(ctx, 4, region, w * h, rw / 2, rh / 2, w, I965_SURFACEFORMAT_R8G8_UNORM);
        } else {
            int u3 = 3, u4 = 4, v5 = 5, v6 = 6;

            i965_render_src_surface_state(ctx, u3, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM); /* U */
            i965_render_src_surface_state(ctx, u4, region, w * h, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
            i965_render_src_surface_state(ctx, v5, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);     /* V */
            i965_render_src_surface_state(ctx, v6, region, w * h + w * h / 4, rw / 2, rh / 2, w / 2, I965_SURFACEFORMAT_R8_UNORM);
        }
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;
    dri_bo *subpic_region;

    assert(obj_surface);
    assert(obj_surface->bo);

    obj_subpic = SUBPIC(obj_surface->subpic);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format);
}

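/*
 * The render target format is derived from the drawable depth: 16-bpp
 * drawables use B5G6R5, everything else is treated as B8G8R8A8.
 */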
static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

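/*
 * The upload_vertex helpers below emit three vertices (one RECTLIST
 * rectangle) of the form { tx, ty, x, y }: texture coordinates first,
 * then screen position, 16 bytes per vertex.
 */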
static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic);
    VARectangle dst_rect;
    float *vb, tx1, tx2, ty1, ty2, x1, x2, y1, y2;
    int i = 0;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

    tx1 = (float)obj_subpic->src_rect.x / obj_subpic->width;
    ty1 = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tx2 = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    ty2 = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    x1 = (float)dst_rect.x;
    y1 = (float)dst_rect.y;
    x2 = (float)(dst_rect.x + dst_rect.width);
    y2 = (float)(dst_rect.y + dst_rect.height);

    vb[i++] = tx2;
    vb[i++] = ty2;
    vb[i++] = x2;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty2;
    vb[i++] = x1;
    vb[i++] = y2;

    vb[i++] = tx1;
    vb[i++] = ty1;
    vb[i++] = x1;
    vb[i++] = y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float *vb;

    float u1, v1, u2, v2;
    int i, width, height;
    int box_x1 = dest_region->x + dst_rect->x;
    int box_y1 = dest_region->y + dst_rect->y;
    int box_x2 = box_x1 + dst_rect->width;
    int box_y2 = box_y1 + dst_rect->height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);
    width = obj_surface->orig_width;
    height = obj_surface->orig_height;

    u1 = (float)src_rect->x / width;
    v1 = (float)src_rect->y / height;
    u2 = (float)(src_rect->x + src_rect->width) / width;
    v2 = (float)(src_rect->y + src_rect->height) / height;

    dri_bo_map(render_state->vb.vertex_buffer, 1);
    assert(render_state->vb.vertex_buffer->virtual);
    vb = render_state->vb.vertex_buffer->virtual;

    i = 0;
    vb[i++] = u2;
    vb[i++] = v2;
    vb[i++] = (float)box_x2;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v2;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y2;

    vb[i++] = u1;
    vb[i++] = v1;
    vb[i++] = (float)box_x1;
    vb[i++] = (float)box_y1;

    dri_bo_unmap(render_state->vb.vertex_buffer);
}

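/*
 * Upload a single flag through the CURBE telling the PS kernel whether
 * the source chroma planes are interleaved (NV12) or separate.
 */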
static void
i965_render_upload_constants(VADriverContextP ctx,
                             VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;
    struct object_surface *obj_surface = SURFACE(surface);

    assert(obj_surface);

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (obj_surface->fourcc == VA_FOURCC('N','V','1','2'))
        *constant_buffer = 1;
    else
        *constant_buffer = 0;

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |          /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));                   /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | ((dest_region->height - 1) << 16));
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: bits 0-23 hold the color, bits 24-31 the alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

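/*
 * Point the hardware at the vertex buffer and kick off a single
 * RECTLIST primitive: three vertices, 16 bytes (4 floats) each, as
 * written by the upload_vertex helpers above.
 */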
static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
    br13 = 0xf0 << 16;
    pitch = dest_region->pitch;

    if (dest_region->cpp == 4) {
        br13 |= BR13_8888;
        blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
    } else {
        assert(dest_region->cpp == 2);
        br13 |= BR13_565;
    }

    if (dest_region->tiling != I915_TILING_NONE) {
        blt_cmd |= XY_COLOR_BLT_DST_TILED;
        pitch /= 4;
    }

    br13 |= pitch;

    if (IS_GEN6(i965->intel.device_id) ||
        IS_GEN7(i965->intel.device_id)) {
        intel_batchbuffer_start_atomic_blt(batch, 24);
        BEGIN_BLT_BATCH(batch, 6);
    } else {
        intel_batchbuffer_start_atomic(batch, 24);
        BEGIN_BATCH(batch, 6);
    }

    OUT_BATCH(batch, blt_cmd);
    OUT_BATCH(batch, br13);
    OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
    OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
              (dest_region->x + dest_region->width));
    OUT_RELOC(batch, dest_region->bo,
              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
              0);
    OUT_BATCH(batch, 0x0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_surface_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    i965_clear_dest_region(ctx);
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_constant_buffer(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

static void
i965_subpic_render_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    i965_render_pipeline_select(ctx);
    i965_render_state_sip(ctx);
    i965_render_state_base_address(ctx);
    i965_render_binding_table_pointers(ctx);
    i965_render_constant_color(ctx);
    i965_render_pipelined_pointers(ctx);
    i965_render_urb_layout(ctx);
    i965_render_cs_urb_layout(ctx);
    i965_render_drawing_rectangle(ctx);
    i965_render_vertex_elements(ctx);
    i965_render_startup(ctx);
    intel_batchbuffer_end_atomic(batch);
}

1440 static void 
1441 i965_render_initialize(VADriverContextP ctx)
1442 {
1443     struct i965_driver_data *i965 = i965_driver_data(ctx);
1444     struct i965_render_state *render_state = &i965->render_state;
1445     dri_bo *bo;
1446
1447     /* VERTEX BUFFER */
1448     dri_bo_unreference(render_state->vb.vertex_buffer);
1449     bo = dri_bo_alloc(i965->intel.bufmgr,
1450                       "vertex buffer",
1451                       4096,
1452                       4096);
1453     assert(bo);
1454     render_state->vb.vertex_buffer = bo;
1455
1456     /* VS */
1457     dri_bo_unreference(render_state->vs.state);
1458     bo = dri_bo_alloc(i965->intel.bufmgr,
1459                       "vs state",
1460                       sizeof(struct i965_vs_unit_state),
1461                       64);
1462     assert(bo);
1463     render_state->vs.state = bo;
1464
1465     /* GS */
1466     /* CLIP */
1467     /* SF */
1468     dri_bo_unreference(render_state->sf.state);
1469     bo = dri_bo_alloc(i965->intel.bufmgr,
1470                       "sf state",
1471                       sizeof(struct i965_sf_unit_state),
1472                       64);
1473     assert(bo);
1474     render_state->sf.state = bo;
1475
1476     /* WM */
1477     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1478     bo = dri_bo_alloc(i965->intel.bufmgr,
1479                       "surface state & binding table",
1480                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1481                       4096);
1482     assert(bo);
1483     render_state->wm.surface_state_binding_table_bo = bo;
1484
1485     dri_bo_unreference(render_state->wm.sampler);
1486     bo = dri_bo_alloc(i965->intel.bufmgr,
1487                       "sampler state",
1488                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1489                       64);
1490     assert(bo);
1491     render_state->wm.sampler = bo;
1492     render_state->wm.sampler_count = 0;
1493
1494     dri_bo_unreference(render_state->wm.state);
1495     bo = dri_bo_alloc(i965->intel.bufmgr,
1496                       "wm state",
1497                       sizeof(struct i965_wm_unit_state),
1498                       64);
1499     assert(bo);
1500     render_state->wm.state = bo;
1501
1502     /* COLOR CALCULATOR */
1503     dri_bo_unreference(render_state->cc.state);
1504     bo = dri_bo_alloc(i965->intel.bufmgr,
1505                       "color calc state",
1506                       sizeof(struct i965_cc_unit_state),
1507                       64);
1508     assert(bo);
1509     render_state->cc.state = bo;
1510
1511     dri_bo_unreference(render_state->cc.viewport);
1512     bo = dri_bo_alloc(i965->intel.bufmgr,
1513                       "cc viewport",
1514                       sizeof(struct i965_cc_viewport),
1515                       64);
1516     assert(bo);
1517     render_state->cc.viewport = bo;
1518 }
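     /*
      * A sketch of the layout assumed for wm.surface_state_binding_table_bo,
      * inferred from the allocation size above: MAX_RENDER_SURFACES padded
      * surface-state blocks come first, followed by one 32-bit binding-table
      * entry per surface that points back at its block:
      *
      *   offset_of_surface_state(i) = SURFACE_STATE_PADDED_SIZE * i
      *   BINDING_TABLE_OFFSET       = SURFACE_STATE_PADDED_SIZE * MAX_RENDER_SURFACES
      *   binding_table[i]           = offset_of_surface_state(i)
      */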
1519
1520 static void
1521 i965_render_put_surface(
1522     VADriverContextP   ctx,
1523     VASurfaceID        surface,
1524     const VARectangle *src_rect,
1525     const VARectangle *dst_rect,
1526     unsigned int       flags
1527 )
1528 {
1529     struct i965_driver_data *i965 = i965_driver_data(ctx);
1530     struct intel_batchbuffer *batch = i965->batch;
1531
1532     i965_render_initialize(ctx);
1533     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect);
1534     i965_surface_render_pipeline_setup(ctx);
1535     intel_batchbuffer_flush(batch);
1536 }
1537
1538 static void
1539 i965_render_put_subpicture(
1540     VADriverContextP   ctx,
1541     VASurfaceID        surface,
1542     const VARectangle *src_rect,
1543     const VARectangle *dst_rect
1544 )
1545 {
1546     struct i965_driver_data *i965 = i965_driver_data(ctx);
1547     struct intel_batchbuffer *batch = i965->batch;
1548     struct object_surface *obj_surface = SURFACE(surface);
         struct object_subpic *obj_subpic;

         assert(obj_surface);
1549     obj_subpic = SUBPIC(obj_surface->subpic);
1550
1551     assert(obj_subpic);
1552
1553     i965_render_initialize(ctx);
1554     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1555     i965_subpic_render_pipeline_setup(ctx);
1556     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1557     intel_batchbuffer_flush(batch);
1558 }
1559
1560 /*
1561  * for GEN6+
1562  */
1563 static void 
1564 gen6_render_initialize(VADriverContextP ctx)
1565 {
1566     struct i965_driver_data *i965 = i965_driver_data(ctx);
1567     struct i965_render_state *render_state = &i965->render_state;
1568     dri_bo *bo;
1569
1570     /* VERTEX BUFFER */
1571     dri_bo_unreference(render_state->vb.vertex_buffer);
1572     bo = dri_bo_alloc(i965->intel.bufmgr,
1573                       "vertex buffer",
1574                       4096,
1575                       4096);
1576     assert(bo);
1577     render_state->vb.vertex_buffer = bo;
1578
1579     /* WM */
1580     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1581     bo = dri_bo_alloc(i965->intel.bufmgr,
1582                       "surface state & binding table",
1583                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1584                       4096);
1585     assert(bo);
1586     render_state->wm.surface_state_binding_table_bo = bo;
1587
1588     dri_bo_unreference(render_state->wm.sampler);
1589     bo = dri_bo_alloc(i965->intel.bufmgr,
1590                       "sampler state",
1591                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1592                       4096);
1593     assert(bo);
1594     render_state->wm.sampler = bo;
1595     render_state->wm.sampler_count = 0;
1596
1597     /* COLOR CALCULATOR */
1598     dri_bo_unreference(render_state->cc.state);
1599     bo = dri_bo_alloc(i965->intel.bufmgr,
1600                       "color calc state",
1601                       sizeof(struct gen6_color_calc_state),
1602                       4096);
1603     assert(bo);
1604     render_state->cc.state = bo;
1605
1606     /* CC VIEWPORT */
1607     dri_bo_unreference(render_state->cc.viewport);
1608     bo = dri_bo_alloc(i965->intel.bufmgr,
1609                       "cc viewport",
1610                       sizeof(struct i965_cc_viewport),
1611                       4096);
1612     assert(bo);
1613     render_state->cc.viewport = bo;
1614
1615     /* BLEND STATE */
1616     dri_bo_unreference(render_state->cc.blend);
1617     bo = dri_bo_alloc(i965->intel.bufmgr,
1618                       "blend state",
1619                       sizeof(struct gen6_blend_state),
1620                       4096);
1621     assert(bo);
1622     render_state->cc.blend = bo;
1623
1624     /* DEPTH & STENCIL STATE */
1625     dri_bo_unreference(render_state->cc.depth_stencil);
1626     bo = dri_bo_alloc(i965->intel.bufmgr,
1627                       "depth & stencil state",
1628                       sizeof(struct gen6_depth_stencil_state),
1629                       4096);
1630     assert(bo);
1631     render_state->cc.depth_stencil = bo;
1632 }
1633
1634 static void
1635 gen6_render_color_calc_state(VADriverContextP ctx)
1636 {
1637     struct i965_driver_data *i965 = i965_driver_data(ctx);
1638     struct i965_render_state *render_state = &i965->render_state;
1639     struct gen6_color_calc_state *color_calc_state;
1640     
1641     dri_bo_map(render_state->cc.state, 1);
1642     assert(render_state->cc.state->virtual);
1643     color_calc_state = render_state->cc.state->virtual;
1644     memset(color_calc_state, 0, sizeof(*color_calc_state));
1645     color_calc_state->constant_r = 1.0;
1646     color_calc_state->constant_g = 0.0;
1647     color_calc_state->constant_b = 1.0;
1648     color_calc_state->constant_a = 1.0;
1649     dri_bo_unmap(render_state->cc.state);
1650 }
1651
1652 static void
1653 gen6_render_blend_state(VADriverContextP ctx)
1654 {
1655     struct i965_driver_data *i965 = i965_driver_data(ctx);
1656     struct i965_render_state *render_state = &i965->render_state;
1657     struct gen6_blend_state *blend_state;
1658     
1659     dri_bo_map(render_state->cc.blend, 1);
1660     assert(render_state->cc.blend->virtual);
1661     blend_state = render_state->cc.blend->virtual;
1662     memset(blend_state, 0, sizeof(*blend_state));
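         /* Logic op 0xc is COPY (result = source), so this replaces blending
          * with a plain overwrite of the destination. */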
1663     blend_state->blend1.logic_op_enable = 1;
1664     blend_state->blend1.logic_op_func = 0xc;
1665     dri_bo_unmap(render_state->cc.blend);
1666 }
1667
1668 static void
1669 gen6_render_depth_stencil_state(VADriverContextP ctx)
1670 {
1671     struct i965_driver_data *i965 = i965_driver_data(ctx);
1672     struct i965_render_state *render_state = &i965->render_state;
1673     struct gen6_depth_stencil_state *depth_stencil_state;
1674     
1675     dri_bo_map(render_state->cc.depth_stencil, 1);
1676     assert(render_state->cc.depth_stencil->virtual);
1677     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1678     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1679     dri_bo_unmap(render_state->cc.depth_stencil);
1680 }
1681
1682 static void
1683 gen6_render_setup_states(
1684     VADriverContextP   ctx,
1685     VASurfaceID        surface,
1686     const VARectangle *src_rect,
1687     const VARectangle *dst_rect
1688 )
1689 {
1690     i965_render_dest_surface_state(ctx, 0);
1691     i965_render_src_surfaces_state(ctx, surface);
1692     i965_render_sampler(ctx);
1693     i965_render_cc_viewport(ctx);
1694     gen6_render_color_calc_state(ctx);
1695     gen6_render_blend_state(ctx);
1696     gen6_render_depth_stencil_state(ctx);
1697     i965_render_upload_constants(ctx, surface);
1698     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1699 }
1700
1701 static void
1702 gen6_emit_invariant_states(VADriverContextP ctx)
1703 {
1704     struct i965_driver_data *i965 = i965_driver_data(ctx);
1705     struct intel_batchbuffer *batch = i965->batch;
1706
1707     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1708
1709     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1710     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1711               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1712     OUT_BATCH(batch, 0);
1713
1714     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1715     OUT_BATCH(batch, 1);
1716
1717     /* Set system instruction pointer */
1718     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1719     OUT_BATCH(batch, 0);
1720 }
1721
1722 static void
1723 gen6_emit_state_base_address(VADriverContextP ctx)
1724 {
1725     struct i965_driver_data *i965 = i965_driver_data(ctx);
1726     struct intel_batchbuffer *batch = i965->batch;
1727     struct i965_render_state *render_state = &i965->render_state;
1728
1729     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1730     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1731     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1732     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1733     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1734     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1735     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1736     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1737     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1738     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1739 }
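     /*
      * Only the surface-state base is relocated to a real buffer; every other
      * base address is left at zero (BASE_ADDRESS_MODIFY with no address).
      * Binding-table entries and SURFACE_STATE offsets are therefore relative
      * to wm.surface_state_binding_table_bo, while kernel and dynamic-state
      * pointers presumably remain absolute graphics addresses.
      */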
1740
1741 static void
1742 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1743 {
1744     struct i965_driver_data *i965 = i965_driver_data(ctx);
1745     struct intel_batchbuffer *batch = i965->batch;
1746     struct i965_render_state *render_state = &i965->render_state;
1747
1748     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1749               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1750               (4 - 2));
1751     OUT_BATCH(batch, 0);
1752     OUT_BATCH(batch, 0);
1753     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1754 }
1755
1756 static void
1757 gen6_emit_urb(VADriverContextP ctx)
1758 {
1759     struct i965_driver_data *i965 = i965_driver_data(ctx);
1760     struct intel_batchbuffer *batch = i965->batch;
1761
1762     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1763     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1764               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1765     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1766               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1767 }
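     /*
      * Minimal URB setup for a pass-through pipeline: 24 VS entries (the GEN6
      * hardware minimum) of one row each, and no GS allocation at all, since a
      * RECTLIST vertex is only 4 floats here.  The size field is assumed to be
      * encoded as (rows - 1), hence the (1 - 1) above.
      */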
1768
1769 static void
1770 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1771 {
1772     struct i965_driver_data *i965 = i965_driver_data(ctx);
1773     struct intel_batchbuffer *batch = i965->batch;
1774     struct i965_render_state *render_state = &i965->render_state;
1775
1776     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1777     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1778     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1779     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1780 }
1781
1782 static void
1783 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1784 {
1785     struct i965_driver_data *i965 = i965_driver_data(ctx);
1786     struct intel_batchbuffer *batch = i965->batch;
1787     struct i965_render_state *render_state = &i965->render_state;
1788
1789     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1790               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1791               (4 - 2));
1792     OUT_BATCH(batch, 0); /* VS */
1793     OUT_BATCH(batch, 0); /* GS */
1794     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1795 }
1796
1797 static void
1798 gen6_emit_binding_table(VADriverContextP ctx)
1799 {
1800     struct i965_driver_data *i965 = i965_driver_data(ctx);
1801     struct intel_batchbuffer *batch = i965->batch;
1802
1803     /* Binding table pointers */
1804     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1805               GEN6_BINDING_TABLE_MODIFY_PS |
1806               (4 - 2));
1807     OUT_BATCH(batch, 0);                /* vs */
1808     OUT_BATCH(batch, 0);                /* gs */
1809     /* Only the PS uses the binding table */
1810     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1811 }
1812
1813 static void
1814 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1815 {
1816     struct i965_driver_data *i965 = i965_driver_data(ctx);
1817     struct intel_batchbuffer *batch = i965->batch;
1818
1819     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1820     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1821               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1822     OUT_BATCH(batch, 0);
1823     OUT_BATCH(batch, 0);
1824     OUT_BATCH(batch, 0);
1825     OUT_BATCH(batch, 0);
1826     OUT_BATCH(batch, 0);
1827
1828     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1829     OUT_BATCH(batch, 0);
1830 }
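     /*
      * No depth or stencil attachment is used, but the 3D pipe still expects
      * depth-buffer state to be programmed; a NULL surface type with a dummy
      * D32_FLOAT format is the conventional way of saying "no depth buffer".
      */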
1831
1832 static void
1833 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1834 {
1835     i965_render_drawing_rectangle(ctx);
1836 }
1837
1838 static void 
1839 gen6_emit_vs_state(VADriverContextP ctx)
1840 {
1841     struct i965_driver_data *i965 = i965_driver_data(ctx);
1842     struct intel_batchbuffer *batch = i965->batch;
1843
1844     /* disable VS constant buffer */
1845     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1846     OUT_BATCH(batch, 0);
1847     OUT_BATCH(batch, 0);
1848     OUT_BATCH(batch, 0);
1849     OUT_BATCH(batch, 0);
1850         
1851     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1852     OUT_BATCH(batch, 0); /* without VS kernel */
1853     OUT_BATCH(batch, 0);
1854     OUT_BATCH(batch, 0);
1855     OUT_BATCH(batch, 0);
1856     OUT_BATCH(batch, 0); /* pass-through */
1857 }
1858
1859 static void 
1860 gen6_emit_gs_state(VADriverContextP ctx)
1861 {
1862     struct i965_driver_data *i965 = i965_driver_data(ctx);
1863     struct intel_batchbuffer *batch = i965->batch;
1864
1865     /* disable GS constant buffer */
1866     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1867     OUT_BATCH(batch, 0);
1868     OUT_BATCH(batch, 0);
1869     OUT_BATCH(batch, 0);
1870     OUT_BATCH(batch, 0);
1871         
1872     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1873     OUT_BATCH(batch, 0); /* without GS kernel */
1874     OUT_BATCH(batch, 0);
1875     OUT_BATCH(batch, 0);
1876     OUT_BATCH(batch, 0);
1877     OUT_BATCH(batch, 0);
1878     OUT_BATCH(batch, 0); /* pass-through */
1879 }
1880
1881 static void 
1882 gen6_emit_clip_state(VADriverContextP ctx)
1883 {
1884     struct i965_driver_data *i965 = i965_driver_data(ctx);
1885     struct intel_batchbuffer *batch = i965->batch;
1886
1887     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1888     OUT_BATCH(batch, 0);
1889     OUT_BATCH(batch, 0); /* pass-through */
1890     OUT_BATCH(batch, 0);
1891 }
1892
1893 static void 
1894 gen6_emit_sf_state(VADriverContextP ctx)
1895 {
1896     struct i965_driver_data *i965 = i965_driver_data(ctx);
1897     struct intel_batchbuffer *batch = i965->batch;
1898
1899     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
1900     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
1901               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
1902               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
1903     OUT_BATCH(batch, 0);
1904     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
1905     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
1906     OUT_BATCH(batch, 0);
1907     OUT_BATCH(batch, 0);
1908     OUT_BATCH(batch, 0);
1909     OUT_BATCH(batch, 0);
1910     OUT_BATCH(batch, 0); /* DW9 */
1911     OUT_BATCH(batch, 0);
1912     OUT_BATCH(batch, 0);
1913     OUT_BATCH(batch, 0);
1914     OUT_BATCH(batch, 0);
1915     OUT_BATCH(batch, 0); /* DW14 */
1916     OUT_BATCH(batch, 0);
1917     OUT_BATCH(batch, 0);
1918     OUT_BATCH(batch, 0);
1919     OUT_BATCH(batch, 0);
1920     OUT_BATCH(batch, 0); /* DW19 */
1921 }
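     /*
      * The SF unit is configured for plain pass-through: a single attribute
      * (the texture coordinate) is read from each URB entry at offset 0, no
      * culling is done, and everything else stays zeroed.
      */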
1922
1923 static void 
1924 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
1925 {
1926     struct i965_driver_data *i965 = i965_driver_data(ctx);
1927     struct intel_batchbuffer *batch = i965->batch;
1928     struct i965_render_state *render_state = &i965->render_state;
1929
1930     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
1931               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
1932               (5 - 2));
1933     OUT_RELOC(batch, 
1934               render_state->curbe.bo,
1935               I915_GEM_DOMAIN_INSTRUCTION, 0,
1936               0);
1937     OUT_BATCH(batch, 0);
1938     OUT_BATCH(batch, 0);
1939     OUT_BATCH(batch, 0);
1940
1941     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
1942     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
1943               I915_GEM_DOMAIN_INSTRUCTION, 0,
1944               0);
1945     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
1946               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
1947     OUT_BATCH(batch, 0);
1948     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
1949     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
1950               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
1951               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
1952     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
1953               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
1954     OUT_BATCH(batch, 0);
1955     OUT_BATCH(batch, 0);
1956 }
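     /*
      * The PS push constants come from render_state->curbe.bo, which
      * i965_render_upload_constants() fills during state setup.  The kernel
      * starts reading its payload at GRF 6 and is dispatched in SIMD16 mode
      * with up to 40 threads, matching the values emitted above.
      */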
1957
1958 static void
1959 gen6_emit_vertex_element_state(VADriverContextP ctx)
1960 {
1961     struct i965_driver_data *i965 = i965_driver_data(ctx);
1962     struct intel_batchbuffer *batch = i965->batch;
1963
1964     /* Set up our vertex elements, sourced from the single vertex buffer. */
1965     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
1966     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
1967     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1968               GEN6_VE0_VALID |
1969               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1970               (0 << VE0_OFFSET_SHIFT));
1971     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
1972               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1973               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1974               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1975     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
1976     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
1977               GEN6_VE0_VALID |
1978               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
1979               (8 << VE0_OFFSET_SHIFT));
1980     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
1981               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
1982               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
1983               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
1984 }
1985
1986 static void
1987 gen6_emit_vertices(VADriverContextP ctx)
1988 {
1989     struct i965_driver_data *i965 = i965_driver_data(ctx);
1990     struct intel_batchbuffer *batch = i965->batch;
1991     struct i965_render_state *render_state = &i965->render_state;
1992
1993     BEGIN_BATCH(batch, 11);
1994     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
1995     OUT_BATCH(batch, 
1996               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
1997               GEN6_VB0_VERTEXDATA |
1998               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
1999     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2000     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2001     OUT_BATCH(batch, 0);
2002
2003     OUT_BATCH(batch, 
2004               CMD_3DPRIMITIVE |
2005               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2006               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2007               (0 << 9) |
2008               (6 - 2));
2009     OUT_BATCH(batch, 3); /* vertex count per instance */
2010     OUT_BATCH(batch, 0); /* start vertex offset */
2011     OUT_BATCH(batch, 1); /* single instance */
2012     OUT_BATCH(batch, 0); /* start instance location */
2013     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2014     ADVANCE_BATCH(batch);
2015 }
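     /*
      * The vertex buffer bound above is assumed to hold the three RECTLIST
      * corners written by i965_render_upload_vertex(), each as 4 floats
      * (x, y, s, t): 3 * 4 * 4 = 48 bytes, which is why the end-address
      * relocation points 12 * 4 bytes into the buffer.
      */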
2016
2017 static void
2018 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2019 {
2020     struct i965_driver_data *i965 = i965_driver_data(ctx);
2021     struct intel_batchbuffer *batch = i965->batch;
2022
2023     intel_batchbuffer_start_atomic(batch, 0x1000);
2024     intel_batchbuffer_emit_mi_flush(batch);
2025     gen6_emit_invariant_states(ctx);
2026     gen6_emit_state_base_address(ctx);
2027     gen6_emit_viewport_state_pointers(ctx);
2028     gen6_emit_urb(ctx);
2029     gen6_emit_cc_state_pointers(ctx);
2030     gen6_emit_sampler_state_pointers(ctx);
2031     gen6_emit_vs_state(ctx);
2032     gen6_emit_gs_state(ctx);
2033     gen6_emit_clip_state(ctx);
2034     gen6_emit_sf_state(ctx);
2035     gen6_emit_wm_state(ctx, kernel);
2036     gen6_emit_binding_table(ctx);
2037     gen6_emit_depth_buffer_state(ctx);
2038     gen6_emit_drawing_rectangle(ctx);
2039     gen6_emit_vertex_element_state(ctx);
2040     gen6_emit_vertices(ctx);
2041     intel_batchbuffer_end_atomic(batch);
2042 }
2043
2044 static void
2045 gen6_render_put_surface(
2046     VADriverContextP   ctx,
2047     VASurfaceID        surface,
2048     const VARectangle *src_rect,
2049     const VARectangle *dst_rect,
2050     unsigned int       flags
2051 )
2052 {
2053     struct i965_driver_data *i965 = i965_driver_data(ctx);
2054     struct intel_batchbuffer *batch = i965->batch;
2055
2056     gen6_render_initialize(ctx);
2057     gen6_render_setup_states(ctx, surface, src_rect, dst_rect);
2058     i965_clear_dest_region(ctx);
2059     gen6_render_emit_states(ctx, PS_KERNEL);
2060     intel_batchbuffer_flush(batch);
2061 }
2062
2063 static void
2064 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2065 {
2066     struct i965_driver_data *i965 = i965_driver_data(ctx);
2067     struct i965_render_state *render_state = &i965->render_state;
2068     struct gen6_blend_state *blend_state;
2069
2071     dri_bo_map(render_state->cc.blend, 1);
2072     assert(render_state->cc.blend->virtual);
2073     blend_state = render_state->cc.blend->virtual;
2074     memset(blend_state, 0, sizeof(*blend_state));
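         /* Classic source-over compositing:
          * result = src.a * src + (1 - src.a) * dst */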
2075     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2076     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2077     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2078     blend_state->blend0.blend_enable = 1;
2079     blend_state->blend1.post_blend_clamp_enable = 1;
2080     blend_state->blend1.pre_blend_clamp_enable = 1;
2081     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2082     dri_bo_unmap(render_state->cc.blend);
2083 }
2084
2085 static void
2086 gen6_subpicture_render_setup_states(
2087     VADriverContextP   ctx,
2088     VASurfaceID        surface,
2089     const VARectangle *src_rect,
2090     const VARectangle *dst_rect
2091 )
2092 {
2093     i965_render_dest_surface_state(ctx, 0);
2094     i965_subpic_render_src_surfaces_state(ctx, surface);
2095     i965_render_sampler(ctx);
2096     i965_render_cc_viewport(ctx);
2097     gen6_render_color_calc_state(ctx);
2098     gen6_subpicture_render_blend_state(ctx);
2099     gen6_render_depth_stencil_state(ctx);
2100     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2101 }
2102
2103 static void
2104 gen6_render_put_subpicture(
2105     VADriverContextP   ctx,
2106     VASurfaceID        surface,
2107     const VARectangle *src_rect,
2108     const VARectangle *dst_rect
2109 )
2110 {
2111     struct i965_driver_data *i965 = i965_driver_data(ctx);
2112     struct intel_batchbuffer *batch = i965->batch;
2113     struct object_surface *obj_surface = SURFACE(surface);
         struct object_subpic *obj_subpic;

         assert(obj_surface);
2114     obj_subpic = SUBPIC(obj_surface->subpic);
2115
2116     assert(obj_subpic);
2117     gen6_render_initialize(ctx);
2118     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2119     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2120     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2121     intel_batchbuffer_flush(batch);
2122 }
2123
2124 /*
2125  * for GEN7
2126  */
2127 static void 
2128 gen7_render_initialize(VADriverContextP ctx)
2129 {
2130     struct i965_driver_data *i965 = i965_driver_data(ctx);
2131     struct i965_render_state *render_state = &i965->render_state;
2132     dri_bo *bo;
2133
2134     /* VERTEX BUFFER */
2135     dri_bo_unreference(render_state->vb.vertex_buffer);
2136     bo = dri_bo_alloc(i965->intel.bufmgr,
2137                       "vertex buffer",
2138                       4096,
2139                       4096);
2140     assert(bo);
2141     render_state->vb.vertex_buffer = bo;
2142
2143     /* WM */
2144     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2145     bo = dri_bo_alloc(i965->intel.bufmgr,
2146                       "surface state & binding table",
2147                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2148                       4096);
2149     assert(bo);
2150     render_state->wm.surface_state_binding_table_bo = bo;
2151
2152     dri_bo_unreference(render_state->wm.sampler);
2153     bo = dri_bo_alloc(i965->intel.bufmgr,
2154                       "sampler state",
2155                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2156                       4096);
2157     assert(bo);
2158     render_state->wm.sampler = bo;
2159     render_state->wm.sampler_count = 0;
2160
2161     /* COLOR CALCULATOR */
2162     dri_bo_unreference(render_state->cc.state);
2163     bo = dri_bo_alloc(i965->intel.bufmgr,
2164                       "color calc state",
2165                       sizeof(struct gen6_color_calc_state),
2166                       4096);
2167     assert(bo);
2168     render_state->cc.state = bo;
2169
2170     /* CC VIEWPORT */
2171     dri_bo_unreference(render_state->cc.viewport);
2172     bo = dri_bo_alloc(i965->intel.bufmgr,
2173                       "cc viewport",
2174                       sizeof(struct i965_cc_viewport),
2175                       4096);
2176     assert(bo);
2177     render_state->cc.viewport = bo;
2178
2179     /* BLEND STATE */
2180     dri_bo_unreference(render_state->cc.blend);
2181     bo = dri_bo_alloc(i965->intel.bufmgr,
2182                       "blend state",
2183                       sizeof(struct gen6_blend_state),
2184                       4096);
2185     assert(bo);
2186     render_state->cc.blend = bo;
2187
2188     /* DEPTH & STENCIL STATE */
2189     dri_bo_unreference(render_state->cc.depth_stencil);
2190     bo = dri_bo_alloc(i965->intel.bufmgr,
2191                       "depth & stencil state",
2192                       sizeof(struct gen6_depth_stencil_state),
2193                       4096);
2194     assert(bo);
2195     render_state->cc.depth_stencil = bo;
2196 }
2197
2198 static void
2199 gen7_render_color_calc_state(VADriverContextP ctx)
2200 {
2201     struct i965_driver_data *i965 = i965_driver_data(ctx);
2202     struct i965_render_state *render_state = &i965->render_state;
2203     struct gen6_color_calc_state *color_calc_state;
2204     
2205     dri_bo_map(render_state->cc.state, 1);
2206     assert(render_state->cc.state->virtual);
2207     color_calc_state = render_state->cc.state->virtual;
2208     memset(color_calc_state, 0, sizeof(*color_calc_state));
2209     color_calc_state->constant_r = 1.0;
2210     color_calc_state->constant_g = 0.0;
2211     color_calc_state->constant_b = 1.0;
2212     color_calc_state->constant_a = 1.0;
2213     dri_bo_unmap(render_state->cc.state);
2214 }
2215
2216 static void
2217 gen7_render_blend_state(VADriverContextP ctx)
2218 {
2219     struct i965_driver_data *i965 = i965_driver_data(ctx);
2220     struct i965_render_state *render_state = &i965->render_state;
2221     struct gen6_blend_state *blend_state;
2222     
2223     dri_bo_map(render_state->cc.blend, 1);
2224     assert(render_state->cc.blend->virtual);
2225     blend_state = render_state->cc.blend->virtual;
2226     memset(blend_state, 0, sizeof(*blend_state));
2227     blend_state->blend1.logic_op_enable = 1;
2228     blend_state->blend1.logic_op_func = 0xc;
2229     blend_state->blend1.pre_blend_clamp_enable = 1;
2230     dri_bo_unmap(render_state->cc.blend);
2231 }
2232
2233 static void
2234 gen7_render_depth_stencil_state(VADriverContextP ctx)
2235 {
2236     struct i965_driver_data *i965 = i965_driver_data(ctx);
2237     struct i965_render_state *render_state = &i965->render_state;
2238     struct gen6_depth_stencil_state *depth_stencil_state;
2239     
2240     dri_bo_map(render_state->cc.depth_stencil, 1);
2241     assert(render_state->cc.depth_stencil->virtual);
2242     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2243     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2244     dri_bo_unmap(render_state->cc.depth_stencil);
2245 }
2246
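     /*
      * GEN7 rearranged SAMPLER_STATE: note that the wrap modes are written to
      * ss3 below, while the GEN4-6 i965_sampler_state keeps them elsewhere, so
      * the common i965_render_sampler() cannot be reused for this generation.
      */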
2247 static void 
2248 gen7_render_sampler(VADriverContextP ctx)
2249 {
2250     struct i965_driver_data *i965 = i965_driver_data(ctx);
2251     struct i965_render_state *render_state = &i965->render_state;
2252     struct gen7_sampler_state *sampler_state;
2253     int i;
2254     
2255     assert(render_state->wm.sampler_count > 0);
2256     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2257
2258     dri_bo_map(render_state->wm.sampler, 1);
2259     assert(render_state->wm.sampler->virtual);
2260     sampler_state = render_state->wm.sampler->virtual;
2261     for (i = 0; i < render_state->wm.sampler_count; i++) {
2262         memset(sampler_state, 0, sizeof(*sampler_state));
2263         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2264         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2265         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2266         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2267         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2268         sampler_state++;
2269     }
2270
2271     dri_bo_unmap(render_state->wm.sampler);
2272 }
2273
2274 static void
2275 gen7_render_setup_states(
2276     VADriverContextP   ctx,
2277     VASurfaceID        surface,
2278     const VARectangle *src_rect,
2279     const VARectangle *dst_rect
2280 )
2281 {
2282     i965_render_dest_surface_state(ctx, 0);
2283     i965_render_src_surfaces_state(ctx, surface);
2284     gen7_render_sampler(ctx);
2285     i965_render_cc_viewport(ctx);
2286     gen7_render_color_calc_state(ctx);
2287     gen7_render_blend_state(ctx);
2288     gen7_render_depth_stencil_state(ctx);
2289     i965_render_upload_constants(ctx, surface);
2290     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2291 }
2292
2293 static void
2294 gen7_emit_invariant_states(VADriverContextP ctx)
2295 {
2296     struct i965_driver_data *i965 = i965_driver_data(ctx);
2297     struct intel_batchbuffer *batch = i965->batch;
2298
2299     BEGIN_BATCH(batch, 1);
2300     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2301     ADVANCE_BATCH(batch);
2302
2303     BEGIN_BATCH(batch, 4);
2304     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2305     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2306               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2307     OUT_BATCH(batch, 0);
2308     OUT_BATCH(batch, 0);
2309     ADVANCE_BATCH(batch);
2310
2311     BEGIN_BATCH(batch, 2);
2312     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2313     OUT_BATCH(batch, 1);
2314     ADVANCE_BATCH(batch);
2315
2316     /* Set system instruction pointer */
2317     BEGIN_BATCH(batch, 2);
2318     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2319     OUT_BATCH(batch, 0);
2320     ADVANCE_BATCH(batch);
2321 }
2322
2323 static void
2324 gen7_emit_state_base_address(VADriverContextP ctx)
2325 {
2326     struct i965_driver_data *i965 = i965_driver_data(ctx);
2327     struct intel_batchbuffer *batch = i965->batch;
2328     struct i965_render_state *render_state = &i965->render_state;
2329
2330     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2331     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2332     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2333     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2334     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2335     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2336     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2337     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2338     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2339     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2340 }
2341
2342 static void
2343 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2344 {
2345     struct i965_driver_data *i965 = i965_driver_data(ctx);
2346     struct intel_batchbuffer *batch = i965->batch;
2347     struct i965_render_state *render_state = &i965->render_state;
2348
2349     BEGIN_BATCH(batch, 2);
2350     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2351     OUT_RELOC(batch,
2352               render_state->cc.viewport,
2353               I915_GEM_DOMAIN_INSTRUCTION, 0,
2354               0);
2355     ADVANCE_BATCH(batch);
2356
2357     BEGIN_BATCH(batch, 2);
2358     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2359     OUT_BATCH(batch, 0);
2360     ADVANCE_BATCH(batch);
2361 }
2362
2363 /*
2364  * URB layout on GEN7 
2365  * ----------------------------------------
2366  * | PS Push Constants (8KB) | VS entries |
2367  * ----------------------------------------
2368  */
2369 static void
2370 gen7_emit_urb(VADriverContextP ctx)
2371 {
2372     struct i965_driver_data *i965 = i965_driver_data(ctx);
2373     struct intel_batchbuffer *batch = i965->batch;
2374
2375     BEGIN_BATCH(batch, 2);
2376     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2377     OUT_BATCH(batch, 8); /* 8KB of PS push-constant space, in 1KB units */
2378     ADVANCE_BATCH(batch);
2379
2380     BEGIN_BATCH(batch, 2);
2381     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2382     OUT_BATCH(batch, 
2383               (32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
2384               (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
2385               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2386     ADVANCE_BATCH(batch);
2387
2388     BEGIN_BATCH(batch, 2);
2389     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2390     OUT_BATCH(batch,
2391               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2392               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2393     ADVANCE_BATCH(batch);
2394
2395     BEGIN_BATCH(batch, 2);
2396     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2397     OUT_BATCH(batch,
2398               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2399               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2400     ADVANCE_BATCH(batch);
2401
2402     BEGIN_BATCH(batch, 2);
2403     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2404     OUT_BATCH(batch,
2405               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2406               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2407     ADVANCE_BATCH(batch);
2408 }
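     /*
      * Assuming the usual 8KB granularity of the URB starting-address field,
      * this carves out the PS push constants in the first 8KB, places 32 VS
      * entries right after them, and parks the zero-sized GS/HS/DS
      * allocations at harmless offsets, matching the bypassed stages.
      */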
2409
2410 static void
2411 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2412 {
2413     struct i965_driver_data *i965 = i965_driver_data(ctx);
2414     struct intel_batchbuffer *batch = i965->batch;
2415     struct i965_render_state *render_state = &i965->render_state;
2416
2417     BEGIN_BATCH(batch, 2);
2418     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2419     OUT_RELOC(batch,
2420               render_state->cc.state,
2421               I915_GEM_DOMAIN_INSTRUCTION, 0,
2422               1);
2423     ADVANCE_BATCH(batch);
2424
2425     BEGIN_BATCH(batch, 2);
2426     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2427     OUT_RELOC(batch,
2428               render_state->cc.blend,
2429               I915_GEM_DOMAIN_INSTRUCTION, 0,
2430               1);
2431     ADVANCE_BATCH(batch);
2432
2433     BEGIN_BATCH(batch, 2);
2434     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2435     OUT_RELOC(batch,
2436               render_state->cc.depth_stencil,
2437               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2438               1);
2439     ADVANCE_BATCH(batch);
2440 }
2441
2442 static void
2443 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2444 {
2445     struct i965_driver_data *i965 = i965_driver_data(ctx);
2446     struct intel_batchbuffer *batch = i965->batch;
2447     struct i965_render_state *render_state = &i965->render_state;
2448
2449     BEGIN_BATCH(batch, 2);
2450     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2451     OUT_RELOC(batch,
2452               render_state->wm.sampler,
2453               I915_GEM_DOMAIN_INSTRUCTION, 0,
2454               0);
2455     ADVANCE_BATCH(batch);
2456 }
2457
2458 static void
2459 gen7_emit_binding_table(VADriverContextP ctx)
2460 {
2461     struct i965_driver_data *i965 = i965_driver_data(ctx);
2462     struct intel_batchbuffer *batch = i965->batch;
2463
2464     BEGIN_BATCH(batch, 2);
2465     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2466     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2467     ADVANCE_BATCH(batch);
2468 }
2469
2470 static void
2471 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2472 {
2473     struct i965_driver_data *i965 = i965_driver_data(ctx);
2474     struct intel_batchbuffer *batch = i965->batch;
2475
2476     BEGIN_BATCH(batch, 7);
2477     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2478     OUT_BATCH(batch,
2479               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2480               (I965_SURFACE_NULL << 29));
2481     OUT_BATCH(batch, 0);
2482     OUT_BATCH(batch, 0);
2483     OUT_BATCH(batch, 0);
2484     OUT_BATCH(batch, 0);
2485     OUT_BATCH(batch, 0);
2486     ADVANCE_BATCH(batch);
2487
2488     BEGIN_BATCH(batch, 3);
2489     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2490     OUT_BATCH(batch, 0);
2491     OUT_BATCH(batch, 0);
2492     ADVANCE_BATCH(batch);
2493 }
2494
2495 static void
2496 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2497 {
2498     i965_render_drawing_rectangle(ctx);
2499 }
2500
2501 static void 
2502 gen7_emit_vs_state(VADriverContextP ctx)
2503 {
2504     struct i965_driver_data *i965 = i965_driver_data(ctx);
2505     struct intel_batchbuffer *batch = i965->batch;
2506
2507     /* disable VS constant buffer */
2508     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2509     OUT_BATCH(batch, 0);
2510     OUT_BATCH(batch, 0);
2511     OUT_BATCH(batch, 0);
2512     OUT_BATCH(batch, 0);
2513     OUT_BATCH(batch, 0);
2514     OUT_BATCH(batch, 0);
2515         
2516     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2517     OUT_BATCH(batch, 0); /* without VS kernel */
2518     OUT_BATCH(batch, 0);
2519     OUT_BATCH(batch, 0);
2520     OUT_BATCH(batch, 0);
2521     OUT_BATCH(batch, 0); /* pass-through */
2522 }
2523
2524 static void 
2525 gen7_emit_bypass_state(VADriverContextP ctx)
2526 {
2527     struct i965_driver_data *i965 = i965_driver_data(ctx);
2528     struct intel_batchbuffer *batch = i965->batch;
2529
2530     /* bypass GS */
2531     BEGIN_BATCH(batch, 7);
2532     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2533     OUT_BATCH(batch, 0);
2534     OUT_BATCH(batch, 0);
2535     OUT_BATCH(batch, 0);
2536     OUT_BATCH(batch, 0);
2537     OUT_BATCH(batch, 0);
2538     OUT_BATCH(batch, 0);
2539     ADVANCE_BATCH(batch);
2540
2541     BEGIN_BATCH(batch, 7);      
2542     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2543     OUT_BATCH(batch, 0); /* without GS kernel */
2544     OUT_BATCH(batch, 0);
2545     OUT_BATCH(batch, 0);
2546     OUT_BATCH(batch, 0);
2547     OUT_BATCH(batch, 0);
2548     OUT_BATCH(batch, 0); /* pass-through */
2549     ADVANCE_BATCH(batch);
2550
2551     BEGIN_BATCH(batch, 2);
2552     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2553     OUT_BATCH(batch, 0);
2554     ADVANCE_BATCH(batch);
2555
2556     /* disable HS */
2557     BEGIN_BATCH(batch, 7);
2558     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2559     OUT_BATCH(batch, 0);
2560     OUT_BATCH(batch, 0);
2561     OUT_BATCH(batch, 0);
2562     OUT_BATCH(batch, 0);
2563     OUT_BATCH(batch, 0);
2564     OUT_BATCH(batch, 0);
2565     ADVANCE_BATCH(batch);
2566
2567     BEGIN_BATCH(batch, 7);
2568     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2569     OUT_BATCH(batch, 0);
2570     OUT_BATCH(batch, 0);
2571     OUT_BATCH(batch, 0);
2572     OUT_BATCH(batch, 0);
2573     OUT_BATCH(batch, 0);
2574     OUT_BATCH(batch, 0);
2575     ADVANCE_BATCH(batch);
2576
2577     BEGIN_BATCH(batch, 2);
2578     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2579     OUT_BATCH(batch, 0);
2580     ADVANCE_BATCH(batch);
2581
2582     /* Disable TE */
2583     BEGIN_BATCH(batch, 4);
2584     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2585     OUT_BATCH(batch, 0);
2586     OUT_BATCH(batch, 0);
2587     OUT_BATCH(batch, 0);
2588     ADVANCE_BATCH(batch);
2589
2590     /* Disable DS */
2591     BEGIN_BATCH(batch, 7);
2592     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2593     OUT_BATCH(batch, 0);
2594     OUT_BATCH(batch, 0);
2595     OUT_BATCH(batch, 0);
2596     OUT_BATCH(batch, 0);
2597     OUT_BATCH(batch, 0);
2598     OUT_BATCH(batch, 0);
2599     ADVANCE_BATCH(batch);
2600
2601     BEGIN_BATCH(batch, 6);
2602     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2603     OUT_BATCH(batch, 0);
2604     OUT_BATCH(batch, 0);
2605     OUT_BATCH(batch, 0);
2606     OUT_BATCH(batch, 0);
2607     OUT_BATCH(batch, 0);
2608     ADVANCE_BATCH(batch);
2609
2610     BEGIN_BATCH(batch, 2);
2611     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2612     OUT_BATCH(batch, 0);
2613     ADVANCE_BATCH(batch);
2614
2615     /* Disable STREAMOUT */
2616     BEGIN_BATCH(batch, 3);
2617     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2618     OUT_BATCH(batch, 0);
2619     OUT_BATCH(batch, 0);
2620     ADVANCE_BATCH(batch);
2621 }
2622
2623 static void 
2624 gen7_emit_clip_state(VADriverContextP ctx)
2625 {
2626     struct i965_driver_data *i965 = i965_driver_data(ctx);
2627     struct intel_batchbuffer *batch = i965->batch;
2628
2629     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2630     OUT_BATCH(batch, 0);
2631     OUT_BATCH(batch, 0); /* pass-through */
2632     OUT_BATCH(batch, 0);
2633 }
2634
2635 static void 
2636 gen7_emit_sf_state(VADriverContextP ctx)
2637 {
2638     struct i965_driver_data *i965 = i965_driver_data(ctx);
2639     struct intel_batchbuffer *batch = i965->batch;
2640
2641     BEGIN_BATCH(batch, 14);
2642     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2643     OUT_BATCH(batch,
2644               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2645               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2646               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2647     OUT_BATCH(batch, 0);
2648     OUT_BATCH(batch, 0);
2649     OUT_BATCH(batch, 0); /* DW4 */
2650     OUT_BATCH(batch, 0);
2651     OUT_BATCH(batch, 0);
2652     OUT_BATCH(batch, 0);
2653     OUT_BATCH(batch, 0);
2654     OUT_BATCH(batch, 0); /* DW9 */
2655     OUT_BATCH(batch, 0);
2656     OUT_BATCH(batch, 0);
2657     OUT_BATCH(batch, 0);
2658     OUT_BATCH(batch, 0);
2659     ADVANCE_BATCH(batch);
2660
2661     BEGIN_BATCH(batch, 7);
2662     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2663     OUT_BATCH(batch, 0);
2664     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2665     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2666     OUT_BATCH(batch, 0);
2667     OUT_BATCH(batch, 0);
2668     OUT_BATCH(batch, 0);
2669     ADVANCE_BATCH(batch);
2670 }
2671
2672 static void 
2673 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2674 {
2675     struct i965_driver_data *i965 = i965_driver_data(ctx);
2676     struct intel_batchbuffer *batch = i965->batch;
2677     struct i965_render_state *render_state = &i965->render_state;
2678
2679     BEGIN_BATCH(batch, 3);
2680     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2681     OUT_BATCH(batch,
2682               GEN7_WM_DISPATCH_ENABLE |
2683               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2684     OUT_BATCH(batch, 0);
2685     ADVANCE_BATCH(batch);
2686
2687     BEGIN_BATCH(batch, 7);
2688     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2689     OUT_BATCH(batch, 1);
2690     OUT_BATCH(batch, 0);
2691     OUT_RELOC(batch, 
2692               render_state->curbe.bo,
2693               I915_GEM_DOMAIN_INSTRUCTION, 0,
2694               0);
2695     OUT_BATCH(batch, 0);
2696     OUT_BATCH(batch, 0);
2697     OUT_BATCH(batch, 0);
2698     ADVANCE_BATCH(batch);
2699
2700     BEGIN_BATCH(batch, 8);
2701     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2702     OUT_RELOC(batch, 
2703               render_state->render_kernels[kernel].bo,
2704               I915_GEM_DOMAIN_INSTRUCTION, 0,
2705               0);
2706     OUT_BATCH(batch, 
2707               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2708               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2709     OUT_BATCH(batch, 0); /* scratch space base offset */
2710     OUT_BATCH(batch, 
2711               ((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
2712               GEN7_PS_PUSH_CONSTANT_ENABLE |
2713               GEN7_PS_ATTRIBUTE_ENABLE |
2714               GEN7_PS_16_DISPATCH_ENABLE);
2715     OUT_BATCH(batch, 
2716               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2717     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2718     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2719     ADVANCE_BATCH(batch);
2720 }
2721
2722 static void
2723 gen7_emit_vertex_element_state(VADriverContextP ctx)
2724 {
2725     struct i965_driver_data *i965 = i965_driver_data(ctx);
2726     struct intel_batchbuffer *batch = i965->batch;
2727
2728     /* Set up our vertex elements, sourced from the single vertex buffer. */
2729     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2730     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2731     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2732               GEN6_VE0_VALID |
2733               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2734               (0 << VE0_OFFSET_SHIFT));
2735     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2736               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2737               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2738               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2739     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2740     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2741               GEN6_VE0_VALID |
2742               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2743               (8 << VE0_OFFSET_SHIFT));
2744     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2745               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2746               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2747               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2748 }
2749
2750 static void
2751 gen7_emit_vertices(VADriverContextP ctx)
2752 {
2753     struct i965_driver_data *i965 = i965_driver_data(ctx);
2754     struct intel_batchbuffer *batch = i965->batch;
2755     struct i965_render_state *render_state = &i965->render_state;
2756
2757     BEGIN_BATCH(batch, 5);
2758     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2759     OUT_BATCH(batch, 
2760               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2761               GEN6_VB0_VERTEXDATA |
2762               GEN7_VB0_ADDRESS_MODIFYENABLE |
2763               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2764     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2765     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2766     OUT_BATCH(batch, 0);
2767     ADVANCE_BATCH(batch);
2768
2769     BEGIN_BATCH(batch, 7);
2770     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2771     OUT_BATCH(batch,
2772               _3DPRIM_RECTLIST |
2773               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2774     OUT_BATCH(batch, 3); /* vertex count per instance */
2775     OUT_BATCH(batch, 0); /* start vertex offset */
2776     OUT_BATCH(batch, 1); /* single instance */
2777     OUT_BATCH(batch, 0); /* start instance location */
2778     OUT_BATCH(batch, 0);
2779     ADVANCE_BATCH(batch);
2780 }
2781
2782 static void
2783 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2784 {
2785     struct i965_driver_data *i965 = i965_driver_data(ctx);
2786     struct intel_batchbuffer *batch = i965->batch;
2787
2788     intel_batchbuffer_start_atomic(batch, 0x1000);
2789     intel_batchbuffer_emit_mi_flush(batch);
2790     gen7_emit_invariant_states(ctx);
2791     gen7_emit_state_base_address(ctx);
2792     gen7_emit_viewport_state_pointers(ctx);
2793     gen7_emit_urb(ctx);
2794     gen7_emit_cc_state_pointers(ctx);
2795     gen7_emit_sampler_state_pointers(ctx);
2796     gen7_emit_bypass_state(ctx);
2797     gen7_emit_vs_state(ctx);
2798     gen7_emit_clip_state(ctx);
2799     gen7_emit_sf_state(ctx);
2800     gen7_emit_wm_state(ctx, kernel);
2801     gen7_emit_binding_table(ctx);
2802     gen7_emit_depth_buffer_state(ctx);
2803     gen7_emit_drawing_rectangle(ctx);
2804     gen7_emit_vertex_element_state(ctx);
2805     gen7_emit_vertices(ctx);
2806     intel_batchbuffer_end_atomic(batch);
2807 }
2808
2809 static void
2810 gen7_render_put_surface(
2811     VADriverContextP   ctx,
2812     VASurfaceID        surface,
2813     const VARectangle *src_rect,
2814     const VARectangle *dst_rect,
2815     unsigned int       flags
2816 )
2817 {
2818     struct i965_driver_data *i965 = i965_driver_data(ctx);
2819     struct intel_batchbuffer *batch = i965->batch;
2820
2821     gen7_render_initialize(ctx);
2822     gen7_render_setup_states(ctx, surface, src_rect, dst_rect);
2823     i965_clear_dest_region(ctx);
2824     gen7_render_emit_states(ctx, PS_KERNEL);
2825     intel_batchbuffer_flush(batch);
2826 }
2827
2828 static void
2829 gen7_subpicture_render_blend_state(VADriverContextP ctx)
2830 {
2831     struct i965_driver_data *i965 = i965_driver_data(ctx);
2832     struct i965_render_state *render_state = &i965->render_state;
2833     struct gen6_blend_state *blend_state;
2834
2836     dri_bo_map(render_state->cc.blend, 1);
2837     assert(render_state->cc.blend->virtual);
2838     blend_state = render_state->cc.blend->virtual;
2839     memset(blend_state, 0, sizeof(*blend_state));
2840     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2841     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2842     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2843     blend_state->blend0.blend_enable = 1;
2844     blend_state->blend1.post_blend_clamp_enable = 1;
2845     blend_state->blend1.pre_blend_clamp_enable = 1;
2846     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2847     dri_bo_unmap(render_state->cc.blend);
2848 }
2849
2850 static void
2851 gen7_subpicture_render_setup_states(
2852     VADriverContextP   ctx,
2853     VASurfaceID        surface,
2854     const VARectangle *src_rect,
2855     const VARectangle *dst_rect
2856 )
2857 {
2858     i965_render_dest_surface_state(ctx, 0);
2859     i965_subpic_render_src_surfaces_state(ctx, surface);
2860     gen7_render_sampler(ctx); /* the GEN7 sampler-state layout, not the GEN4-6 one */
2861     i965_render_cc_viewport(ctx);
2862     gen7_render_color_calc_state(ctx);
2863     gen7_subpicture_render_blend_state(ctx);
2864     gen7_render_depth_stencil_state(ctx);
2865     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2866 }
2867
2868 static void
2869 gen7_render_put_subpicture(
2870     VADriverContextP   ctx,
2871     VASurfaceID        surface,
2872     const VARectangle *src_rect,
2873     const VARectangle *dst_rect
2874 )
2875 {
2876     struct i965_driver_data *i965 = i965_driver_data(ctx);
2877     struct intel_batchbuffer *batch = i965->batch;
2878     struct object_surface *obj_surface = SURFACE(surface);
         struct object_subpic *obj_subpic;

         assert(obj_surface);
2879     obj_subpic = SUBPIC(obj_surface->subpic);
2880
2881     assert(obj_subpic);
2882     gen7_render_initialize(ctx);
2883     gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2884     gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2885     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2886     intel_batchbuffer_flush(batch);
2887 }
2888
2889
2890 /*
2891  * global functions
2892  */
2893 VAStatus 
2894 i965_DestroySurfaces(VADriverContextP ctx,
2895                      VASurfaceID *surface_list,
2896                      int num_surfaces);
2897 void
2898 intel_render_put_surface(
2899     VADriverContextP   ctx,
2900     VASurfaceID        surface,
2901     const VARectangle *src_rect,
2902     const VARectangle *dst_rect,
2903     unsigned int       flags
2904 )
2905 {
2906     struct i965_driver_data *i965 = i965_driver_data(ctx);
2907     int has_done_scaling = 0;
2908     VASurfaceID in_surface_id = surface;
2909     VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);
2910
2911     assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));
2912
2913     if (out_surface_id != VA_INVALID_ID)
2914         in_surface_id = out_surface_id;
2915
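         /*
          * If post-processing already scaled into a new surface, the
          * remaining blit is 1:1, so the destination rectangle doubles as
          * the source rectangle.
          */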
2916     if (IS_GEN7(i965->intel.device_id))
2917         gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2918     else if (IS_GEN6(i965->intel.device_id))
2919         gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2920     else
2921         i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
2922
2923     if (in_surface_id != surface)
2924         i965_DestroySurfaces(ctx, &in_surface_id, 1);
2925 }
2926
2927 void
2928 intel_render_put_subpicture(
2929     VADriverContextP   ctx,
2930     VASurfaceID        surface,
2931     const VARectangle *src_rect,
2932     const VARectangle *dst_rect
2933 )
2934 {
2935     struct i965_driver_data *i965 = i965_driver_data(ctx);
2936
2937     if (IS_GEN7(i965->intel.device_id))
2938         gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2939     else if (IS_GEN6(i965->intel.device_id))
2940         gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2941     else
2942         i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
2943 }
2944
2945 Bool 
2946 i965_render_init(VADriverContextP ctx)
2947 {
2948     struct i965_driver_data *i965 = i965_driver_data(ctx);
2949     struct i965_render_state *render_state = &i965->render_state;
2950     int i;
2951
2952     /* kernel */
2953     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
2954                                  sizeof(render_kernels_gen5[0])));
2955     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
2956                                  sizeof(render_kernels_gen6[0])));
         assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen7) /
                                      sizeof(render_kernels_gen7[0])));
2957
2958     if (IS_GEN7(i965->intel.device_id))
2959         memcpy(render_state->render_kernels, render_kernels_gen7, sizeof(render_state->render_kernels));
2960     else if (IS_GEN6(i965->intel.device_id))
2961         memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
2962     else if (IS_IRONLAKE(i965->intel.device_id))
2963         memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
2964     else
2965         memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
2966
2967     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
2968         struct i965_kernel *kernel = &render_state->render_kernels[i];
2969
2970         if (!kernel->size)
2971             continue;
2972
2973         kernel->bo = dri_bo_alloc(i965->intel.bufmgr, 
2974                                   kernel->name, 
2975                                   kernel->size, 0x1000);
2976         assert(kernel->bo);
2977         dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
2978     }
2979
2980     /* constant buffer */
2981     render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
2982                       "constant buffer",
2983                       4096, 64);
2984     assert(render_state->curbe.bo);
2985
2986     return True;
2987 }
2988
2989 Bool 
2990 i965_render_terminate(VADriverContextP ctx)
2991 {
2992     int i;
2993     struct i965_driver_data *i965 = i965_driver_data(ctx);
2994     struct i965_render_state *render_state = &i965->render_state;
2995
2996     dri_bo_unreference(render_state->curbe.bo);
2997     render_state->curbe.bo = NULL;
2998
2999     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
3000         struct i965_kernel *kernel = &render_state->render_kernels[i];
3001         
3002         dri_bo_unreference(kernel->bo);
3003         kernel->bo = NULL;
3004     }
3005
3006     dri_bo_unreference(render_state->vb.vertex_buffer);
3007     render_state->vb.vertex_buffer = NULL;
3008     dri_bo_unreference(render_state->vs.state);
3009     render_state->vs.state = NULL;
3010     dri_bo_unreference(render_state->sf.state);
3011     render_state->sf.state = NULL;
3012     dri_bo_unreference(render_state->wm.sampler);
3013     render_state->wm.sampler = NULL;
3014     dri_bo_unreference(render_state->wm.state);
3015     render_state->wm.state = NULL;
3016     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
         render_state->wm.surface_state_binding_table_bo = NULL;
3017     dri_bo_unreference(render_state->cc.viewport);
3018     render_state->cc.viewport = NULL;
3019     dri_bo_unreference(render_state->cc.state);
3020     render_state->cc.state = NULL;
3021     dri_bo_unreference(render_state->cc.blend);
3022     render_state->cc.blend = NULL;
3023     dri_bo_unreference(render_state->cc.depth_stencil);
3024     render_state->cc.depth_stencil = NULL;
3025
3026     if (render_state->draw_region) {
3027         dri_bo_unreference(render_state->draw_region->bo);
3028         free(render_state->draw_region);
3029         render_state->draw_region = NULL;
3030     }
3031
3032     return True;
3033 }
3034