1 #include "rendercopy.h"
2 #include "gen8_render.h"
6 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
7 #define VERTEX_SIZE (3*4)
10 static void dump_batch(struct intel_batchbuffer *batch)
12 #define dump_batch(x) do { } while(0)
22 uint32_t sf_clip_state;
/* see shaders/ps/blit.g7a */
/* Pre-assembled gen7 pixel-shader kernel for the blit: four interpolation
 * instructions computing the texture coordinates, a sampler SEND to fetch
 * from the source surface, and a render-target-write SEND.  The trailing
 * group of MOVs writing 0x3f800000 (1.0f) followed by another RT-write
 * looks like an alternative solid-fill variant.  NOTE(review): the array's
 * closing brace and any #if/#else selecting between the two payloads are
 * not visible in this chunk -- confirm against the full source. */
static const uint32_t ps_kernel[][4] = {
	{ 0x0060005a, 0x21403ae8, 0x3a0000c0, 0x008d0040 },
	{ 0x0060005a, 0x21603ae8, 0x3a0000c0, 0x008d0080 },
	{ 0x0060005a, 0x21803ae8, 0x3a0000d0, 0x008d0040 },
	{ 0x0060005a, 0x21a03ae8, 0x3a0000d0, 0x008d0080 },
	{ 0x02800031, 0x2e0022e8, 0x0e000140, 0x08840001 },
	{ 0x05800031, 0x200022e0, 0x0e000e00, 0x90031000 },
	{ 0x00600001, 0x2e000061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e200061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e400061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e600061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e800061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ea00061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ec00061, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ee00061, 0x00000000, 0x3f800000 },
	{ 0x05800031, 0x20001e3c, 0x00000e00, 0x90031000 },
49 batch_used(struct intel_batchbuffer *batch)
51 return batch->ptr - batch->buffer;
55 batch_align(struct intel_batchbuffer *batch, uint32_t align)
57 uint32_t offset = batch_used(batch);
58 offset = ALIGN(offset, align);
59 batch->ptr = batch->buffer + offset;
64 batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
66 uint32_t offset = batch_align(batch, align);
68 return memset(batch->buffer + offset, 0, size);
72 batch_offset(struct intel_batchbuffer *batch, void *ptr)
74 return (uint8_t *)ptr - batch->buffer;
/* Copy @size bytes from @ptr into freshly allocated, @align-aligned space
 * in the batch and return the batch offset of the copy. */
static uint32_t
batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
{
	void *dst = batch_alloc(batch, size, align);

	memcpy(dst, ptr, size);
	return batch_offset(batch, dst);
}
/* Upload the 4KiB CPU-side batch contents into the bo and submit it for
 * execution up to @batch_end bytes.  NOTE(review): the declaration of
 * 'ret', the trailing arguments of the exec call and the error handling
 * are on lines not visible in this chunk -- confirm against the full
 * source. */
gen6_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
	ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
	ret = drm_intel_bo_mrb_exec(batch->bo, batch_end,
95 /* Mostly copy+paste from gen6, except height, width, pitch moved */
97 gen8_bind_buf(struct intel_batchbuffer *batch, struct scratch_buf *buf,
98 uint32_t format, int is_dst) {
99 struct gen8_surface_state *ss;
100 uint32_t write_domain, read_domain;
104 write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
107 read_domain = I915_GEM_DOMAIN_SAMPLER;
110 ss = batch_alloc(batch, sizeof(*ss), 32);
111 ss->ss0.surface_type = GEN6_SURFACE_2D;
112 ss->ss0.surface_format = format;
113 ss->ss0.render_cache_read_write = 1;
114 if (buf->tiling == I915_TILING_X)
115 ss->ss0.tiled_mode = 2;
116 else if (buf->tiling == I915_TILING_Y)
117 ss->ss0.tiled_mode = 3;
119 ss->ss8.base_addr = buf->bo->offset;
121 ret = drm_intel_bo_emit_reloc(batch->bo,
122 batch_offset(batch, ss) + 4,
124 read_domain, write_domain);
127 ss->ss2.height = buf_height(buf) - 1;
128 ss->ss2.width = buf_width(buf) - 1;
129 ss->ss3.pitch = buf->stride - 1;
131 ss->ss7.shader_chanel_select_a = 4;
132 ss->ss7.shader_chanel_select_g = 5;
133 ss->ss7.shader_chanel_select_b = 6;
134 ss->ss7.shader_chanel_select_a = 7;
136 return batch_offset(batch, ss);
140 gen8_bind_surfaces(struct intel_batchbuffer *batch,
141 struct scratch_buf *src,
142 struct scratch_buf *dst) {
143 uint32_t *binding_table;
145 binding_table = batch_alloc(batch, 8, 32);
148 gen8_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
150 gen8_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
152 return batch_offset(batch, binding_table);
155 /* Mostly copy+paste from gen6, except wrap modes moved */
157 gen8_create_sampler(struct intel_batchbuffer *batch) {
158 struct gen8_sampler_state *ss;
160 ss = batch_alloc(batch, sizeof(*ss), 32);
162 ss->ss0.min_filter = GEN6_MAPFILTER_NEAREST;
163 ss->ss0.mag_filter = GEN6_MAPFILTER_NEAREST;
164 ss->ss3.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
165 ss->ss3.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
166 ss->ss3.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
168 /* I've experimented with non-normalized coordinates and using the LD
169 * sampler fetch, but couldn't make it work. */
170 ss->ss3.non_normalized_coord = 0;
172 return batch_offset(batch, ss);
/*
 * gen7_fill_vertex_buffer_data populate vertex buffer with data.
 *
 * The vertex buffer consists of 3 vertices to construct a RECTLIST. The 4th
 * vertex is implied (automatically derived by the HW). Each element has the
 * destination offset, and the normalized texture offset (src). The rectangle
 * itself will span the entire subsurface to be copied.
 *
 * see gen6_emit_vertex_elements
 */
/* NOTE(review): the return-type line and the capture of 'ret' (presumably
 * the batch pointer before the first vertex is emitted) are on lines not
 * visible in this chunk -- confirm against the full source. */
gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
			     struct scratch_buf *src,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t dst_x, uint32_t dst_y,
			     uint32_t width, uint32_t height) {
	/* Bottom-right: integer dst coords, normalized src coords. */
	emit_vertex_2s(batch, dst_x + width, dst_y + height);
	emit_vertex_normalized(batch, src_x + width, buf_width(src));
	emit_vertex_normalized(batch, src_y + height, buf_height(src));

	/* Bottom-left. */
	emit_vertex_2s(batch, dst_x, dst_y + height);
	emit_vertex_normalized(batch, src_x, buf_width(src));
	emit_vertex_normalized(batch, src_y + height, buf_height(src));

	/* Top-left. */
	emit_vertex_2s(batch, dst_x, dst_y);
	emit_vertex_normalized(batch, src_x, buf_width(src));
	emit_vertex_normalized(batch, src_y, buf_height(src));

	return batch_offset(batch, ret);
/*
 * gen6_emit_vertex_elements - The vertex elements describe the contents of the
 * vertex buffer. We pack the vertex buffer in a semi weird way, conforming to
 * what gen6_rendercopy did. The most straightforward would be to store
 * everything as floats.
 *
 * see gen7_fill_vertex_buffer_data() for where the corresponding elements are
 * written.
 */
gen6_emit_vertex_elements(struct intel_batchbuffer *batch) {
	/*
	 * The 3 VUE slots per vertex, as read by the fixed function units:
	 * dword 0-3: pad (0, 0, 0, 0)
	 * dword 4-7: position (x, y, 0, 1.0),
	 * dword 8-11: texture coordinate 0 (u0, v0, 0, 1.0)
	 */
	OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS | (3 * 2 + 1 - 2));

	/* Element state 0. These are 4 dwords of 0 required for the VUE format.
	 * We don't really know or care what they do.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* we specify 0, but it really does not exist */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

	/* Element state 1 - Our "destination" vertices. These are passed down
	 * through the pipeline, and eventually make it to the pixel shader as
	 * the offsets in the destination surface. It's packed as the 16
	 * signed/scaled because of gen6 rendercopy. I see no particular reason
	 * for doing this though.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* Element state 2. Last but not least we store the U,V components as
	 * normalized floats. These will be used in the pixel shader to sample
	 * from the source buffer.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT |
		  4 << VE0_OFFSET_SHIFT); /* offset vb in bytes */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
/*
 * gen7_emit_vertex_buffer emit the vertex buffers command
 *
 * @offset - byte offset within the @batch where the vertex buffer starts.
 */
/* NOTE(review): the tail of the signature ('uint32_t offset) {') is on a
 * line not visible in this chunk -- confirm against the full source. */
static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
	OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | (4 * 1 - 1));
	OUT_BATCH(0 << VB0_BUFFER_INDEX_SHIFT | /* VB 0th index */
		  GEN7_VB0_BUFFER_ADDR_MOD_EN | /* Address Modify Enable */
		  VERTEX_SIZE << VB0_BUFFER_PITCH_SHIFT);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset);
	/* End address: last valid byte of the 3-vertex RECTLIST data. */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset + (VERTEX_SIZE * 3) - 1);
286 gen6_create_cc_state(struct intel_batchbuffer *batch)
288 struct gen6_color_calc_state *cc_state;
289 cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
290 return batch_offset(batch, cc_state);
294 gen8_create_blend_state(struct intel_batchbuffer *batch)
296 struct gen8_blend_state *blend;
299 blend = batch_alloc(batch, sizeof(*blend), 64);
300 for (i = 0; i < 16; i++) {
301 blend->bs[i].pre_blend_color_clamp = 1;
302 blend->bs[i].color_buffer_blend = 0;
304 return batch_offset(batch, blend);
308 gen6_create_cc_viewport(struct intel_batchbuffer *batch)
310 struct gen6_cc_viewport *vp;
312 vp = batch_alloc(batch, sizeof(*vp), 32);
313 /* XXX I don't understand this */
314 vp->min_depth = -1.e35;
315 vp->max_depth = 1.e35;
316 return batch_offset(batch, vp);
320 gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
321 /* XXX these are likely not needed */
322 struct gen7_sf_clip_viewport *scv_state;
323 scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
324 scv_state->guardband.xmin = 0;
325 scv_state->guardband.xmax = 1.0f;
326 scv_state->guardband.ymin = 0;
327 scv_state->guardband.ymax = 1.0f;
328 return batch_offset(batch, scv_state);
332 gen6_create_scissor_rect(struct intel_batchbuffer *batch)
334 struct gen6_scissor_rect *scissor;
335 scissor = batch_alloc(batch, sizeof(*scissor), 64);
336 return batch_offset(batch, scissor);
/* Point the System Instruction Pointer at offset 0 (no exception handler).
 * NOTE(review): the return-type line and any trailing dwords are not
 * visible in this chunk. */
gen6_emit_sip(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_STATE_SIP | 0);
/* Give every shader stage zero push-constant space -- no constants are
 * used by the blit kernel.  NOTE(review): the zero payload dword after
 * each command is on lines not visible in this chunk. */
gen7_emit_push_constants(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_VS);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_HS);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_DS);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_GS);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS);
/* Program STATE_BASE_ADDRESS so that surface-state, dynamic-state and
 * instruction base addresses all point at the batch bo -- every state
 * offset built by the batch_alloc() helpers is then batch-relative.
 * General state stays at 0 (stateless). */
gen7_emit_state_base_address(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_STATE_BASE_ADDRESS | (10 - 2));
	/* general (stateless) */
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	/* surface state base */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY);
	/* dynamic state base */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION,
		  0, BASE_ADDRESS_MODIFY);
	/* indirect object base */
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	/* instruction base (the PS kernel lives in the batch too) */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
	/* upper bounds; 0xfffff000 disables bounds checking */
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	OUT_BATCH(0xfffff000 | BASE_ADDRESS_MODIFY); // copied from mesa
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
/* Partition the URB: give the VS the minimum legal allocation and start
 * every stage's region at the same offset (GS/HS/DS get no entries since
 * those stages are disabled). */
gen7_emit_urb(struct intel_batchbuffer *batch) {
	/* XXX: Min valid values from mesa */
	const int vs_entries = 32;
	const int vs_size = 2;
	const int vs_start = 2;

	OUT_BATCH(GEN7_3DSTATE_URB_VS);
	OUT_BATCH(vs_entries | ((vs_size - 1) << 16) | (vs_start << 25));
	OUT_BATCH(GEN7_3DSTATE_URB_GS);
	OUT_BATCH(vs_start << 25);
	OUT_BATCH(GEN7_3DSTATE_URB_HS);
	OUT_BATCH(vs_start << 25);
	OUT_BATCH(GEN7_3DSTATE_URB_DS);
	OUT_BATCH(vs_start << 25);
/* Point the hardware at the blend and color-calc state built earlier in
 * the state half of the batch.  Bit 0 ('| 1') marks the pointer as
 * valid/changed -- presumably per the 3DSTATE_*_POINTERS definition;
 * confirm against the PRM. */
gen8_emit_cc(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS);
	OUT_BATCH(cc.blend_state | 1);

	OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS);
	OUT_BATCH(cc.cc_state | 1);
/* Single-sampled rendering: default MULTISAMPLE state and sample mask.
 * NOTE(review): the payload dwords of both commands are on lines not
 * visible in this chunk. */
gen7_emit_multisample(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE | 2);
	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK);
/* Disable the vertex shader stage: null binding table, samplers,
 * constants and a VS_ENABLE-less 3DSTATE_VS (vertices pass through).
 * NOTE(review): the zero payload dwords between the visible commands are
 * on lines not visible in this chunk. */
gen7_emit_vs(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS);
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS);
	OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (7-2));
	OUT_BATCH(GEN6_3DSTATE_VS | (6-2));
/* Disable the hull shader stage (all-zero constants, HS state, binding
 * table and samplers).  NOTE(review): zero payload dwords are on lines
 * not visible in this chunk. */
gen7_emit_hs(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_CONSTANT_HS | (7-2));
	OUT_BATCH(GEN7_3DSTATE_HS | (7-2));
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS);
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_HS);
/* Disable the geometry shader stage, mirroring gen7_emit_hs(). */
gen7_emit_gs(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_CONSTANT_GS | (7-2));
	OUT_BATCH(GEN7_3DSTATE_GS | (7-2));
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS);
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_GS);
/* Disable the domain shader stage, mirroring gen7_emit_hs(). */
gen7_emit_ds(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_CONSTANT_DS | (7-2));
	OUT_BATCH(GEN7_3DSTATE_DS | (6-2));
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS);
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_DS);
/* Emit zeroed state for remaining fixed functions (tessellation engine
 * et al.).  NOTE(review): only the 3DSTATE_TE header is visible in this
 * chunk; the rest of the function is on missing lines. */
gen7_emit_null_state(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_TE | (4-2));
/* Disable clipping: the clip stage is set to pass-through. */
gen7_emit_clip(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
	OUT_BATCH(0); /* pass-through */
/* Program the setup backend (SBE) to forward one attribute (the texture
 * coordinate) to the PS starting at the second VUE slot, and the SF unit
 * with culling disabled.  NOTE(review): most zero payload dwords of both
 * commands are on lines not visible in this chunk. */
gen7_emit_sf(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_SBE | (14 - 2));
	OUT_BATCH(0 << 22 | 1 << 11 | 1 << 4);
	OUT_BATCH(1 << 22 | 1 << 11 | 1 << 4);
	OUT_BATCH(GEN6_3DSTATE_SF | (7 - 2));
	OUT_BATCH(GEN6_3DSTATE_SF_CULL_NONE);
	// OUT_BATCH(2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
/* Program the WM and PS stages: SIMD16 dispatch of the blit kernel at
 * batch offset @kernel, one sampler, two binding-table entries, payload
 * starting at GRF 6.  (The 'SHITF' spelling is the macro's actual name
 * in the header -- do not "fix" it here.) */
gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
	const int max_threads = 86;

	OUT_BATCH(GEN6_3DSTATE_WM | (3 - 2));
	OUT_BATCH(GEN7_WM_DISPATCH_ENABLE |
		  /* XXX: I don't understand the BARYCENTRIC stuff, but it
		   * appears we need it to put our setup data in the place we
		   * expect (g6, see below) */
		  GEN7_3DSTATE_PS_PERSPECTIVE_PIXEL_BARYCENTRIC);

	/* No push constants for the PS. */
	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (7-2));

	OUT_BATCH(GEN7_3DSTATE_PS | (10-2));
	OUT_BATCH(0); /* kernel hi */
	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF |
		  2 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
	OUT_BATCH(0); /* scratch space stuff */
	OUT_BATCH(0); /* scratch hi */
	OUT_BATCH((max_threads - 1) << GEN7_3DSTATE_WM_MAX_THREADS_SHIFT |
		  GEN7_3DSTATE_PS_ATTRIBUTE_ENABLED |
		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT);
	OUT_BATCH(0); // kernel 1
	OUT_BATCH(0); /* kernel 1 hi */
/* Disable depth, hierarchical-depth and stencil buffers (all-null
 * buffer state).  NOTE(review): the zero payload dwords are on lines
 * not visible in this chunk. */
gen8_emit_depth(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (7-2));
	OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER | (3-2));
	OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER | (3-2));
/* Emit clear params with the "clear valid" bit set. */
gen7_emit_clear(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3-2));
	OUT_BATCH(1); // clear valid
/* Set the drawing rectangle to cover the whole destination surface
 * (packed as y_max << 16 | x_max, inclusive). */
gen6_emit_drawing_rectangle(struct intel_batchbuffer *batch, struct scratch_buf *dst)
	OUT_BATCH(GEN6_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
	OUT_BATCH((buf_height(dst) - 1) << 16 | (buf_width(dst) - 1));
/* Vertex elements MUST be defined before this according to spec */
/* Kick the draw: a 3-vertex RECTLIST (4th vertex derived by the HW).
 * NOTE(review): @offset is unused here because the vertex data location
 * was already programmed via GEN6_3DSTATE_VERTEX_BUFFERS. */
static void gen7_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset)
	OUT_BATCH(GEN6_3DPRIMITIVE | (7-2));
	OUT_BATCH(_3DPRIM_RECTLIST);
	OUT_BATCH(3); /* vertex count */
	OUT_BATCH(0); /* We're specifying this instead with offset in GEN6_3DSTATE_VERTEX_BUFFERS */
	OUT_BATCH(1); /* single instance */
	OUT_BATCH(0); /* start instance location */
	OUT_BATCH(0); /* index buffer offset, ignored */
652 /* The general rule is if it's named gen6 it is directly copied from
653 * gen6_render_copyfunc.
655 * This sets up most of the 3d pipeline, and most of that to NULL state. The
656 * docs aren't specific about exactly what must be set up NULL, but the general
657 * rule is we could be run at any time, and so the most state we set to NULL,
658 * the better our odds of success.
660 * +---------------+ <---- 4096
666 * |_______|_______| <---- 2048 + ?
673 * +---------------+ <---- 0 + ?
 * The batch commands point to state within the batch, so all state offsets should be
676 * 0 < offset < 4096. Both commands and state build upwards, and are constructed
677 * in that order. This means too many batch commands can delete state if not
682 #define BATCH_STATE_SPLIT 2048
/* Copy a width x height rectangle from (src_x, src_y) in @src to
 * (dst_x, dst_y) in @dst using the 3D pipeline.  State objects are built
 * upwards from BATCH_STATE_SPLIT, commands from offset 0; see the layout
 * comment above.  NOTE(review): the opening brace and the declaration of
 * 'batch_end' are on lines not visible in this chunk. */
void gen8_render_copyfunc(struct intel_batchbuffer *batch,
			  struct scratch_buf *src, unsigned src_x, unsigned src_y,
			  unsigned width, unsigned height,
			  struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
	uint32_t ps_sampler_state, ps_kernel_off, ps_binding_table;
	uint32_t scissor_state;
	uint32_t vertex_buffer;

	intel_batchbuffer_flush(batch);

	batch_align(batch, 8);

	/* Build all indirect state in the upper half of the batch first. */
	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];

	ps_binding_table = gen8_bind_surfaces(batch, src, dst);
	ps_sampler_state = gen8_create_sampler(batch);
	ps_kernel_off = batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64);
	vertex_buffer = gen7_fill_vertex_buffer_data(batch, src, src_x, src_y, dst_x, dst_y, width, height);
	cc.cc_state = gen6_create_cc_state(batch);
	cc.blend_state = gen8_create_blend_state(batch);
	viewport.cc_state = gen6_create_cc_viewport(batch);
	viewport.sf_clip_state = gen7_create_sf_clip_viewport(batch);
	scissor_state = gen6_create_scissor_rect(batch);
	/* TODO: there is other state which isn't setup */

	/* State must not overflow the 4KiB batch. */
	assert(batch->ptr < &batch->buffer[4095]);

	/* Rewind and emit the commands from the bottom of the batch. */
	batch->ptr = batch->buffer;

	/* Start emitting the commands. The order roughly follows the mesa
	 * blorp code. */
	OUT_BATCH(GEN6_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	gen6_emit_sip(batch);

	gen7_emit_push_constants(batch);

	gen7_emit_state_base_address(batch);

	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC);
	OUT_BATCH(viewport.cc_state);
	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP);
	OUT_BATCH(viewport.sf_clip_state);

	gen7_emit_urb(batch);

	gen7_emit_multisample(batch);

	gen7_emit_null_state(batch);

	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | 1);

	gen7_emit_clip(batch);

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS);
	OUT_BATCH(ps_binding_table);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS);
	OUT_BATCH(ps_sampler_state);

	gen8_emit_ps(batch, ps_kernel_off);

	OUT_BATCH(GEN6_3DSTATE_SCISSOR_STATE_POINTERS);
	OUT_BATCH(scissor_state);

	gen8_emit_depth(batch);

	gen7_emit_clear(batch);

	gen6_emit_drawing_rectangle(batch, dst);

	gen7_emit_vertex_buffer(batch, vertex_buffer);
	gen6_emit_vertex_elements(batch);

	gen7_emit_primitive(batch, vertex_buffer);

	OUT_BATCH(MI_BATCH_BUFFER_END);

	/* Commands must stay below the state half of the batch. */
	batch_end = batch_align(batch, 8);
	assert(batch_end < BATCH_STATE_SPLIT);

	gen6_render_flush(batch, batch_end);
	intel_batchbuffer_reset(batch);
/* Debug helper: dump the raw 4KiB batch to /tmp for offline decoding.
 * NOTE(review): the fd error check, close() and closing brace are on
 * lines not visible in this chunk; as shown the open() result is
 * unchecked and the write() return value ignored -- confirm against the
 * full source. */
static void dump_batch(struct intel_batchbuffer *batch) {
	int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666);
	write(fd, batch->buffer, 4096);