#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "rendercopy.h"
#include "gen8_render.h"
#include "intel_reg.h"

#include <intel_aub.h>

#define VERTEX_SIZE (3*4)
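/*
 * Each vertex is 3 dwords: the destination x/y packed as two signed
 * 16-bit integers in the first dword, followed by the normalized
 * source u/v as two floats. See gen7_fill_vertex_buffer_data() and
 * gen6_emit_vertex_elements() below.
 */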
#if DEBUG_RENDERCPY
static void dump_batch(struct intel_batchbuffer *batch) {
	int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666);
	if (fd != -1) {
		igt_assert(write(fd, batch->buffer, 4096) == 4096);
		fd = close(fd);
	}
}
#else
#define dump_batch(x) do { } while (0)
#endif
static struct {
	uint32_t cc_state;
	uint32_t blend_state;
} cc;

static struct {
	uint32_t cc_state;
	uint32_t sf_clip_state;
} viewport;
/* see shaders/ps/blit.g7a */
static const uint32_t ps_kernel[][4] = {
#if 1
	{ 0x0060005a, 0x21403ae8, 0x3a0000c0, 0x008d0040 },
	{ 0x0060005a, 0x21603ae8, 0x3a0000c0, 0x008d0080 },
	{ 0x0060005a, 0x21803ae8, 0x3a0000d0, 0x008d0040 },
	{ 0x0060005a, 0x21a03ae8, 0x3a0000d0, 0x008d0080 },
	{ 0x02800031, 0x2e0022e8, 0x0e000140, 0x08840001 },
	{ 0x05800031, 0x200022e0, 0x0e000e00, 0x90031000 },
#else
	/* Write all pixels as 1.0f (useful for debugging) */
	{ 0x00600001, 0x2e000608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e200608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e400608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e600608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2e800608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ea00608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ec00608, 0x00000000, 0x3f800000 },
	{ 0x00600001, 0x2ee00608, 0x00000000, 0x3f800000 },
	{ 0x05800031, 0x200022e0, 0x0e000e00, 0x90031000 },
#endif
};
/* AUB annotation support */
#define MAX_ANNOTATIONS 33
struct annotations_context {
	drm_intel_aub_annotation annotations[MAX_ANNOTATIONS];
	int index;
} aub_annotations;
static void annotation_init(struct annotations_context *ctx)
{
	/* ctx->annotations is an array keeping a list of annotations of the
	 * batch buffer ordered by offset. ctx->annotations[0] is thus left
	 * for the command stream and will be filled just before executing
	 * the batch buffer with annotation_add_batch() */
	ctx->index = 1;
}
static void add_annotation(drm_intel_aub_annotation *a,
			   uint32_t type, uint32_t subtype,
			   uint32_t ending_offset)
{
	a->type = type;
	a->subtype = subtype;
	a->ending_offset = ending_offset;
}
static void annotation_add_batch(struct annotations_context *ctx, size_t size)
{
	add_annotation(&ctx->annotations[0], AUB_TRACE_TYPE_BATCH, 0, size);
}
static void annotation_add_state(struct annotations_context *ctx,
				 uint32_t state_type,
				 uint32_t start_offset,
				 uint32_t size)
{
	igt_assert(ctx->index < MAX_ANNOTATIONS);

	add_annotation(&ctx->annotations[ctx->index++],
		       AUB_TRACE_TYPE_NOTYPE, 0,
		       start_offset);
	add_annotation(&ctx->annotations[ctx->index++],
		       AUB_TRACE_TYPE(state_type),
		       AUB_TRACE_SUBTYPE(state_type),
		       start_offset + size);
}
static void annotation_flush(struct annotations_context *ctx,
			     struct intel_batchbuffer *batch)
{
	if (!igt_aub_dump_enabled())
		return;

	drm_intel_bufmgr_gem_set_aub_annotations(batch->bo,
						 ctx->annotations,
						 ctx->index);
}
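/*
 * Typical lifecycle, as used by gen8_render_copyfunc() below:
 * annotation_init() reserves slot 0 for the command stream, each state
 * allocation records itself with annotation_add_state(), and once the
 * final batch length is known annotation_add_batch() fills slot 0
 * before annotation_flush() hands the array to libdrm for the AUB dump.
 */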
static uint32_t
batch_used(struct intel_batchbuffer *batch)
{
	return batch->ptr - batch->buffer;
}
static uint32_t
batch_align(struct intel_batchbuffer *batch, uint32_t align)
{
	uint32_t offset = batch_used(batch);
	offset = ALIGN(offset, align);
	batch->ptr = batch->buffer + offset;
	return offset;
}
static void *
batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
{
	uint32_t offset = batch_align(batch, align);
	batch->ptr += size;
	return memset(batch->buffer + offset, 0, size);
}
static uint32_t
batch_offset(struct intel_batchbuffer *batch, void *ptr)
{
	return (uint8_t *)ptr - batch->buffer;
}
static uint32_t
batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
{
	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
}
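/*
 * Example: gen8_fill_ps() below places the pixel shader with
 *   batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64)
 * which zero-allocates 64-byte-aligned space at batch->ptr, copies the
 * kernel in, and returns its byte offset within the batch.
 */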
static void
gen6_render_flush(struct intel_batchbuffer *batch,
		  drm_intel_context *context, uint32_t batch_end)
{
	int ret;

	ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
	if (ret == 0)
		ret = drm_intel_gem_bo_context_exec(batch->bo, context,
						    batch_end, 0);
	igt_assert(ret == 0);
}
/* Mostly copy+paste from gen6, except height, width, pitch moved */
static uint32_t
gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
	      uint32_t format, int is_dst) {
	struct gen8_surface_state *ss;
	uint32_t write_domain, read_domain, offset;
	int ret;

	if (is_dst) {
		write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
	} else {
		write_domain = 0;
		read_domain = I915_GEM_DOMAIN_SAMPLER;
	}

	ss = batch_alloc(batch, sizeof(*ss), 64);
	offset = batch_offset(batch, ss);
	annotation_add_state(&aub_annotations, AUB_TRACE_SURFACE_STATE,
			     offset, sizeof(*ss));

	ss->ss0.surface_type = GEN6_SURFACE_2D;
	ss->ss0.surface_format = format;
	ss->ss0.render_cache_read_write = 1;
	ss->ss0.vertical_alignment = 1; /* align 4 */
	ss->ss0.horizontal_alignment = 1; /* align 4 */
	if (buf->tiling == I915_TILING_X)
		ss->ss0.tiled_mode = 2;
	else if (buf->tiling == I915_TILING_Y)
		ss->ss0.tiled_mode = 3;

	ss->ss8.base_addr = buf->bo->offset;

	ret = drm_intel_bo_emit_reloc(batch->bo,
				      batch_offset(batch, ss) + 8 * 4,
				      buf->bo, 0,
				      read_domain, write_domain);
	igt_assert(ret == 0);

	ss->ss2.height = igt_buf_height(buf) - 1;
	ss->ss2.width  = igt_buf_width(buf) - 1;
	ss->ss3.pitch  = buf->stride - 1;

	ss->ss7.shader_chanel_select_r = 4;
	ss->ss7.shader_chanel_select_g = 5;
	ss->ss7.shader_chanel_select_b = 6;
	ss->ss7.shader_chanel_select_a = 7;

	return offset;
}
static uint32_t
gen8_bind_surfaces(struct intel_batchbuffer *batch,
		   struct igt_buf *src,
		   struct igt_buf *dst)
{
	uint32_t *binding_table, offset;

	binding_table = batch_alloc(batch, 8, 32);
	offset = batch_offset(batch, binding_table);
	annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
			     offset, 8);

	binding_table[0] =
		gen8_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
	binding_table[1] =
		gen8_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);

	return offset;
}
/* Mostly copy+paste from gen6, except wrap modes moved */
static uint32_t
gen8_create_sampler(struct intel_batchbuffer *batch) {
	struct gen8_sampler_state *ss;
	uint32_t offset;

	ss = batch_alloc(batch, sizeof(*ss), 64);
	offset = batch_offset(batch, ss);
	annotation_add_state(&aub_annotations, AUB_TRACE_SAMPLER_STATE,
			     offset, sizeof(*ss));

	ss->ss0.min_filter = GEN6_MAPFILTER_NEAREST;
	ss->ss0.mag_filter = GEN6_MAPFILTER_NEAREST;
	ss->ss3.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
	ss->ss3.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
	ss->ss3.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;

	/* I've experimented with non-normalized coordinates and using the LD
	 * sampler fetch, but couldn't make it work. */
	ss->ss3.non_normalized_coord = 0;

	return offset;
}
static uint32_t
gen8_fill_ps(struct intel_batchbuffer *batch,
	     const uint32_t kernel[][4],
	     size_t size)
{
	uint32_t offset;

	offset = batch_copy(batch, kernel, size, 64);
	annotation_add_state(&aub_annotations, AUB_TRACE_KERNEL_INSTRUCTIONS,
			     offset, size);

	return offset;
}
/**
 * gen7_fill_vertex_buffer_data - populate the vertex buffer with data.
 *
 * The vertex buffer consists of 3 vertices to construct a RECTLIST. The 4th
 * vertex is implied (automatically derived by the HW). Each element has the
 * destination offset, and the normalized texture offset (src). The rectangle
 * itself will span the entire subsurface to be copied.
 *
 * see gen6_emit_vertex_elements
 */
static uint32_t
gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
			     struct igt_buf *src,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t dst_x, uint32_t dst_y,
			     uint32_t width, uint32_t height)
{
	void *start;
	uint32_t offset;

	batch_align(batch, 8);
	start = batch->ptr;

	emit_vertex_2s(batch, dst_x + width, dst_y + height);
	emit_vertex_normalized(batch, src_x + width, igt_buf_width(src));
	emit_vertex_normalized(batch, src_y + height, igt_buf_height(src));

	emit_vertex_2s(batch, dst_x, dst_y + height);
	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
	emit_vertex_normalized(batch, src_y + height, igt_buf_height(src));

	emit_vertex_2s(batch, dst_x, dst_y);
	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
	emit_vertex_normalized(batch, src_y, igt_buf_height(src));

	offset = batch_offset(batch, start);
	annotation_add_state(&aub_annotations, AUB_TRACE_VERTEX_BUFFER,
			     offset, 3 * VERTEX_SIZE);
	return offset;
}
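/*
 * The three vertices above are the corners (dst_x + width, dst_y + height),
 * (dst_x, dst_y + height) and (dst_x, dst_y); the hardware derives the
 * fourth corner of the RECTLIST from those.
 */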
/**
 * gen6_emit_vertex_elements - The vertex elements describe the contents of the
 * vertex buffer. We pack the vertex buffer in a semi-weird way, conforming to
 * what gen6_rendercopy did. The most straightforward approach would be to
 * store everything as floats.
 *
 * see gen7_fill_vertex_buffer_data() for where the corresponding elements are
 * packed.
 */
static void
gen6_emit_vertex_elements(struct intel_batchbuffer *batch) {
	/* The VUE layout:
	 * dword 0-3: pad (0, 0, 0, 0)
	 * dword 4-7: position (x, y, 0, 1.0),
	 * dword 8-11: texture coordinate 0 (u0, v0, 0, 1.0)
	 */
	OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS | (3 * 2 + 1 - 2));

	/* Element state 0. These are 4 dwords of 0 required for the VUE format.
	 * We don't really know or care what they do.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* we specify 0, but it really does not exist */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);

	/* Element state 1 - Our "destination" vertices. These are passed down
	 * through the pipeline, and eventually make it to the pixel shader as
	 * the offsets in the destination surface. They are packed as 16-bit
	 * signed/scaled only because gen6 rendercopy did it that way; I see no
	 * particular reason for it otherwise.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offset within the vb, in bytes */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* Element state 2. Last but not least we store the U,V components as
	 * normalized floats. These will be used in the pixel shader to sample
	 * from the source buffer.
	 */
	OUT_BATCH(0 << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN6_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT |
		  4 << VE0_OFFSET_SHIFT); /* offset within the vb, in bytes */
	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
}
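/*
 * In short: element 0 reads nothing from the vb (all components STORE_0),
 * element 1 reads the two s16 destination coordinates at byte offset 0,
 * and element 2 reads the two float source coordinates at byte offset 4,
 * which is exactly the 12-byte VERTEX_SIZE layout.
 */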
/**
 * gen8_emit_vertex_buffer - emit the vertex buffers command
 *
 * @batch
 * @offset - byte offset within the @batch where the vertex buffer starts.
 */
static void gen8_emit_vertex_buffer(struct intel_batchbuffer *batch,
				    uint32_t offset) {
	OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | (1 + (4 * 1) - 2));
	OUT_BATCH(0 << VB0_BUFFER_INDEX_SHIFT | /* VB 0th index */
		  GEN7_VB0_BUFFER_ADDR_MOD_EN | /* Address Modify Enable */
		  VERTEX_SIZE << VB0_BUFFER_PITCH_SHIFT);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset);
	OUT_BATCH(3 * VERTEX_SIZE);
}
static uint32_t
gen6_create_cc_state(struct intel_batchbuffer *batch)
{
	struct gen6_color_calc_state *cc_state;
	uint32_t offset;

	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
	offset = batch_offset(batch, cc_state);
	annotation_add_state(&aub_annotations, AUB_TRACE_CC_STATE,
			     offset, sizeof(*cc_state));

	return offset;
}
static uint32_t
gen8_create_blend_state(struct intel_batchbuffer *batch)
{
	struct gen8_blend_state *blend;
	int i;
	uint32_t offset;

	blend = batch_alloc(batch, sizeof(*blend), 64);
	offset = batch_offset(batch, blend);
	annotation_add_state(&aub_annotations, AUB_TRACE_BLEND_STATE,
			     offset, sizeof(*blend));

	for (i = 0; i < 16; i++) {
		blend->bs[i].dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
		blend->bs[i].source_blend_factor = GEN6_BLENDFACTOR_ONE;
		blend->bs[i].color_blend_func = GEN6_BLENDFUNCTION_ADD;
		blend->bs[i].pre_blend_color_clamp = 1;
		blend->bs[i].color_buffer_blend = 0;
	}

	return offset;
}
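/*
 * With a source factor of ONE and a destination factor of ZERO the blend
 * equation collapses to dst = src, i.e. a plain copy even if blending
 * were enabled (color_buffer_blend is left disabled anyway).
 */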
static uint32_t
gen6_create_cc_viewport(struct intel_batchbuffer *batch)
{
	struct gen6_cc_viewport *vp;
	uint32_t offset;

	vp = batch_alloc(batch, sizeof(*vp), 32);
	offset = batch_offset(batch, vp);
	annotation_add_state(&aub_annotations, AUB_TRACE_CC_VP_STATE,
			     offset, sizeof(*vp));

	/* XXX I don't understand this */
	vp->min_depth = -1.e35;
	vp->max_depth = 1.e35;

	return offset;
}
static uint32_t
gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
	/* XXX these are likely not needed */
	struct gen7_sf_clip_viewport *scv_state;
	uint32_t offset;

	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
	offset = batch_offset(batch, scv_state);
	annotation_add_state(&aub_annotations, AUB_TRACE_CLIP_VP_STATE,
			     offset, sizeof(*scv_state));

	scv_state->guardband.xmin = 0;
	scv_state->guardband.xmax = 1.0f;
	scv_state->guardband.ymin = 0;
	scv_state->guardband.ymax = 1.0f;

	return offset;
}
static uint32_t
gen6_create_scissor_rect(struct intel_batchbuffer *batch)
{
	struct gen6_scissor_rect *scissor;
	uint32_t offset;

	scissor = batch_alloc(batch, sizeof(*scissor), 64);
	offset = batch_offset(batch, scissor);
	annotation_add_state(&aub_annotations, AUB_TRACE_SCISSOR_STATE,
			     offset, sizeof(*scissor));

	return offset;
}
static void
gen8_emit_sip(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_STATE_SIP | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
}
static void
gen7_emit_push_constants(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_VS);
	OUT_BATCH(0);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_HS);
	OUT_BATCH(0);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_DS);
	OUT_BATCH(0);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_GS);
	OUT_BATCH(0);
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS);
	OUT_BATCH(0);
}
static void
gen8_emit_state_base_address(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_STATE_BASE_ADDRESS | (16 - 2));

	/* general */
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);

	/* stateless data port */
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);

	/* surface */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY);

	/* dynamic */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION,
		  0, BASE_ADDRESS_MODIFY);

	/* indirect */
	OUT_BATCH(0);
	OUT_BATCH(0);

	/* instruction */
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);

	/* general state buffer size */
	OUT_BATCH(0xfffff000 | 1);
	/* dynamic state buffer size */
	OUT_BATCH(1 << 12 | 1);
	/* indirect object buffer size */
	OUT_BATCH(0xfffff000 | 1);
	/* instruction buffer size */
	OUT_BATCH(1 << 12 | 1);
}
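/*
 * Note on the four size dwords above: bits 31:12 hold the buffer size in
 * 4KB pages and bit 0 is the per-field modify enable, so 0xfffff000 | 1
 * requests the maximum size while 1 << 12 | 1 requests a single page.
 * This reading is inferred from the values used here rather than stated
 * anywhere in this file.
 */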
static void
gen7_emit_urb(struct intel_batchbuffer *batch) {
	/* XXX: Min valid values from mesa */
	const int vs_entries = 64;
	const int vs_size = 2;
	const int vs_start = 2;

	OUT_BATCH(GEN7_3DSTATE_URB_VS);
	OUT_BATCH(vs_entries | ((vs_size - 1) << 16) | (vs_start << 25));
	OUT_BATCH(GEN7_3DSTATE_URB_GS);
	OUT_BATCH(vs_start << 25);
	OUT_BATCH(GEN7_3DSTATE_URB_HS);
	OUT_BATCH(vs_start << 25);
	OUT_BATCH(GEN7_3DSTATE_URB_DS);
	OUT_BATCH(vs_start << 25);
}
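/*
 * Presumably each URB dword packs the number of entries in the low bits,
 * the entry size minus one at bit 16 and the start offset at bit 25; only
 * the VS is given real entries, the other stages just get a start offset
 * past it with zero entries. The exact field meanings are an assumption
 * based on the shifts used above, not documented in this file.
 */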
static void
gen8_emit_cc(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS);
	OUT_BATCH(cc.blend_state | 1);

	OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS);
	OUT_BATCH(cc.cc_state | 1);
}
static void
gen8_emit_multisample(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN8_3DSTATE_MULTISAMPLE);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK);
	OUT_BATCH(1);
}
static void
gen8_emit_vs(struct intel_batchbuffer *batch) {
	int i;

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS);
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (11 - 2));
	for (i = 0; i < 10; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_VS | (9 - 2));
	for (i = 0; i < 8; i++)
		OUT_BATCH(0);
}
static void
gen8_emit_hs(struct intel_batchbuffer *batch) {
	int i;

	OUT_BATCH(GEN7_3DSTATE_CONSTANT_HS | (11 - 2));
	for (i = 0; i < 10; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_HS | (9 - 2));
	for (i = 0; i < 8; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS);
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_HS);
	OUT_BATCH(0);
}
static void
gen8_emit_gs(struct intel_batchbuffer *batch) {
	int i;

	OUT_BATCH(GEN7_3DSTATE_CONSTANT_GS | (11 - 2));
	for (i = 0; i < 10; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_GS | (10 - 2));
	for (i = 0; i < 9; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS);
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_GS);
	OUT_BATCH(0);
}
static void
gen8_emit_ds(struct intel_batchbuffer *batch) {
	int i;

	OUT_BATCH(GEN7_3DSTATE_CONSTANT_DS | (11 - 2));
	for (i = 0; i < 10; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_DS | (9 - 2));
	for (i = 0; i < 8; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS);
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_DS);
	OUT_BATCH(0);
}
static void
gen8_emit_wm_hz_op(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
}
static void
gen8_emit_null_state(struct intel_batchbuffer *batch) {
	gen8_emit_wm_hz_op(batch);
	gen8_emit_hs(batch);
	OUT_BATCH(GEN7_3DSTATE_TE | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	gen8_emit_gs(batch);
	gen8_emit_ds(batch);
	gen8_emit_vs(batch);
}
static void
gen7_emit_clip(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
	OUT_BATCH(0);
}
static void
gen8_emit_sf(struct intel_batchbuffer *batch)
{
	int i;

	OUT_BATCH(GEN7_3DSTATE_SBE | (4 - 2));
	OUT_BATCH(1 << GEN7_SBE_NUM_OUTPUTS_SHIFT |
		  GEN8_SBE_FORCE_URB_ENTRY_READ_LENGTH |
		  GEN8_SBE_FORCE_URB_ENTRY_READ_OFFSET |
		  1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT |
		  1 << GEN8_SBE_URB_ENTRY_READ_OFFSET_SHIFT);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN8_3DSTATE_SBE_SWIZ | (11 - 2));
	for (i = 0; i < 8; i++)
		OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN8_3DSTATE_RASTER | (5 - 2));
	OUT_BATCH(GEN8_RASTER_FRONT_WINDING_CCW | GEN8_RASTER_CULL_NONE);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DSTATE_SF | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
}
static void
gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
	const int max_threads = 63;
	int i;

	OUT_BATCH(GEN6_3DSTATE_WM | (2 - 2));
	OUT_BATCH(/* XXX: I don't understand the BARYCENTRIC stuff, but it
		   * appears we need it to put our setup data in the place we
		   * expect (g6, see below) */
		  GEN7_3DSTATE_PS_PERSPECTIVE_PIXEL_BARYCENTRIC);

	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11 - 2));
	for (i = 0; i < 10; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_PS | (12 - 2));
	OUT_BATCH(kernel);
	OUT_BATCH(0); /* kernel hi */
	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
		  2 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
	OUT_BATCH(0); /* scratch space stuff */
	OUT_BATCH(0); /* scratch hi */
	OUT_BATCH((max_threads - 1) << GEN8_3DSTATE_PS_MAX_THREADS_SHIFT |
		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT);
	OUT_BATCH(0); /* kernel 1 */
	OUT_BATCH(0); /* kernel 1 hi */
	OUT_BATCH(0); /* kernel 2 */
	OUT_BATCH(0); /* kernel 2 hi */

	OUT_BATCH(GEN8_3DSTATE_PS_BLEND | (2 - 2));
	OUT_BATCH(GEN8_PS_BLEND_HAS_WRITEABLE_RT);

	OUT_BATCH(GEN8_3DSTATE_PS_EXTRA | (2 - 2));
	OUT_BATCH(GEN8_PSX_PIXEL_SHADER_VALID | GEN8_PSX_ATTRIBUTE_ENABLE);
}
static void
gen8_emit_depth(struct intel_batchbuffer *batch) {
	int i;

	OUT_BATCH(GEN8_3DSTATE_WM_DEPTH_STENCIL | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8 - 2));
	for (i = 0; i < 7; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER | (5 - 2));
	for (i = 0; i < 4; i++)
		OUT_BATCH(0);

	OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER | (5 - 2));
	for (i = 0; i < 4; i++)
		OUT_BATCH(0);
}
static void
gen7_emit_clear(struct intel_batchbuffer *batch) {
	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(1); /* clear valid */
}
static void
gen6_emit_drawing_rectangle(struct intel_batchbuffer *batch, struct igt_buf *dst)
{
	OUT_BATCH(GEN6_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH((igt_buf_height(dst) - 1) << 16 | (igt_buf_width(dst) - 1));
	OUT_BATCH(0);
}
static void gen8_emit_vf_topology(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN8_3DSTATE_VF_TOPOLOGY);
	OUT_BATCH(_3DPRIM_RECTLIST);
}
/* Vertex elements MUST be defined before this according to spec */
static void gen8_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset)
{
	OUT_BATCH(GEN8_3DSTATE_VF_INSTANCING | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);

	OUT_BATCH(GEN6_3DPRIMITIVE | (7 - 2));
	OUT_BATCH(0); /* gen8+ ignore the topology type field */
	OUT_BATCH(3); /* vertex count */
	OUT_BATCH(0); /* We're specifying this instead with offset in GEN6_3DSTATE_VERTEX_BUFFERS */
	OUT_BATCH(1); /* single instance */
	OUT_BATCH(0); /* start instance location */
	OUT_BATCH(0); /* index buffer offset, ignored */
}
/* The general rule is: if it's named gen6 it is directly copied from
 * gen6_render_copyfunc.
 *
 * This sets up most of the 3d pipeline, and most of that to NULL state. The
 * docs aren't specific about exactly what must be set up NULL, but the general
 * rule is we could be run at any time, and so the more state we set to NULL,
 * the better our odds of success.
 *
 * +---------------+ <---- 4096
 * |       ^       |
 * |       |       |
 * |    various    |
 * |     state     |
 * |       |       |
 * |_______|_______| <---- 2048 + ?
 * |       ^       |
 * |     batch     |
 * |    commands   |
 * |       |       |
 * +---------------+ <---- 0 + ?
 *
 * The batch commands point to state within the batch, so all state offsets should be
 * 0 < offset < 4096. Both commands and state build upwards, and are constructed
 * in that order. This means too many batch commands can delete state if not
 * careful.
 */
#define BATCH_STATE_SPLIT 2048
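/*
 * Concretely, gen8_render_copyfunc() below first points batch->ptr at
 * BATCH_STATE_SPLIT and allocates all state upwards from there (asserting
 * it stays under 4096), then rewinds to offset 0 to emit the commands and
 * asserts that the final batch_end stays below BATCH_STATE_SPLIT.
 */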
void gen8_render_copyfunc(struct intel_batchbuffer *batch,
			  drm_intel_context *context,
			  struct igt_buf *src, unsigned src_x, unsigned src_y,
			  unsigned width, unsigned height,
			  struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
{
	uint32_t ps_sampler_state, ps_kernel_off, ps_binding_table;
	uint32_t scissor_state;
	uint32_t vertex_buffer;
	uint32_t batch_end;

	intel_batchbuffer_flush_with_context(batch, context);

	batch_align(batch, 8);

	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];

	annotation_init(&aub_annotations);

	ps_binding_table = gen8_bind_surfaces(batch, src, dst);
	ps_sampler_state = gen8_create_sampler(batch);
	ps_kernel_off = gen8_fill_ps(batch, ps_kernel, sizeof(ps_kernel));
	vertex_buffer = gen7_fill_vertex_buffer_data(batch, src,
						     src_x, src_y,
						     dst_x, dst_y,
						     width, height);
	cc.cc_state = gen6_create_cc_state(batch);
	cc.blend_state = gen8_create_blend_state(batch);
	viewport.cc_state = gen6_create_cc_viewport(batch);
	viewport.sf_clip_state = gen7_create_sf_clip_viewport(batch);
	scissor_state = gen6_create_scissor_rect(batch);
	/* TODO: there is other state which isn't set up */

	igt_assert(batch->ptr < &batch->buffer[4095]);

	batch->ptr = batch->buffer;

	/* Start emitting the commands. The order roughly follows the mesa blorp
	 * order */
	OUT_BATCH(GEN6_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	gen8_emit_sip(batch);

	gen7_emit_push_constants(batch);

	gen8_emit_state_base_address(batch);

	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC);
	OUT_BATCH(viewport.cc_state);
	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP);
	OUT_BATCH(viewport.sf_clip_state);

	gen7_emit_urb(batch);

	gen8_emit_cc(batch);

	gen8_emit_multisample(batch);

	gen8_emit_null_state(batch);

	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (5 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);

	gen7_emit_clip(batch);

	gen8_emit_sf(batch);

	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS);
	OUT_BATCH(ps_binding_table);

	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS);
	OUT_BATCH(ps_sampler_state);

	gen8_emit_ps(batch, ps_kernel_off);

	OUT_BATCH(GEN6_3DSTATE_SCISSOR_STATE_POINTERS);
	OUT_BATCH(scissor_state);

	gen8_emit_depth(batch);

	gen7_emit_clear(batch);

	gen6_emit_drawing_rectangle(batch, dst);

	gen8_emit_vertex_buffer(batch, vertex_buffer);
	gen6_emit_vertex_elements(batch);

	gen8_emit_vf_topology(batch);
	gen8_emit_primitive(batch, vertex_buffer);

	OUT_BATCH(MI_BATCH_BUFFER_END);

	batch_end = batch_align(batch, 8);
	igt_assert(batch_end < BATCH_STATE_SPLIT);
	annotation_add_batch(&aub_annotations, batch_end);

	dump_batch(batch);

	annotation_flush(&aub_annotations, batch);

	gen6_render_flush(batch, context, batch_end);
	intel_batchbuffer_reset(batch);
}