/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_state.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
 * complex, or highly reusable state can be created once, and bound and
 * rebound multiple times.  This is modeled with the pipe->create_*_state()
 * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
 * streamed out on the fly, via pipe->set_*_state() hooks.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
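 *
 * As an illustrative sketch (the names here are made up, not code from
 * this file), OR-merging two partial packets at draw time amounts to:
 *
 *    for (int i = 0; i < num_dwords; i++)
 *       out[i] = cso_a->packet[i] | cso_b->packet[i];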
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  The second are the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), which read
 * the context state and emit the commands into the actual batch.
 */
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"

#include "intel/compiler/brw_compiler.h"
#include "intel/common/intel_aux_map.h"
#include "intel/common/intel_l3_config.h"
#include "intel/common/intel_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#include "iris_genx_macros.h"
#include "intel/common/intel_guardband.h"
/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);

#undef PIPE_ASSERT
}
static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
   };
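   /* The PATCHLIST_n topologies are numbered consecutively, so adding
    * verts_per_patch to _3DPRIM_PATCHLIST_1 - 1 selects the right one
    * (e.g. 3 control points per patch yields _3DPRIM_PATCHLIST_3).
    */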
   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}
static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}
static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = PREFILTEROP_ALWAYS,
      [PIPE_FUNC_LESS] = PREFILTEROP_LEQUAL,
      [PIPE_FUNC_EQUAL] = PREFILTEROP_NOTEQUAL,
      [PIPE_FUNC_LEQUAL] = PREFILTEROP_LESS,
      [PIPE_FUNC_GREATER] = PREFILTEROP_GEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROP_EQUAL,
      [PIPE_FUNC_GEQUAL] = PREFILTEROP_GREATER,
      [PIPE_FUNC_ALWAYS] = PREFILTEROP_NEVER,
   };
   return map[pipe_func];
}
static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE] = CULLMODE_NONE,
      [PIPE_FACE_FRONT] = CULLMODE_FRONT,
      [PIPE_FACE_BACK] = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}
static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}
static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}
static unsigned
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}
/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}
/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}
/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   uint32_t offset = 0;
   void *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
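/* Usage sketch (mirroring the bind hooks later in this file): with old_cso
 * and new_cso in scope,
 *
 *    if (cso_changed(alpha_enabled))
 *       ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 *
 * flags the state as dirty only when the field actually differs (or when
 * there was no previously-bound CSO at all).
 */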
static void
flush_before_state_base_change(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH |
                              /* Workaround:
                               *
                               *   Software must program PIPE_CONTROL command
                               *   with "HDC Pipeline Flush" prior to
                               *   programming of the below two non-pipeline
                               *   state:
                               *      * STATE_BASE_ADDRESS
                               *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
                               */
                              ((GFX_VER == 12 && devinfo->revision == 0 /* A0 */ ?
                                PIPE_CONTROL_FLUSH_HDC : 0)));
}
static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Functions > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through experimentation,
    * flushing the texture cache appears to be sufficient.  The theory here
    * is that all of the sampling/rendering units cache the binding table in
    * the texture cache.  However, we have yet to be able to actually
    * confirm this.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
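/* The macro pastes "_num" onto the register name, so a call such as
 * iris_emit_lri(batch, CS_CHICKEN1, val) expands to
 * _iris_emit_lri(batch, GENX(CS_CHICKEN1_num), val), resolving the genxml
 * register name to its MMIO offset at compile time.
 */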
static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}
static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
   _iris_emit_lrr(batch, dst + 4, src + 4);
}

static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}
/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
   iris_batch_sync_region_end(batch);
}
/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}
static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      srm.PredicateEnable = predicated;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}
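/* Note: the 64-bit register load/store helpers above are implemented as two
 * independent 32-bit commands, so the two halves of the value are not read
 * or written atomically with respect to other GPU work.
 */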
static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}
static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   iris_batch_sync_region_start(batch);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);
   iris_batch_sync_region_start(batch);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
                                             IRIS_DOMAIN_OTHER_WRITE);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }

   iris_batch_sync_region_end(batch);
}
static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GFX_VER >= 8 && GFX_VER < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gfx9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    "Software must ensure all the write caches are flushed through a
    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *     command to invalidate read only caches prior to programming
    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GFX_VER >= 9
      sel.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
      sel.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
#endif
      sel.PipelineSelection = pipeline;
   }
}
static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
   /* The documentation for this chicken bit says:
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   iris_emit_reg(batch, GENX(SLICE_COMMON_ECO_CHICKEN1), reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
}
static void
init_state_base_address(struct iris_batch *batch)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;
   uint32_t mocs = isl_mocs(isl_dev, 0, false);
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS = mocs;
      sba.IndirectObjectMOCS = mocs;
      sba.InstructionMOCS = mocs;
      sba.SurfaceStateMOCS = mocs;

      sba.GeneralStateBaseAddressModifyEnable = true;
      sba.DynamicStateBaseAddressModifyEnable = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable = true;
      sba.GeneralStateBufferSizeModifyEnable = true;
      sba.DynamicStateBufferSizeModifyEnable = true;
#if GFX_VER >= 9
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable = true;
      sba.InstructionBuffersizeModifyEnable = true;

      sba.InstructionBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize = 0xfffff;
      sba.DynamicStateBufferSize = 0xfffff;
   }

   flush_after_state_base_change(batch);
}
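/* Note: the *BufferSize fields programmed above are expressed in 4KB pages,
 * so 0xfffff covers (just shy of) the full 4GB memory zone that each base
 * address points at.
 */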
static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct intel_l3_config *cfg)
{
   assert(cfg || GFX_VER >= 12);

#if GFX_VER >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_emit_reg(batch, L3_ALLOCATION_REG, reg) {
#if GFX_VER < 11
      reg.SLMEnable = cfg->n[INTEL_L3P_SLM] > 0;
#endif
#if GFX_VER == 11
      /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register.  The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      if (GFX_VER < 12 || cfg) {
         reg.URBAllocation = cfg->n[INTEL_L3P_URB];
         reg.ROAllocation = cfg->n[INTEL_L3P_RO];
         reg.DCAllocation = cfg->n[INTEL_L3P_DC];
         reg.AllAllocation = cfg->n[INTEL_L3P_ALL];
      } else {
#if GFX_VER >= 12
         reg.L3FullWayAllocationEnable = true;
#endif
      }
   }
}
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_emit_reg(batch, GENX(CS_CHICKEN1), reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
}
/**
 * Compute an \p n x \p m pixel hashing table usable as slice, subslice or
 * pixel pipe hashing table.  The resulting table is the cyclic repetition of
 * a fixed pattern with periodicity equal to \p period.
 *
 * If \p index is specified to be equal to \p period, a 2-way hashing table
 * will be generated such that indices 0 and 1 are returned for the following
 * fractions of entries respectively:
 *
 *    p_0 = ceil(period / 2) / period
 *    p_1 = floor(period / 2) / period
 *
 * If \p index is even and less than \p period, a 3-way hashing table will be
 * generated such that indices 0, 1 and 2 are returned for the following
 * fractions of entries:
 *
 *    p_0 = (ceil(period / 2) - 1) / period
 *    p_1 = floor(period / 2) / period
 *    p_2 = 1 / period
 *
 * The equations above apply if \p flip is equal to 0; if it is equal to 1,
 * p_0 and p_1 will be swapped for the result.  Note that in the context of
 * pixel pipe hashing this can be always 0 on Gfx12 platforms, since the
 * hardware transparently remaps logical indices found on the table to
 * physical pixel pipe indices from the highest to lowest EU count.
 */
UNUSED static void
calculate_pixel_hashing_table(unsigned n, unsigned m,
                              unsigned period, unsigned index, bool flip,
                              uint32_t *p)
{
   for (unsigned i = 0; i < n; i++) {
      for (unsigned j = 0; j < m; j++) {
         const unsigned k = (i + j) % period;
         p[j + m * i] = (k == index ? 2 : (k & 1) ^ flip);
      }
   }
}
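/* Worked example: with period = 3 and index = 3 (so k never equals index)
 * and flip = 0, each entry is ((i + j) % 3) & 1, i.e. the cyclic pattern
 * 0, 1, 0 repeating along every row and column of the table.
 */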
#if GFX_VER == 11
static void
gfx11_upload_pixel_hashing_tables(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   assert(devinfo->ppipe_subslices[2] == 0);

   if (devinfo->ppipe_subslices[0] == devinfo->ppipe_subslices[1])
      return;

   struct iris_context *ice = batch->ice;
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map = stream_state(batch, ice->state.dynamic_uploader, &tmp,
                                size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   const bool flip = devinfo->ppipe_subslices[0] < devinfo->ppipe_subslices[1];
   struct GENX(SLICE_HASH_TABLE) table;
   calculate_pixel_hashing_table(16, 16, 3, 3, flip, table.Entry[0]);

   GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
}
#elif GFX_VERx10 == 120
static void
gfx12_upload_pixel_hashing_tables(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
    * present with n active dual subslices.
    */
   unsigned ppipes_of[3] = {};

   for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++)
         ppipes_of[n] += (devinfo->ppipe_subslices[p] == n);
   }
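   /* Illustrative example: ppipe_subslices = {2, 2, 1} would yield
    * ppipes_of = {0, 1, 2} -- no pixel pipe is fully idle, one has a single
    * active dual subslice, and two have two apiece.
    */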
   /* Gfx12 has three pixel pipes. */
   assert(ppipes_of[0] + ppipes_of[1] + ppipes_of[2] == 3);

   if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
       * subslices, or there is only one active pixel pipe: Nothing to do.
       */
      return;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
      p.SliceHashControl[0] = TABLE_0;

      if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);

      if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
         calculate_pixel_hashing_table(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
      else
         unreachable("Illegal fusing.");
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), p) {
      p.SubsliceHashingTableEnable = true;
      p.SubsliceHashingTableEnableMask = true;
   }
}
#endif
static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  This would be more
    *       stalls and may not actually help; we don't know yet.
    */
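   /* The arithmetic below tiles the area exactly: VS/HS/DS/GS each get
    * 6 units of push constant space at offsets 0/6/12/18, and the FS gets
    * 8 units at offset 24 (units are the packet's allocation granularity).
    */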
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}
#if GFX_VER == 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif
/**
 * Upload initial GPU state for any kind of context.
 *
 * These need to happen for both render and compute.
 */
static void
iris_init_common_context(struct iris_batch *batch)
{
#if GFX_VER == 11
   iris_emit_reg(batch, GENX(SAMPLER_MODE), reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_emit_reg(batch, GENX(HALF_SLICE_CHICKEN7), reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
#endif
}
/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

   iris_init_common_context(batch);

#if GFX_VER >= 9
   iris_emit_reg(batch, GENX(CS_DEBUG_MODE2), reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
#else
   iris_emit_reg(batch, GENX(INSTPM), reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
#endif

#if GFX_VER == 9
   iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.MSCRAWHazardAvoidanceBit = true;
      reg.MSCRAWHazardAvoidanceBitMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }

   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GFX_VER == 11
   iris_emit_reg(batch, GENX(TCCNTLREG), reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }

   /* Hardware specification recommends disabling repacking for the
    * compatibility with decompression mechanism in display controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_emit_reg(batch, GENX(CACHE_MODE_0), reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
   }

   gfx11_upload_pixel_hashing_tables(batch);
#endif

#if GFX_VERx10 == 120
   gfx12_upload_pixel_hashing_tables(batch);
#endif

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      INTEL_SAMPLE_POS_1X(pat._1xSample);
      INTEL_SAMPLE_POS_2X(pat._2xSample);
      INTEL_SAMPLE_POS_4X(pat._4xSample);
      INTEL_SAMPLE_POS_8X(pat._8xSample);
#if GFX_VER >= 9
      INTEL_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GFX_VER == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
static void
iris_init_compute_context(struct iris_batch *batch)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   /* Workaround:
    *
    *    Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
    */
#if GFX_VERx10 == 120
   emit_pipeline_select(batch, _3D);
#else
   emit_pipeline_select(batch, GPGPU);
#endif

   iris_emit_l3_config(batch, batch->screen->l3_config_cs);

   init_state_base_address(batch);

   iris_init_common_context(batch);

#if GFX_VERx10 == 120
   emit_pipeline_select(batch, GPGPU);
#endif

#if GFX_VER == 9
   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif

#if GFX_VER == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
struct iris_vertex_buffer_state {
   /** The VERTEX_BUFFER_STATE hardware structure. */
   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resource;

   int offset;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length) +
                    GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};
/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   struct iris_vertex_buffer_state vertex_buffers[33];
   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];

   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];

#if GFX_VER == 8
   bool pma_fix_enabled;
#endif

#if GFX_VER == 9
   /* Is object level preemption enabled? */
   bool object_preemption;
#endif

   struct {
#if GFX_VER == 8
      struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
#endif
   } shaders[MESA_SHADER_STAGES];
};
/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */

   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
   uint8_t blend_enables;

   /** Bitfield of whether color writes are enabled for RT[i] */
   uint8_t color_write_enables;

   /** Does RT[0] use dual color blending? */
   bool dual_color_blending;
};
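/* If alpha-to-one is forced on, the dual-source alpha input behaves as if
 * it were 1.0, so a (SRC1_ALPHA, INV_SRC1_ALPHA) factor pair degenerates to
 * (ONE, ZERO); fix_blendfactor() below applies that substitution.
 */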
static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
{
   if (alpha_to_one) {
      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ONE;

      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ZERO;
   }

   return f;
}
/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);

   cso->blend_enables = 0;
   cso->color_write_enables = 0;
   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);

   cso->alpha_to_coverage = state->alpha_to_coverage;

   bool indep_alpha_blend = false;

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      const struct pipe_rt_blend_state *rt =
         &state->rt[state->independent_blend_enable ? i : 0];

      enum pipe_blendfactor src_rgb =
         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
      enum pipe_blendfactor src_alpha =
         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_rgb =
         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_alpha =
         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);

      if (rt->rgb_func != rt->alpha_func ||
          src_rgb != src_alpha || dst_rgb != dst_alpha)
         indep_alpha_blend = true;

      if (rt->blend_enable)
         cso->blend_enables |= 1u << i;

      if (rt->colormask)
         cso->color_write_enables |= 1u << i;

      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = rt->blend_enable;

         be.ColorBlendFunction = rt->rgb_func;
         be.AlphaBlendFunction = rt->alpha_func;

         /* The casts prevent warnings about implicit enum type conversions. */
         be.SourceBlendFactor = (int) src_rgb;
         be.SourceAlphaBlendFactor = (int) src_alpha;
         be.DestinationBlendFactor = (int) dst_rgb;
         be.DestinationAlphaBlendFactor = (int) dst_alpha;

         be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
         be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
      }
      blend_entry += GENX(BLEND_STATE_ENTRY_length);
   }

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time.
       * pb.AlphaTestEnable is filled in at draw time.
       *
       * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
       * setting it when dual color blending without an appropriate shader.
       */

      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = indep_alpha_blend;

      /* The casts prevent warnings about implicit enum type conversions. */
      pb.SourceBlendFactor =
         (int) fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
      pb.SourceAlphaBlendFactor =
         (int) fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
      pb.DestinationBlendFactor =
         (int) fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
      pb.DestinationAlphaBlendFactor =
         (int) fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
   }

   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   cso->dual_color_blending = util_blend_state_is_dual(state, 0);

   return cso;
}
/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_blend_state *cso = state;

   ice->state.cso_blend = cso;

   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];

   if (GFX_VER == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
 * Return true if the FS writes to any color outputs which are not disabled
 * via color masking.
 */
static bool
has_writeable_rt(const struct iris_blend_state *cso_blend,
                 const struct shader_info *fs_info)
{
   if (!fs_info)
      return false;

   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;

   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;

   return cso_blend->color_write_enables & rt_outputs;
}
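/* For example, in has_writeable_rt() above, an FS writing only
 * FRAG_RESULT_DATA1 counts as having a writeable RT only if the blend state
 * leaves RT1's color writes enabled, while a write to the legacy
 * FRAG_RESULT_COLOR output fans out to all render targets.
 */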
/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

#if GFX_VER >= 12
   uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
#endif

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   unsigned alpha_enabled:1;
   unsigned alpha_func:3;     /**< PIPE_FUNC_x */
   float alpha_ref_value;     /**< reference value */

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;

   /** Outbound to Gfx8-9 PMA stall equations */
   bool depth_test_enabled;
};
/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha_enabled = state->alpha_enabled;
   cso->alpha_func = state->alpha_func;
   cso->alpha_ref_value = state->alpha_ref_value;
   cso->depth_writes_enabled = state->depth_writemask;
   cso->depth_test_enabled = state->depth_enabled;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* gallium frontends need to optimize away EQUAL writes for us. */
   assert(!(state->depth_func == PIPE_FUNC_EQUAL && state->depth_writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth_func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth_enabled;
      wmds.DepthBufferWriteEnable = state->depth_writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
#if GFX_VER >= 12
      wmds.StencilReferenceValueModifyDisable = true;
#endif
   }

#if GFX_VER >= 12
   iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
      depth_bounds.DepthBoundsTestValueModifyDisable = false;
      depth_bounds.DepthBoundsTestEnableModifyDisable = false;
      depth_bounds.DepthBoundsTestEnable = state->depth_bounds_test;
      depth_bounds.DepthBoundsTestMinValue = state->depth_bounds_min;
      depth_bounds.DepthBoundsTestMaxValue = state->depth_bounds_max;
   }
#endif

   return cso;
}
/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha_ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha_enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha_func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(depth_writes_enabled) || cso_changed(stencil_writes_enabled))
         ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;

#if GFX_VER >= 12
      if (cso_changed(depth_bounds))
         ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
#endif
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];

   if (GFX_VER == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
static bool
want_pma_fix(struct iris_context *ice)
{
   UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
   UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
   const struct iris_blend_state *cso_blend = ice->state.cso_blend;

   /* In very specific combinations of state, we can instruct Gfx8-9 hardware
    * to avoid stalling at the pixel mask array.  The state equations are
    * documented in these places:
    *
    * - Gfx8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
    * - Gfx9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
    *
    * Both equations share some common elements:
    *
    *    no_hiz_op =
    *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *
    *    killpixels =
    *       3DSTATE_WM::ForceKillPix != ForceOff &&
    *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *        3DSTATE_PS_BLEND::AlphaTestEnable ||
    *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    *    (Technically the stencil PMA treats ForceKillPix differently,
    *     but I think this is a documentation oversight, and we don't
    *     ever use it in this way, so it doesn't matter).
    *
    *    common_pma_fix =
    *       3DSTATE_WM::ForceThreadDispatch != 1 &&
    *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
    *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
    *       3DSTATE_PS_EXTRA::PixelShaderValid &&
    *       no_hiz_op
    *
    * These are always true:
    *
    *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
    *    3DSTATE_PS_EXTRA::PixelShaderValid
    *
    * Also, we never use the normal drawing path for HiZ ops; these are true:
    *
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * This happens sometimes:
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1
    *
    * However, we choose to ignore it as it either agrees with the signal
    * (dispatch was already enabled, so nothing out of the ordinary), or
    * there are no framebuffer attachments (so no depth or HiZ anyway,
    * meaning the PMA signal will already be disabled).
    */

   if (!cso_fb->zsbuf)
      return false;
   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);

   /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
      return false;

   /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* 3DSTATE_WM::ForceKillPix != ForceOff &&
    * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *  3DSTATE_PS_BLEND::AlphaTestEnable ||
    *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    */
   bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
                     cso_blend->alpha_to_coverage || cso_zsa->alpha_enabled;

   /* The Gfx8 depth PMA equation becomes:
    *
    *    depth_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
    *
    *    stencil_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
    *
    *    pma_fix =
    *       common_pma_fix &&
    *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
    *       ((killpixels && (depth_writes || stencil_writes)) ||
    *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
    */
   if (!cso_zsa->depth_test_enabled)
      return false;

   return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
          (killpixels && (cso_zsa->depth_writes_enabled ||
                          (sres && cso_zsa->stencil_writes_enabled)));
}
void
genX(update_pma_fix)(struct iris_context *ice,
                     struct iris_batch *batch,
                     bool enable)
{
#if GFX_VER == 8
   struct iris_genx_state *genx = ice->state.genx;

   if (genx->pma_fix_enabled == enable)
      return;

   genx->pma_fix_enabled = enable;
   /* According to the Broadwell PIPE_CONTROL documentation, software should
    * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
    * prior to the LRI.  If stencil buffer writes are enabled, then a Render
    * Cache Flush is also necessary.
    *
    * The Gfx9 docs say to use a depth stall rather than a command streamer
    * stall.  However, the hardware seems to violently disagree.  A full
    * command streamer stall seems to be needed in both cases.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);
   iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
      reg.NPPMAFixEnable = enable;
      reg.NPEarlyZFailsDisable = enable;
      reg.NPPMAFixEnableMask = true;
      reg.NPEarlyZFailsDisableMask = true;
   }
   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary.  We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    *
    * Again, the Gfx9 docs give a different set of flushes but the Broadwell
    * flushes seem to work just as well.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
                                PIPE_CONTROL_DEPTH_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);
#endif
}
/**
 * Gallium CSO for rasterizer state.
 */
struct iris_rasterizer_state {
   uint32_t sf[GENX(3DSTATE_SF_length)];
   uint32_t clip[GENX(3DSTATE_CLIP_length)];
   uint32_t raster[GENX(3DSTATE_RASTER_length)];
   uint32_t wm[GENX(3DSTATE_WM_length)];
   uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];

   uint8_t num_clip_plane_consts;
   bool clip_halfz; /* for CC_VIEWPORT */
   bool depth_clip_near; /* for CC_VIEWPORT */
   bool depth_clip_far; /* for CC_VIEWPORT */
   bool flatshade; /* for shader state */
   bool flatshade_first; /* for stream output */
   bool clamp_fragment_color; /* for shader state */
   bool light_twoside; /* for shader state */
   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
   bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
   bool line_stipple_enable;
   bool poly_stipple_enable;
   bool multisample;
   bool force_persample_interp;
   bool conservative_rasterization;
   bool fill_mode_point;
   bool fill_mode_line;
   bool fill_mode_point_or_line;
   enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
   uint16_t sprite_coord_enable;
};
static float
get_line_width(const struct pipe_rasterizer_state *state)
{
   float line_width = state->line_width;

   /* From the OpenGL 4.4 spec:
    *
    * "The actual width of non-antialiased lines is determined by rounding
    * the supplied width to the nearest integer, then clamping it to the
    * implementation-dependent maximum non-antialiased line width."
    */
   if (!state->multisample && !state->line_smooth)
      line_width = roundf(state->line_width);

   if (!state->multisample && state->line_smooth && line_width < 1.5f) {
      /* For 1 pixel line thickness or less, the general anti-aliasing
       * algorithm gives up, and a garbage line is generated.  Setting a
       * Line Width of 0.0 specifies the rasterization of the "thinnest"
       * (one-pixel-wide), non-antialiased lines.
       *
       * Lines rendered with zero Line Width are rasterized using the
       * "Grid Intersection Quantization" rules as specified by the
       * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
       */
      line_width = 0.0f;
   }

   return line_width;
}
1745 * The pipe->create_rasterizer_state() driver hook.
1748 iris_create_rasterizer_state(struct pipe_context *ctx,
1749 const struct pipe_rasterizer_state *state)
1751 struct iris_rasterizer_state *cso =
1752 malloc(sizeof(struct iris_rasterizer_state));
1754 cso->multisample = state->multisample;
1755 cso->force_persample_interp = state->force_persample_interp;
1756 cso->clip_halfz = state->clip_halfz;
1757 cso->depth_clip_near = state->depth_clip_near;
1758 cso->depth_clip_far = state->depth_clip_far;
1759 cso->flatshade = state->flatshade;
1760 cso->flatshade_first = state->flatshade_first;
1761 cso->clamp_fragment_color = state->clamp_fragment_color;
1762 cso->light_twoside = state->light_twoside;
1763 cso->rasterizer_discard = state->rasterizer_discard;
1764 cso->half_pixel_center = state->half_pixel_center;
1765 cso->sprite_coord_mode = state->sprite_coord_mode;
1766 cso->sprite_coord_enable = state->sprite_coord_enable;
1767 cso->line_stipple_enable = state->line_stipple_enable;
1768 cso->poly_stipple_enable = state->poly_stipple_enable;
1769 cso->conservative_rasterization =
1770 state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1772 cso->fill_mode_point =
1773 state->fill_front == PIPE_POLYGON_MODE_POINT ||
1774 state->fill_back == PIPE_POLYGON_MODE_POINT;
1775 cso->fill_mode_line =
1776 state->fill_front == PIPE_POLYGON_MODE_LINE ||
1777 state->fill_back == PIPE_POLYGON_MODE_LINE;
1778 cso->fill_mode_point_or_line =
1779 cso->fill_mode_point ||
1780 cso->fill_mode_line;
1782 if (state->clip_plane_enable != 0)
1783 cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1784 else
1785 cso->num_clip_plane_consts = 0;
1787 float line_width = get_line_width(state);
1789 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1790 sf.StatisticsEnable = true;
1791 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1792 sf.LineEndCapAntialiasingRegionWidth =
1793 state->line_smooth ? _10pixels : _05pixels;
1794 sf.LastPixelEnable = state->line_last_pixel;
1795 sf.LineWidth = line_width;
1796 sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1797 !state->point_quad_rasterization;
1798 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1799 sf.PointWidth = CLAMP(state->point_size, 0.125f, 255.875f);
1801 if (state->flatshade_first) {
1802 sf.TriangleFanProvokingVertexSelect = 1;
1803 } else {
1804 sf.TriangleStripListProvokingVertexSelect = 2;
1805 sf.TriangleFanProvokingVertexSelect = 2;
1806 sf.LineStripListProvokingVertexSelect = 1;
1810 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1811 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1812 rr.CullMode = translate_cull_mode(state->cull_face);
1813 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1814 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1815 rr.DXMultisampleRasterizationEnable = state->multisample;
1816 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1817 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1818 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1819 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1820 rr.GlobalDepthOffsetScale = state->offset_scale;
1821 rr.GlobalDepthOffsetClamp = state->offset_clamp;
1822 rr.SmoothPointEnable = state->point_smooth;
1823 rr.AntialiasingEnable = state->line_smooth;
1824 rr.ScissorRectangleEnable = state->scissor;
1825 #if GFX_VER >= 9
1826 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1827 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1828 rr.ConservativeRasterizationEnable =
1829 cso->conservative_rasterization;
1830 #else
1831 rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1832 #endif
1835 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1836 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1837 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1839 cl.EarlyCullEnable = true;
1840 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1841 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1842 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1843 cl.GuardbandClipTestEnable = true;
1844 cl.ClipEnable = true;
1845 cl.MinimumPointWidth = 0.125;
1846 cl.MaximumPointWidth = 255.875;
1848 if (state->flatshade_first) {
1849 cl.TriangleFanProvokingVertexSelect = 1;
1850 } else {
1851 cl.TriangleStripListProvokingVertexSelect = 2;
1852 cl.TriangleFanProvokingVertexSelect = 2;
1853 cl.LineStripListProvokingVertexSelect = 1;
1857 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1858 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1859 * filled in at draw time from the FS program.
1861 wm.LineAntialiasingRegionWidth = _10pixels;
1862 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1863 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1864 wm.LineStippleEnable = state->line_stipple_enable;
1865 wm.PolygonStippleEnable = state->poly_stipple_enable;
1868 /* Remap from 0..255 back to 1..256 */
1869 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
1871 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1872 if (state->line_stipple_enable) {
1873 line.LineStipplePattern = state->line_stipple_pattern;
1874 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1875 line.LineStippleRepeatCount = line_stipple_factor;
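/* For example, a Gallium line_stipple_factor of 0 ("repeat each pattern
 * bit once") becomes a hardware LineStippleRepeatCount of 1 with a
 * LineStippleInverseRepeatCount of 1.0.
 */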
1883 * The pipe->bind_rasterizer_state() driver hook.
1885 * Bind a rasterizer CSO and flag related dirty bits.
1888 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1890 struct iris_context *ice = (struct iris_context *) ctx;
1891 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1892 struct iris_rasterizer_state *new_cso = state;
1895 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1896 if (cso_changed_memcmp(line_stipple))
1897 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1899 if (cso_changed(half_pixel_center))
1900 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1902 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1903 ice->state.dirty |= IRIS_DIRTY_WM;
1905 if (cso_changed(rasterizer_discard))
1906 ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1908 if (cso_changed(flatshade_first))
1909 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1911 if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1912 cso_changed(clip_halfz))
1913 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1915 if (cso_changed(sprite_coord_enable) ||
1916 cso_changed(sprite_coord_mode) ||
1917 cso_changed(light_twoside))
1918 ice->state.dirty |= IRIS_DIRTY_SBE;
1920 if (cso_changed(conservative_rasterization))
1921 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
1924 ice->state.cso_rast = new_cso;
1925 ice->state.dirty |= IRIS_DIRTY_RASTER;
1926 ice->state.dirty |= IRIS_DIRTY_CLIP;
1927 ice->state.stage_dirty |=
1928 ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
1932 * Return true if the given wrap mode requires the border color to exist.
1934 * (We can skip uploading it if the sampler isn't going to use it.)
1937 wrap_mode_needs_border_color(unsigned wrap_mode)
1939 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1943 * Gallium CSO for sampler state.
1945 struct iris_sampler_state {
1946 union pipe_color_union border_color;
1947 bool needs_border_color;
1949 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1953 * The pipe->create_sampler_state() driver hook.
1955 * We fill out SAMPLER_STATE (except for the border color pointer), and
1956 * store that on the CPU. It doesn't make sense to upload it to a GPU
1957 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1958 * all bound sampler states to be in contiguous memory.
1959 */
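/* The table that iris_upload_sampler_states() builds below is laid out
 * with a fixed stride, so the pointer command only needs one base
 * address.  Roughly:
 *
 *    table + 0 * 4 * GENX(SAMPLER_STATE_length)  -> SAMPLER_STATE for unit 0
 *    table + 1 * 4 * GENX(SAMPLER_STATE_length)  -> SAMPLER_STATE for unit 1
 *    ...
 */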
1960 static void *
1961 iris_create_sampler_state(struct pipe_context *ctx,
1962 const struct pipe_sampler_state *state)
1964 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1969 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1970 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1972 unsigned wrap_s = translate_wrap(state->wrap_s);
1973 unsigned wrap_t = translate_wrap(state->wrap_t);
1974 unsigned wrap_r = translate_wrap(state->wrap_r);
1976 memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1978 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1979 wrap_mode_needs_border_color(wrap_t) ||
1980 wrap_mode_needs_border_color(wrap_r);
1982 float min_lod = state->min_lod;
1983 unsigned mag_img_filter = state->mag_img_filter;
1985 // XXX: explain this code ported from ilo...I don't get it at all...
1986 if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1987 state->min_lod > 0.0f) {
1988 min_lod = 0.0f;
1989 mag_img_filter = state->min_img_filter;
1992 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1993 samp.TCXAddressControlMode = wrap_s;
1994 samp.TCYAddressControlMode = wrap_t;
1995 samp.TCZAddressControlMode = wrap_r;
1996 samp.CubeSurfaceControlMode = state->seamless_cube_map;
1997 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1998 samp.MinModeFilter = state->min_img_filter;
1999 samp.MagModeFilter = mag_img_filter;
2000 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
2001 samp.MaximumAnisotropy = RATIO21;
2003 if (state->max_anisotropy >= 2) {
2004 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
2005 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
2006 samp.AnisotropicAlgorithm = EWAApproximation;
2009 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
2010 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
2012 samp.MaximumAnisotropy =
2013 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
2016 /* Set address rounding bits if not using nearest filtering. */
2017 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
2018 samp.UAddressMinFilterRoundingEnable = true;
2019 samp.VAddressMinFilterRoundingEnable = true;
2020 samp.RAddressMinFilterRoundingEnable = true;
2023 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
2024 samp.UAddressMagFilterRoundingEnable = true;
2025 samp.VAddressMagFilterRoundingEnable = true;
2026 samp.RAddressMagFilterRoundingEnable = true;
2029 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2030 samp.ShadowFunction = translate_shadow_func(state->compare_func);
2032 const float hw_max_lod = GFX_VER >= 7 ? 14 : 13;
2034 samp.LODPreClampMode = CLAMP_MODE_OGL;
2035 samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
2036 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
2037 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
2039 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
2046 * The pipe->bind_sampler_states() driver hook.
2049 iris_bind_sampler_states(struct pipe_context *ctx,
2050 enum pipe_shader_type p_stage,
2051 unsigned start, unsigned count,
2052 void **states)
2054 struct iris_context *ice = (struct iris_context *) ctx;
2055 gl_shader_stage stage = stage_from_pipe(p_stage);
2056 struct iris_shader_state *shs = &ice->state.shaders[stage];
2058 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
2060 bool dirty = false;
2062 for (int i = 0; i < count; i++) {
2063 if (shs->samplers[start + i] != states[i]) {
2064 shs->samplers[start + i] = states[i];
2065 dirty = true;
2066 }
2067 }
2069 if (dirty)
2070 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2074 * Upload the sampler states into a contiguous area of GPU memory,
2075 * for 3DSTATE_SAMPLER_STATE_POINTERS_*.
2077 * Also fill out the border color state pointers.
2080 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
2082 struct iris_shader_state *shs = &ice->state.shaders[stage];
2083 const struct shader_info *info = iris_get_shader_info(ice, stage);
2085 /* We assume gallium frontends will call pipe->bind_sampler_states()
2086 * if the program's number of textures changes.
2088 unsigned count = info ? BITSET_LAST_BIT(info->textures_used) : 0;
2090 if (!count)
2091 return;
2093 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
2094 * in the dynamic state memory zone, so we can point to it via the
2095 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2097 unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2098 uint32_t *map =
2099 upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2100 if (unlikely(!map))
2101 return;
2103 struct pipe_resource *res = shs->sampler_table.res;
2104 struct iris_bo *bo = iris_resource_bo(res);
2106 iris_record_state_size(ice->state.sizes,
2107 bo->gtt_offset + shs->sampler_table.offset, size);
2109 shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2111 /* Make sure all the border colors land in the same BO */
2112 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2114 ice->state.need_border_colors &= ~(1 << stage);
2116 for (int i = 0; i < count; i++) {
2117 struct iris_sampler_state *state = shs->samplers[i];
2118 struct iris_sampler_view *tex = shs->textures[i];
2120 if (!state) {
2121 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2122 } else if (!state->needs_border_color) {
2123 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2124 } else {
2125 ice->state.need_border_colors |= 1 << stage;
2127 /* We may need to swizzle the border color for format faking.
2128 * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2129 * This means we need to move the border color's A channel into
2130 * the R or G channels so that those read swizzles will move it
2131 * back into A.
2132 */
2133 union pipe_color_union *color = &state->border_color;
2134 union pipe_color_union tmp;
2136 enum pipe_format internal_format = tex->res->internal_format;
2138 if (util_format_is_alpha(internal_format)) {
2139 unsigned char swz[4] = {
2140 PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2141 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2143 util_format_apply_color_swizzle(&tmp, color, swz, true);
2145 } else if (util_format_is_luminance_alpha(internal_format) &&
2146 internal_format != PIPE_FORMAT_L8A8_SRGB) {
2147 unsigned char swz[4] = {
2148 PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2149 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2151 util_format_apply_color_swizzle(&tmp, color, swz, true);
2156 /* Stream out the border color and merge the pointer. */
2157 uint32_t offset = iris_upload_border_color(ice, color);
2159 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2160 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2161 dyns.BorderColorPointer = offset;
2164 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2165 map[j] = state->sampler_state[j] | dynamic[j];
2168 map += GENX(SAMPLER_STATE_length);
2172 static enum isl_channel_select
2173 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2176 case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2177 case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2178 case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2179 case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2180 case PIPE_SWIZZLE_1: return ISL_CHANNEL_SELECT_ONE;
2181 case PIPE_SWIZZLE_0: return ISL_CHANNEL_SELECT_ZERO;
2182 default: unreachable("invalid swizzle");
2187 fill_buffer_surface_state(struct isl_device *isl_dev,
2188 struct iris_resource *res,
2190 enum isl_format format,
2191 struct isl_swizzle swizzle,
2194 isl_surf_usage_flags_t usage)
2196 const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2197 const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2199 /* The ARB_texture_buffer_specification says:
2201 * "The number of texels in the buffer texture's texel array is given by
2203 floor(<buffer_size> / (<components> * sizeof(<base_type>))),
2205 * where <buffer_size> is the size of the buffer object, in basic
2206 * machine units and <components> and <base_type> are the element count
2207 * and base data type for elements, as specified in Table X.1. The
2208 * number of texels in the texel array is then clamped to the
2209 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2211 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2212 * so that when ISL divides by stride to obtain the number of texels, that
2213 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2215 unsigned final_size =
2216 MIN3(size, res->bo->size - res->offset - offset,
2217 IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2219 isl_buffer_fill_state(isl_dev, map,
2220 .address = res->bo->gtt_offset + res->offset + offset,
2221 .size_B = final_size,
2225 .mocs = iris_mocs(res->bo, isl_dev, usage));
2228 #define SURFACE_STATE_ALIGNMENT 64
2231 * Allocate several contiguous SURFACE_STATE structures, one for each
2232 * supported auxiliary surface mode. This only allocates the CPU-side
2233 * copy; they will need to be uploaded later, after they're filled in.
2236 alloc_surface_states(struct iris_surface_state *surf_state,
2237 unsigned aux_usages)
2239 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2241 /* If this changes, update this to explicitly align pointers */
2242 STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2244 assert(aux_usages != 0);
2246 /* In case we're re-allocating them... */
2247 free(surf_state->cpu);
2249 surf_state->num_states = util_bitcount(aux_usages);
2250 surf_state->cpu = calloc(surf_state->num_states, surf_size);
2251 surf_state->ref.offset = 0;
2252 pipe_resource_reference(&surf_state->ref.res, NULL);
2254 assert(surf_state->cpu);
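/* For example, aux_usages == (1 << ISL_AUX_USAGE_NONE) |
 * (1 << ISL_AUX_USAGE_CCS_E) allocates num_states == 2; the fill loops
 * later in this file walk the modes with u_bit_scan(), so the CCS_E
 * variant lives one SURFACE_STATE_ALIGNMENT past the NONE variant.
 */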
2258 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2261 upload_surface_states(struct u_upload_mgr *mgr,
2262 struct iris_surface_state *surf_state)
2264 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2265 const unsigned bytes = surf_state->num_states * surf_size;
2267 void *map =
2268 upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2270 surf_state->ref.offset +=
2271 iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2274 memcpy(map, surf_state->cpu, bytes);
2278 * Update resource addresses in a set of SURFACE_STATE descriptors,
2279 * and re-upload them if necessary.
2282 update_surface_state_addrs(struct u_upload_mgr *mgr,
2283 struct iris_surface_state *surf_state,
2286 if (surf_state->bo_address == bo->gtt_offset)
2289 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2290 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2292 uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2294 /* First, update the CPU copies. We assume no other fields exist in
2295 * the QWord containing Surface Base Address.
2297 for (unsigned i = 0; i < surf_state->num_states; i++) {
2298 *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
2299 ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2302 /* Next, upload the updated copies to a GPU buffer. */
2303 upload_surface_states(mgr, surf_state);
2305 surf_state->bo_address = bo->gtt_offset;
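/* Note that the rebase above preserves any offset baked into the packed
 * address: a state that pointed at old_address + 4096 now points at
 * bo->gtt_offset + 4096.
 */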
2311 fill_surface_state(struct isl_device *isl_dev,
2313 struct iris_resource *res,
2314 struct isl_surf *surf,
2315 struct isl_view *view,
2317 uint32_t extra_main_offset,
2321 struct isl_surf_fill_state_info f = {
2324 .mocs = iris_mocs(res->bo, isl_dev, view->usage),
2325 .address = res->bo->gtt_offset + res->offset + extra_main_offset,
2326 .x_offset_sa = tile_x_sa,
2327 .y_offset_sa = tile_y_sa,
2330 assert(!iris_resource_unfinished_aux_import(res));
2332 if (aux_usage != ISL_AUX_USAGE_NONE) {
2333 f.aux_surf = &res->aux.surf;
2334 f.aux_usage = aux_usage;
2335 f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
2337 struct iris_bo *clear_bo = NULL;
2338 uint64_t clear_offset = 0;
2340 iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
2342 f.clear_address = clear_bo->gtt_offset + clear_offset;
2343 f.use_clear_address = isl_dev->info->ver > 9;
2347 isl_surf_fill_state_s(isl_dev, map, &f);
2351 * The pipe->create_sampler_view() driver hook.
2353 static struct pipe_sampler_view *
2354 iris_create_sampler_view(struct pipe_context *ctx,
2355 struct pipe_resource *tex,
2356 const struct pipe_sampler_view *tmpl)
2358 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2359 const struct intel_device_info *devinfo = &screen->devinfo;
2360 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2365 /* initialize base object */
2367 isv->base.context = ctx;
2368 isv->base.texture = NULL;
2369 pipe_reference_init(&isv->base.reference, 1);
2370 pipe_resource_reference(&isv->base.texture, tex);
2372 if (util_format_is_depth_or_stencil(tmpl->format)) {
2373 struct iris_resource *zres, *sres;
2374 const struct util_format_description *desc =
2375 util_format_description(tmpl->format);
2377 iris_get_depth_stencil_resources(tex, &zres, &sres);
2379 tex = util_format_has_depth(desc) ? &zres->base.b : &sres->base.b;
2382 isv->res = (struct iris_resource *) tex;
2384 alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
2386 isv->surface_state.bo_address = isv->res->bo->gtt_offset;
2388 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2390 if (isv->base.target == PIPE_TEXTURE_CUBE ||
2391 isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2392 usage |= ISL_SURF_USAGE_CUBE_BIT;
2394 const struct iris_format_info fmt =
2395 iris_format_for_usage(devinfo, tmpl->format, usage);
2397 isv->clear_color = isv->res->aux.clear_color;
2399 isv->view = (struct isl_view) {
2401 .swizzle = (struct isl_swizzle) {
2402 .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2403 .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2404 .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2405 .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2410 void *map = isv->surface_state.cpu;
2412 /* Fill out SURFACE_STATE for this view. */
2413 if (tmpl->target != PIPE_BUFFER) {
2414 isv->view.base_level = tmpl->u.tex.first_level;
2415 isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2416 // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
2417 isv->view.base_array_layer = tmpl->u.tex.first_layer;
2418 isv->view.array_len =
2419 tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2421 if (iris_resource_unfinished_aux_import(isv->res))
2422 iris_resource_finish_aux_import(&screen->base, isv->res);
2424 unsigned aux_modes = isv->res->aux.sampler_usages;
2425 while (aux_modes) {
2426 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2428 fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2429 &isv->view, aux_usage, 0, 0, 0);
2431 map += SURFACE_STATE_ALIGNMENT;
2434 fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2435 isv->view.format, isv->view.swizzle,
2436 tmpl->u.buf.offset, tmpl->u.buf.size,
2437 ISL_SURF_USAGE_TEXTURE_BIT);
2444 iris_sampler_view_destroy(struct pipe_context *ctx,
2445 struct pipe_sampler_view *state)
2447 struct iris_sampler_view *isv = (void *) state;
2448 pipe_resource_reference(&state->texture, NULL);
2449 pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2450 free(isv->surface_state.cpu);
2455 * The pipe->create_surface() driver hook.
2457 * In Gallium nomenclature, "surfaces" are a view of a resource that
2458 * can be bound as a render target or depth/stencil buffer.
2460 static struct pipe_surface *
2461 iris_create_surface(struct pipe_context *ctx,
2462 struct pipe_resource *tex,
2463 const struct pipe_surface *tmpl)
2465 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2466 const struct intel_device_info *devinfo = &screen->devinfo;
2468 isl_surf_usage_flags_t usage = 0;
2469 if (tmpl->writable)
2470 usage = ISL_SURF_USAGE_STORAGE_BIT;
2471 else if (util_format_is_depth_or_stencil(tmpl->format))
2472 usage = ISL_SURF_USAGE_DEPTH_BIT;
2473 else
2474 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2476 const struct iris_format_info fmt =
2477 iris_format_for_usage(devinfo, tmpl->format, usage);
2479 if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2480 !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2481 /* Framebuffer validation will reject this invalid case, but it
2482 * hasn't had the opportunity yet. In the meantime, we need to
2483 * avoid hitting ISL asserts about unsupported formats below.
2488 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2489 struct pipe_surface *psurf = &surf->base;
2490 struct iris_resource *res = (struct iris_resource *) tex;
2495 pipe_reference_init(&psurf->reference, 1);
2496 pipe_resource_reference(&psurf->texture, tex);
2497 psurf->context = ctx;
2498 psurf->format = tmpl->format;
2499 psurf->width = tex->width0;
2500 psurf->height = tex->height0;
2501 psurf->texture = tex;
2502 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2503 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2504 psurf->u.tex.level = tmpl->u.tex.level;
2506 uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2508 struct isl_view *view = &surf->view;
2509 *view = (struct isl_view) {
2511 .base_level = tmpl->u.tex.level,
2513 .base_array_layer = tmpl->u.tex.first_layer,
2514 .array_len = array_len,
2515 .swizzle = ISL_SWIZZLE_IDENTITY,
2520 struct isl_view *read_view = &surf->read_view;
2521 *read_view = (struct isl_view) {
2523 .base_level = tmpl->u.tex.level,
2525 .base_array_layer = tmpl->u.tex.first_layer,
2526 .array_len = array_len,
2527 .swizzle = ISL_SWIZZLE_IDENTITY,
2528 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2531 struct isl_surf read_surf = res->surf;
2532 uint32_t read_surf_offset_B = 0;
2533 uint32_t read_surf_tile_x_sa = 0, read_surf_tile_y_sa = 0;
2534 if (tex->target == PIPE_TEXTURE_3D && array_len == 1) {
2535 /* The minimum array element field of the surface state structure is
2536 * ignored by the sampler unit for 3D textures on some hardware. If the
2537 * render buffer is a single slice of a 3D texture, create a 2D texture
2538 * covering that slice.
2540 * TODO: This only handles the case where we're rendering to a single
2541 * slice of an array texture. If we have layered rendering combined
2542 * with non-coherent FB fetch and a non-zero base_array_layer, then
2543 * we're going to run into problems.
2545 * See https://gitlab.freedesktop.org/mesa/mesa/-/issues/4904
2547 isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2548 read_view->base_level,
2549 0, read_view->base_array_layer,
2550 &read_surf, &read_surf_offset_B,
2551 &read_surf_tile_x_sa, &read_surf_tile_y_sa);
2552 read_view->base_level = 0;
2553 read_view->base_array_layer = 0;
2554 assert(read_view->array_len == 1);
2555 } else if (tex->target == PIPE_TEXTURE_1D_ARRAY) {
2556 /* Convert 1D array textures to 2D arrays because shaders always provide
2557 * the array index coordinate at the Z component to avoid recompiles
2558 * when changing the texture target of the framebuffer.
2560 assert(read_surf.dim_layout == ISL_DIM_LAYOUT_GFX4_2D);
2561 read_surf.dim = ISL_SURF_DIM_2D;
2565 surf->clear_color = res->aux.clear_color;
2567 /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2568 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2569 ISL_SURF_USAGE_STENCIL_BIT))
2573 alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
2574 surf->surface_state.bo_address = res->bo->gtt_offset;
2577 alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
2578 surf->surface_state_read.bo_address = res->bo->gtt_offset;
2581 if (!isl_format_is_compressed(res->surf.format)) {
2582 if (iris_resource_unfinished_aux_import(res))
2583 iris_resource_finish_aux_import(&screen->base, res);
2585 void *map = surf->surface_state.cpu;
2586 UNUSED void *map_read = surf->surface_state_read.cpu;
2588 /* This is a normal surface. Fill out a SURFACE_STATE for each possible
2589 * auxiliary surface mode and return the pipe_surface.
2591 unsigned aux_modes = res->aux.possible_usages;
2592 while (aux_modes) {
2593 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2594 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2595 view, aux_usage, 0, 0, 0);
2596 map += SURFACE_STATE_ALIGNMENT;
2599 fill_surface_state(&screen->isl_dev, map_read, res,
2600 &read_surf, read_view, aux_usage,
2602 read_surf_tile_x_sa, read_surf_tile_y_sa);
2603 map_read += SURFACE_STATE_ALIGNMENT;
2610 /* The resource has a compressed format, which is not renderable, but we
2611 * have a renderable view format. We must be attempting to upload blocks
2612 * of compressed data via an uncompressed view.
2614 * In this case, we can assume there are no auxiliary buffers, a single
2615 * miplevel, and that the resource is single-sampled. Gallium may try
2616 * and create an uncompressed view with multiple layers, however.
2618 assert(!isl_format_is_compressed(fmt.fmt));
2619 assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2620 assert(res->surf.samples == 1);
2621 assert(view->levels == 1);
2623 struct isl_surf isl_surf;
2624 uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
2626 if (view->base_level > 0) {
2627 /* We can't rely on the hardware's miplevel selection with such
2628 * a substantial lie about the format, so we select a single image
2629 * using the Tile X/Y Offset fields. In this case, we can't handle
2630 * multiple array slices.
2632 * On Broadwell, HALIGN and VALIGN are specified in pixels and are
2633 * hard-coded to align to exactly the block size of the compressed
2634 * texture. This means that, when reinterpreted as a non-compressed
2635 * texture, the tile offsets may be anything and we can't rely on
2636 * X/Y Offset.
2638 * Return NULL to force gallium frontends to take fallback paths.
2640 if (view->array_len > 1 || GFX_VER == 8)
2643 const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
2644 isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2646 is_3d ? 0 : view->base_array_layer,
2647 is_3d ? view->base_array_layer : 0,
2649 &offset_B, &tile_x_sa, &tile_y_sa);
2651 /* We use address and tile offsets to access a single level/layer
2652 * as a subimage, so reset level/layer so it doesn't offset again.
2654 view->base_array_layer = 0;
2655 view->base_level = 0;
2657 /* Level 0 doesn't require tile offsets, and the hardware can find
2658 * array slices using QPitch even with the format override, so we
2659 * can allow layers in this case. Copy the original ISL surface.
2661 memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
2664 /* Scale down the image dimensions by the block size. */
2665 const struct isl_format_layout *fmtl =
2666 isl_format_get_layout(res->surf.format);
2667 isl_surf.format = fmt.fmt;
2668 isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
2669 isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
2670 tile_x_sa /= fmtl->bw;
2671 tile_y_sa /= fmtl->bh;
2673 psurf->width = isl_surf.logical_level0_px.width;
2674 psurf->height = isl_surf.logical_level0_px.height;
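/* For example, a 256x256 BC1 level (4x4 blocks, 64 bits per block)
 * reinterpreted as a 64-bits-per-pixel format such as R32G32_UINT
 * becomes a 64x64 surface whose "texels" are the compressed blocks.
 */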
2676 struct isl_surf_fill_state_info f = {
2679 .mocs = iris_mocs(res->bo, &screen->isl_dev,
2680 ISL_SURF_USAGE_RENDER_TARGET_BIT),
2681 .address = res->bo->gtt_offset + offset_B,
2682 .x_offset_sa = tile_x_sa,
2683 .y_offset_sa = tile_y_sa,
2686 isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2688 return psurf;
2689 }
2691 #if GFX_VER == 8
2692 static void
2693 fill_default_image_param(struct brw_image_param *param)
2695 memset(param, 0, sizeof(*param));
2696 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2697 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2698 * detailed explanation of these parameters.
2700 param->swizzling[0] = 0xff;
2701 param->swizzling[1] = 0xff;
2705 fill_buffer_image_param(struct brw_image_param *param,
2706 enum pipe_format pfmt,
2709 const unsigned cpp = util_format_get_blocksize(pfmt);
2711 fill_default_image_param(param);
2712 param->size[0] = size / cpp;
2713 param->stride[0] = cpp;
2714 }
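/* For example, a 4096-byte buffer image with PIPE_FORMAT_R32G32B32A32_FLOAT
 * (cpp = 16) gets size[0] = 256 texels and stride[0] = 16 bytes.
 */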
2715 #else
2716 #define isl_surf_fill_image_param(x, ...)
2717 #define fill_default_image_param(x, ...)
2718 #define fill_buffer_image_param(x, ...)
2719 #endif
2722 * The pipe->set_shader_images() driver hook.
2725 iris_set_shader_images(struct pipe_context *ctx,
2726 enum pipe_shader_type p_stage,
2727 unsigned start_slot, unsigned count,
2728 unsigned unbind_num_trailing_slots,
2729 const struct pipe_image_view *p_images)
2731 struct iris_context *ice = (struct iris_context *) ctx;
2732 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2733 gl_shader_stage stage = stage_from_pipe(p_stage);
2734 struct iris_shader_state *shs = &ice->state.shaders[stage];
2736 struct iris_genx_state *genx = ice->state.genx;
2737 struct brw_image_param *image_params = genx->shaders[stage].image_param;
2740 shs->bound_image_views &=
2741 ~u_bit_consecutive(start_slot, count + unbind_num_trailing_slots);
2743 for (unsigned i = 0; i < count; i++) {
2744 struct iris_image_view *iv = &shs->image[start_slot + i];
2746 if (p_images && p_images[i].resource) {
2747 const struct pipe_image_view *img = &p_images[i];
2748 struct iris_resource *res = (void *) img->resource;
2750 util_copy_image_view(&iv->base, img);
2752 shs->bound_image_views |= 1 << (start_slot + i);
2754 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2755 res->bind_stages |= 1 << stage;
2757 enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2759 /* Render compression with images supported on gfx12+ only. */
2760 unsigned aux_usages = GFX_VER >= 12 ? res->aux.possible_usages :
2761 1 << ISL_AUX_USAGE_NONE;
2763 alloc_surface_states(&iv->surface_state, aux_usages);
2764 iv->surface_state.bo_address = res->bo->gtt_offset;
2766 void *map = iv->surface_state.cpu;
2768 if (res->base.b.target != PIPE_BUFFER) {
2769 struct isl_view view = {
2771 .base_level = img->u.tex.level,
2773 .base_array_layer = img->u.tex.first_layer,
2774 .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2775 .swizzle = ISL_SWIZZLE_IDENTITY,
2776 .usage = ISL_SURF_USAGE_STORAGE_BIT,
2779 /* If using untyped fallback. */
2780 if (isl_fmt == ISL_FORMAT_RAW) {
2781 fill_buffer_surface_state(&screen->isl_dev, res, map,
2782 isl_fmt, ISL_SWIZZLE_IDENTITY,
2784 ISL_SURF_USAGE_STORAGE_BIT);
2785 } else {
2786 unsigned aux_modes = aux_usages;
2787 while (aux_modes) {
2788 enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2790 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2791 &view, usage, 0, 0, 0);
2793 map += SURFACE_STATE_ALIGNMENT;
2797 isl_surf_fill_image_param(&screen->isl_dev,
2798 &image_params[start_slot + i],
2801 util_range_add(&res->base.b, &res->valid_buffer_range, img->u.buf.offset,
2802 img->u.buf.offset + img->u.buf.size);
2804 fill_buffer_surface_state(&screen->isl_dev, res, map,
2805 isl_fmt, ISL_SWIZZLE_IDENTITY,
2806 img->u.buf.offset, img->u.buf.size,
2807 ISL_SURF_USAGE_STORAGE_BIT);
2808 fill_buffer_image_param(&image_params[start_slot + i],
2809 img->format, img->u.buf.size);
2812 upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2813 } else {
2814 pipe_resource_reference(&iv->base.resource, NULL);
2815 pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2816 fill_default_image_param(&image_params[start_slot + i]);
2820 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
2822 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2823 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2825 /* Broadwell also needs brw_image_params re-uploaded */
2827 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
2828 shs->sysvals_need_upload = true;
2831 if (unbind_num_trailing_slots) {
2832 iris_set_shader_images(ctx, p_stage, start_slot + count,
2833 unbind_num_trailing_slots, 0, NULL);
2839 * The pipe->set_sampler_views() driver hook.
2842 iris_set_sampler_views(struct pipe_context *ctx,
2843 enum pipe_shader_type p_stage,
2844 unsigned start, unsigned count,
2845 unsigned unbind_num_trailing_slots,
2846 struct pipe_sampler_view **views)
2848 struct iris_context *ice = (struct iris_context *) ctx;
2849 gl_shader_stage stage = stage_from_pipe(p_stage);
2850 struct iris_shader_state *shs = &ice->state.shaders[stage];
2853 shs->bound_sampler_views &=
2854 ~u_bit_consecutive(start, count + unbind_num_trailing_slots);
2856 for (i = 0; i < count; i++) {
2857 struct pipe_sampler_view *pview = views ? views[i] : NULL;
2858 pipe_sampler_view_reference((struct pipe_sampler_view **)
2859 &shs->textures[start + i], pview);
2860 struct iris_sampler_view *view = (void *) pview;
2861 if (view) {
2862 view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2863 view->res->bind_stages |= 1 << stage;
2865 shs->bound_sampler_views |= 1 << (start + i);
2867 update_surface_state_addrs(ice->state.surface_uploader,
2868 &view->surface_state, view->res->bo);
2871 for (; i < count + unbind_num_trailing_slots; i++) {
2872 pipe_sampler_view_reference((struct pipe_sampler_view **)
2873 &shs->textures[start + i], NULL);
2876 ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
2878 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2879 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2883 iris_set_compute_resources(struct pipe_context *ctx,
2884 unsigned start, unsigned count,
2885 struct pipe_surface **resources)
2891 iris_set_global_binding(struct pipe_context *ctx,
2892 unsigned start_slot, unsigned count,
2893 struct pipe_resource **resources,
2894 uint32_t **handles)
2896 struct iris_context *ice = (struct iris_context *) ctx;
2898 assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
2899 for (unsigned i = 0; i < count; i++) {
2900 if (resources && resources[i]) {
2901 pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2902 resources[i]);
2903 struct iris_resource *res = (void *) resources[i];
2904 uint64_t addr = res->bo->gtt_offset;
2905 memcpy(handles[i], &addr, sizeof(addr));
2907 pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2908 NULL);
2912 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
2916 * The pipe->set_tess_state() driver hook.
2919 iris_set_tess_state(struct pipe_context *ctx,
2920 const float default_outer_level[4],
2921 const float default_inner_level[2])
2923 struct iris_context *ice = (struct iris_context *) ctx;
2924 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2926 memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2927 memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2929 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
2930 shs->sysvals_need_upload = true;
2934 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2936 struct iris_surface *surf = (void *) p_surf;
2937 pipe_resource_reference(&p_surf->texture, NULL);
2938 pipe_resource_reference(&surf->surface_state.ref.res, NULL);
2939 pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
2940 free(surf->surface_state.cpu);
2945 iris_set_clip_state(struct pipe_context *ctx,
2946 const struct pipe_clip_state *state)
2948 struct iris_context *ice = (struct iris_context *) ctx;
2949 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2950 struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2951 struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2953 memcpy(&ice->state.clip_planes, state, sizeof(*state));
2955 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
2956 IRIS_STAGE_DIRTY_CONSTANTS_GS |
2957 IRIS_STAGE_DIRTY_CONSTANTS_TES;
2958 shs->sysvals_need_upload = true;
2959 gshs->sysvals_need_upload = true;
2960 tshs->sysvals_need_upload = true;
2964 * The pipe->set_polygon_stipple() driver hook.
2967 iris_set_polygon_stipple(struct pipe_context *ctx,
2968 const struct pipe_poly_stipple *state)
2970 struct iris_context *ice = (struct iris_context *) ctx;
2971 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2972 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2976 * The pipe->set_sample_mask() driver hook.
2979 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2981 struct iris_context *ice = (struct iris_context *) ctx;
2983 /* We only support 16x MSAA, so we have a 16-bit sample mask.
2984 * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2986 ice->state.sample_mask = sample_mask & 0xffff;
2987 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2991 * The pipe->set_scissor_states() driver hook.
2993 * This corresponds to our SCISSOR_RECT state structures. It's an
2994 * exact match, so we just store them, and memcpy them out later.
2997 iris_set_scissor_states(struct pipe_context *ctx,
2998 unsigned start_slot,
2999 unsigned num_scissors,
3000 const struct pipe_scissor_state *rects)
3002 struct iris_context *ice = (struct iris_context *) ctx;
3004 for (unsigned i = 0; i < num_scissors; i++) {
3005 if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
3006 /* If the scissor was out of bounds and got clamped to 0 width/height
3007 * at the bounds, the subtraction of 1 from maximums could produce a
3008 * negative number and thus not clip anything. Instead, just provide
3009 * a min > max scissor inside the bounds, which produces the expected
3012 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3013 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
3014 };
3015 } else {
3016 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3017 .minx = rects[i].minx, .miny = rects[i].miny,
3018 .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
3023 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
3027 * The pipe->set_stencil_ref() driver hook.
3029 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
3032 iris_set_stencil_ref(struct pipe_context *ctx,
3033 const struct pipe_stencil_ref state)
3035 struct iris_context *ice = (struct iris_context *) ctx;
3036 memcpy(&ice->state.stencil_ref, &state, sizeof(state));
3037 if (GFX_VER >= 12)
3038 ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
3039 else if (GFX_VER >= 9)
3040 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
3041 else
3042 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
3046 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
3048 return copysignf(state->scale[axis], sign) + state->translate[axis];
3049 }
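/* With the standard GL viewport mapping (scale[0] = width / 2 and
 * translate[0] = x + width / 2), viewport_extent(state, 0, -1.0f)
 * returns the left edge x and viewport_extent(state, 0, 1.0f) returns
 * x + width.
 */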
3052 * The pipe->set_viewport_states() driver hook.
3054 * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
3055 * the guardband yet, as we need the framebuffer dimensions, but we can
3056 * at least fill out the rest.
3059 iris_set_viewport_states(struct pipe_context *ctx,
3060 unsigned start_slot,
3062 const struct pipe_viewport_state *states)
3064 struct iris_context *ice = (struct iris_context *) ctx;
3066 memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
3068 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3070 if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
3071 !ice->state.cso_rast->depth_clip_far))
3072 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
3076 * The pipe->set_framebuffer_state() driver hook.
3078 * Sets the current draw FBO, including color render targets, depth,
3079 * and stencil buffers.
3082 iris_set_framebuffer_state(struct pipe_context *ctx,
3083 const struct pipe_framebuffer_state *state)
3085 struct iris_context *ice = (struct iris_context *) ctx;
3086 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3087 struct isl_device *isl_dev = &screen->isl_dev;
3088 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
3089 struct iris_resource *zres;
3090 struct iris_resource *stencil_res;
3092 unsigned samples = util_framebuffer_get_num_samples(state);
3093 unsigned layers = util_framebuffer_get_num_layers(state);
3095 if (cso->samples != samples) {
3096 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
3098 /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
3099 if (GFX_VER >= 9 && (cso->samples == 16 || samples == 16))
3100 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
3103 if (cso->nr_cbufs != state->nr_cbufs) {
3104 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3107 if ((cso->layers == 0) != (layers == 0)) {
3108 ice->state.dirty |= IRIS_DIRTY_CLIP;
3111 if (cso->width != state->width || cso->height != state->height) {
3112 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3115 if (cso->zsbuf || state->zsbuf) {
3116 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3119 util_copy_framebuffer_state(cso, state);
3120 cso->samples = samples;
3121 cso->layers = layers;
3123 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3125 struct isl_view view = {
3128 .base_array_layer = 0,
3130 .swizzle = ISL_SWIZZLE_IDENTITY,
3133 struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
3135 if (cso->zsbuf) {
3136 iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3137 &stencil_res);
3139 view.base_level = cso->zsbuf->u.tex.level;
3140 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3141 view.array_len =
3142 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3144 if (zres) {
3145 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3147 info.depth_surf = &zres->surf;
3148 info.depth_address = zres->bo->gtt_offset + zres->offset;
3149 info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
3151 view.format = zres->surf.format;
3153 if (iris_resource_level_has_hiz(zres, view.base_level)) {
3154 info.hiz_usage = zres->aux.usage;
3155 info.hiz_surf = &zres->aux.surf;
3156 info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
3159 ice->state.hiz_usage = info.hiz_usage;
3162 if (stencil_res) {
3163 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3164 info.stencil_aux_usage = stencil_res->aux.usage;
3165 info.stencil_surf = &stencil_res->surf;
3166 info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
3168 view.format = stencil_res->surf.format;
3169 info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
3174 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3176 /* Make a null surface for unbound buffers */
3177 void *null_surf_map =
3178 upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3179 4 * GENX(RENDER_SURFACE_STATE_length), 64);
3180 isl_null_fill_state(&screen->isl_dev, null_surf_map,
3181 .size = isl_extent3d(MAX2(cso->width, 1),
3182 MAX2(cso->height, 1),
3183 cso->layers ? cso->layers : 1));
3184 ice->state.null_fb.offset +=
3185 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3187 /* Render target change */
3188 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
3190 ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3192 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3194 ice->state.stage_dirty |=
3195 ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3198 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3202 * The pipe->set_constant_buffer() driver hook.
3204 * This uploads any constant data in user buffers, and references
3205 * any UBO resources containing constant data.
3208 iris_set_constant_buffer(struct pipe_context *ctx,
3209 enum pipe_shader_type p_stage, unsigned index,
3210 bool take_ownership,
3211 const struct pipe_constant_buffer *input)
3213 struct iris_context *ice = (struct iris_context *) ctx;
3214 gl_shader_stage stage = stage_from_pipe(p_stage);
3215 struct iris_shader_state *shs = &ice->state.shaders[stage];
3216 struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3218 /* TODO: Only do this if the buffer changes? */
3219 pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3221 if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3222 shs->bound_cbufs |= 1u << index;
3224 if (input->user_buffer) {
3225 void *map = NULL;
3226 pipe_resource_reference(&cbuf->buffer, NULL);
3227 u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3228 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3230 if (!cbuf->buffer) {
3231 /* Allocation was unsuccessful - just unbind */
3232 iris_set_constant_buffer(ctx, p_stage, index, false, NULL);
3233 return;
3234 }
3237 memcpy(map, input->user_buffer, input->buffer_size);
3238 } else if (input->buffer) {
3239 if (take_ownership) {
3240 pipe_resource_reference(&cbuf->buffer, NULL);
3241 cbuf->buffer = input->buffer;
3243 pipe_resource_reference(&cbuf->buffer, input->buffer);
3246 cbuf->buffer_offset = input->buffer_offset;
3249 cbuf->buffer_size =
3250 MIN2(input->buffer_size,
3251 iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3253 struct iris_resource *res = (void *) cbuf->buffer;
3254 res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3255 res->bind_stages |= 1 << stage;
3257 shs->bound_cbufs &= ~(1u << index);
3258 pipe_resource_reference(&cbuf->buffer, NULL);
3261 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
3265 upload_sysvals(struct iris_context *ice,
3266 gl_shader_stage stage,
3267 const struct pipe_grid_info *grid)
3269 UNUSED struct iris_genx_state *genx = ice->state.genx;
3270 struct iris_shader_state *shs = &ice->state.shaders[stage];
3272 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3273 if (!shader || (shader->num_system_values == 0 &&
3274 shader->kernel_input_size == 0))
3277 assert(shader->num_cbufs > 0);
3279 unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3280 struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3281 unsigned system_values_start =
3282 ALIGN(shader->kernel_input_size, sizeof(uint32_t));
3283 unsigned upload_size = system_values_start +
3284 shader->num_system_values * sizeof(uint32_t);
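/* For example, a compute kernel with 8 bytes of kernel inputs and three
 * system values uploads ALIGN(8, 4) + 3 * 4 = 20 bytes, with the sysvals
 * starting at byte 8.
 */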
3286 void *map = NULL;
3287 assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3288 u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3289 &cbuf->buffer_offset, &cbuf->buffer, &map);
3291 if (shader->kernel_input_size > 0)
3292 memcpy(map, grid->input, shader->kernel_input_size);
3294 uint32_t *sysval_map = map + system_values_start;
3295 for (int i = 0; i < shader->num_system_values; i++) {
3296 uint32_t sysval = shader->system_values[i];
3299 if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3301 unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3302 unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3303 struct brw_image_param *param =
3304 &genx->shaders[stage].image_param[img];
3306 assert(offset < sizeof(struct brw_image_param));
3307 value = ((uint32_t *) param)[offset];
3309 } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3311 } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3312 int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3313 int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3314 value = fui(ice->state.clip_planes.ucp[plane][comp]);
3315 } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3316 if (stage == MESA_SHADER_TESS_CTRL) {
3317 value = ice->state.vertices_per_patch;
3319 assert(stage == MESA_SHADER_TESS_EVAL);
3320 const struct shader_info *tcs_info =
3321 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3323 value = tcs_info->tess.tcs_vertices_out;
3325 value = ice->state.vertices_per_patch;
3327 } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3328 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3329 unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3330 value = fui(ice->state.default_outer_level[i]);
3331 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3332 value = fui(ice->state.default_inner_level[0]);
3333 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3334 value = fui(ice->state.default_inner_level[1]);
3335 } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
3336 sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
3337 unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
3338 value = ice->state.last_block[i];
3339 } else if (sysval == BRW_PARAM_BUILTIN_WORK_DIM) {
3340 value = grid->work_dim;
3342 assert(!"unhandled system value");
3345 *sysval_map++ = value;
3348 cbuf->buffer_size = upload_size;
3349 iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3350 &shs->constbuf_surf_state[sysval_cbuf_index],
3351 ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
3353 shs->sysvals_need_upload = false;
3357 * The pipe->set_shader_buffers() driver hook.
3359 * This binds SSBOs and ABOs. Unfortunately, we need to stream out
3360 * SURFACE_STATE here, as the buffer offset may change each time.
3363 iris_set_shader_buffers(struct pipe_context *ctx,
3364 enum pipe_shader_type p_stage,
3365 unsigned start_slot, unsigned count,
3366 const struct pipe_shader_buffer *buffers,
3367 unsigned writable_bitmask)
3369 struct iris_context *ice = (struct iris_context *) ctx;
3370 gl_shader_stage stage = stage_from_pipe(p_stage);
3371 struct iris_shader_state *shs = &ice->state.shaders[stage];
3373 unsigned modified_bits = u_bit_consecutive(start_slot, count);
3375 shs->bound_ssbos &= ~modified_bits;
3376 shs->writable_ssbos &= ~modified_bits;
3377 shs->writable_ssbos |= writable_bitmask << start_slot;
3379 for (unsigned i = 0; i < count; i++) {
3380 if (buffers && buffers[i].buffer) {
3381 struct iris_resource *res = (void *) buffers[i].buffer;
3382 struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3383 struct iris_state_ref *surf_state =
3384 &shs->ssbo_surf_state[start_slot + i];
3385 pipe_resource_reference(&ssbo->buffer, &res->base.b);
3386 ssbo->buffer_offset = buffers[i].buffer_offset;
3387 ssbo->buffer_size =
3388 MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3390 shs->bound_ssbos |= 1 << (start_slot + i);
3392 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
3394 iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, usage);
3396 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3397 res->bind_stages |= 1 << stage;
3399 util_range_add(&res->base.b, &res->valid_buffer_range, ssbo->buffer_offset,
3400 ssbo->buffer_offset + ssbo->buffer_size);
3402 pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3403 pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3404 NULL);
3408 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
3412 iris_delete_state(struct pipe_context *ctx, void *state)
3418 * The pipe->set_vertex_buffers() driver hook.
3420 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3423 iris_set_vertex_buffers(struct pipe_context *ctx,
3424 unsigned start_slot, unsigned count,
3425 unsigned unbind_num_trailing_slots,
3426 bool take_ownership,
3427 const struct pipe_vertex_buffer *buffers)
3429 struct iris_context *ice = (struct iris_context *) ctx;
3430 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3431 struct iris_genx_state *genx = ice->state.genx;
3433 ice->state.bound_vertex_buffers &=
3434 ~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
3436 for (unsigned i = 0; i < count; i++) {
3437 const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3438 struct iris_vertex_buffer_state *state =
3439 &genx->vertex_buffers[start_slot + i];
3442 pipe_resource_reference(&state->resource, NULL);
3446 /* We may see user buffers that are NULL bindings. */
3447 assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3449 if (take_ownership) {
3450 pipe_resource_reference(&state->resource, NULL);
3451 state->resource = buffer->buffer.resource;
3453 pipe_resource_reference(&state->resource, buffer->buffer.resource);
3455 struct iris_resource *res = (void *) state->resource;
3457 state->offset = (int) buffer->buffer_offset;
3460 ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3461 res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3464 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3465 vb.VertexBufferIndex = start_slot + i;
3466 vb.AddressModifyEnable = true;
3467 vb.BufferPitch = buffer->stride;
3468 if (res) {
3469 vb.BufferSize = res->base.b.width0 - (int) buffer->buffer_offset;
3470 vb.BufferStartingAddress =
3471 ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
3472 vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
3473 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
3474 #if GFX_VER >= 12
3475 vb.L3BypassDisable = true;
3476 #endif
3477 } else {
3478 vb.NullVertexBuffer = true;
3483 for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
3484 struct iris_vertex_buffer_state *state =
3485 &genx->vertex_buffers[start_slot + count + i];
3487 pipe_resource_reference(&state->resource, NULL);
3490 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3494 * Gallium CSO for vertex elements.
3496 struct iris_vertex_element_state {
3497 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3498 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3499 uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3500 uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3501 unsigned count;
3502 };
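/* The arrays are sized for 33 entries: the 32 API vertex elements Gallium
 * allows, plus room for one element that draw-time code may append.  The
 * extra leading DWord in vertex_elements holds the 3DSTATE_VERTEX_ELEMENTS
 * command header.
 */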
3505 * The pipe->create_vertex_elements() driver hook.
3507 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3508 * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
3509 * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3510 * needed. Otherwise, we will need information that is only available
3511 * at draw time. We set up edgeflag_ve and edgeflag_vfi as alternative
3512 * versions of the last 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING,
3513 * to be used at draw time if we detect that the Vertex Shader needs EdgeFlag.
3516 iris_create_vertex_elements(struct pipe_context *ctx,
3518 const struct pipe_vertex_element *state)
3520 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3521 const struct intel_device_info *devinfo = &screen->devinfo;
3522 struct iris_vertex_element_state *cso =
3523 malloc(sizeof(struct iris_vertex_element_state));
3527 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3529 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3532 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3533 uint32_t *vfi_pack_dest = cso->vf_instancing;
3536 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3538 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3539 ve.Component0Control = VFCOMP_STORE_0;
3540 ve.Component1Control = VFCOMP_STORE_0;
3541 ve.Component2Control = VFCOMP_STORE_0;
3542 ve.Component3Control = VFCOMP_STORE_1_FP;
3545 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3549 for (int i = 0; i < count; i++) {
3550 const struct iris_format_info fmt =
3551 iris_format_for_usage(devinfo, state[i].src_format, 0);
3552 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3553 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3555 switch (isl_format_get_num_channels(fmt.fmt)) {
3556 case 0: comp[0] = VFCOMP_STORE_0; FALLTHROUGH;
3557 case 1: comp[1] = VFCOMP_STORE_0; FALLTHROUGH;
3558 case 2: comp[2] = VFCOMP_STORE_0; FALLTHROUGH;
3560 comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3561 : VFCOMP_STORE_1_FP;
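/* Worked example (illustrative): a two-channel format such as
 * ISL_FORMAT_R32G32_FLOAT enters the switch at case 2, so the fallthrough
 * leaves comp = { STORE_SRC, STORE_SRC, STORE_0, STORE_1_FP }: the missing
 * Z component reads back as 0.0 and the missing W as 1.0, matching the
 * OpenGL defaults for unspecified attribute components.
 */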
3564 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3565 ve.EdgeFlagEnable = false;
3566 ve.VertexBufferIndex = state[i].vertex_buffer_index;
3568 ve.SourceElementOffset = state[i].src_offset;
3569 ve.SourceElementFormat = fmt.fmt;
3570 ve.Component0Control = comp[0];
3571 ve.Component1Control = comp[1];
3572 ve.Component2Control = comp[2];
3573 ve.Component3Control = comp[3];
3576 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3577 vi.VertexElementIndex = i;
3578 vi.InstancingEnable = state[i].instance_divisor > 0;
3579 vi.InstanceDataStepRate = state[i].instance_divisor;
3582 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3583 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3586 /* An alternative version of the last VE and VFI is stored so it
3587 * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3588 */
3589 if (count) {
3590 const unsigned edgeflag_index = count - 1;
3591 const struct iris_format_info fmt =
3592 iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3593 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3594 ve.EdgeFlagEnable = true;
3595 ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3597 ve.SourceElementOffset = state[edgeflag_index].src_offset;
3598 ve.SourceElementFormat = fmt.fmt;
3599 ve.Component0Control = VFCOMP_STORE_SRC;
3600 ve.Component1Control = VFCOMP_STORE_0;
3601 ve.Component2Control = VFCOMP_STORE_0;
3602 ve.Component3Control = VFCOMP_STORE_0;
3604 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3605 /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3606 * at draw time, as it should change if SGVs are emitted.
3608 vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3609 vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3617 * The pipe->bind_vertex_elements_state() driver hook.
3620 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3622 struct iris_context *ice = (struct iris_context *) ctx;
3623 struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3624 struct iris_vertex_element_state *new_cso = state;
3626 /* 3DSTATE_VF_SGVS overrides the last VE, so if the count is changing,
3627 * we need to re-emit it to ensure we're overriding the right one.
3629 if (new_cso && cso_changed(count))
3630 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3632 ice->state.cso_vertex_elements = state;
3633 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3637 * The pipe->create_stream_output_target() driver hook.
3639 * "Target" here refers to a destination buffer. We translate this into
3640 * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
3641 * know which buffer this represents, or whether we ought to zero the
3642 * write-offsets, or append. Those are handled in the set() hook.
3644 static struct pipe_stream_output_target *
3645 iris_create_stream_output_target(struct pipe_context *ctx,
3646 struct pipe_resource *p_res,
3647 unsigned buffer_offset,
3648 unsigned buffer_size)
3650 struct iris_resource *res = (void *) p_res;
3651 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3655 res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3657 pipe_reference_init(&cso->base.reference, 1);
3658 pipe_resource_reference(&cso->base.buffer, p_res);
3659 cso->base.buffer_offset = buffer_offset;
3660 cso->base.buffer_size = buffer_size;
3661 cso->base.context = ctx;
3663 util_range_add(&res->base.b, &res->valid_buffer_range, buffer_offset,
3664 buffer_offset + buffer_size);
3670 iris_stream_output_target_destroy(struct pipe_context *ctx,
3671 struct pipe_stream_output_target *state)
3673 struct iris_stream_output_target *cso = (void *) state;
3675 pipe_resource_reference(&cso->base.buffer, NULL);
3676 pipe_resource_reference(&cso->offset.res, NULL);
3682 * The pipe->set_stream_output_targets() driver hook.
3684 * At this point, we know which targets are bound to a particular index,
3685 * and also whether we want to append or start over. We can finish the
3686 * 3DSTATE_SO_BUFFER packets we started earlier.
3689 iris_set_stream_output_targets(struct pipe_context *ctx,
3690 unsigned num_targets,
3691 struct pipe_stream_output_target **targets,
3692 const unsigned *offsets)
3694 struct iris_context *ice = (struct iris_context *) ctx;
3695 struct iris_genx_state *genx = ice->state.genx;
3696 uint32_t *so_buffers = genx->so_buffers;
3697 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3699 const bool active = num_targets > 0;
3700 if (ice->state.streamout_active != active) {
3701 ice->state.streamout_active = active;
3702 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3704 /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3705 * it's a non-pipelined command. If we're switching streamout on, we
3706 * may have missed emitting it earlier, so do so now. (We're already
3707 * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3708 */
3709 if (active) {
3710 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3711 } else {
3712 uint32_t flush = 0;
3713 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3714 struct iris_stream_output_target *tgt =
3715 (void *) ice->state.so_target[i];
3716 if (tgt) {
3717 struct iris_resource *res = (void *) tgt->base.buffer;
3719 flush |= iris_flush_bits_for_history(ice, res);
3720 iris_dirty_for_history(ice, res);
3724 /* SO draws require flushing of const cache to make SO data
3725 * observable when VB/IB are cached in L3.
3727 if (flush & PIPE_CONTROL_VF_CACHE_INVALIDATE)
3728 flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
3730 iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3731 "make streamout results visible", flush);
3735 for (int i = 0; i < 4; i++) {
3736 pipe_so_target_reference(&ice->state.so_target[i],
3737 i < num_targets ? targets[i] : NULL);
3740 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3741 if (!active)
3742 return;
3744 for (unsigned i = 0; i < 4; i++,
3745 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3747 struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3748 unsigned offset = offsets[i];
3750 if (!tgt) {
3751 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3753 sob.SOBufferIndex = i;
3755 sob._3DCommandOpcode = 0;
3756 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3760 continue;
3761 }
3762 if (!tgt->offset.res)
3763 upload_state(ctx->const_uploader, &tgt->offset, sizeof(uint32_t), 4);
3765 struct iris_resource *res = (void *) tgt->base.buffer;
3767 /* Note that offsets[i] will either be 0, causing us to zero
3768 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3769 * "continue appending at the existing offset."
3771 assert(offset == 0 || offset == 0xFFFFFFFF);
3773 /* When we're first called with an offset of 0, we want the next
3774 * 3DSTATE_SO_BUFFER packets to reset the offset to the beginning.
3775 * Any further times we emit those packets, we want to use 0xFFFFFFFF
3776 * to continue appending from the current offset.
3778 * Note that we might be called by Begin (offset = 0), Pause, then
3779 * Resume (offset = 0xFFFFFFFF) before ever drawing (where these
3780 * commands will actually be sent to the GPU). In this case, we
3781 * don't want to append - we still want to do our initial zeroing.
3782 */
3783 if (offset == 0)
3784 tgt->zero_offset = true;
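/* Illustrative timeline (editorial): Begin -> offset 0 sets zero_offset;
 * Pause then Resume before any draw -> offset 0xFFFFFFFF, zero_offset
 * stays set; the first draw emits one packet with StreamOffset = 0 and
 * clears zero_offset, and all later packets append with 0xFFFFFFFF.
 */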
3786 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3788 sob.SOBufferIndex = i;
3790 sob._3DCommandOpcode = 0;
3791 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3793 sob.SurfaceBaseAddress =
3794 rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
3795 IRIS_DOMAIN_OTHER_WRITE);
3796 sob.SOBufferEnable = true;
3797 sob.StreamOffsetWriteEnable = true;
3798 sob.StreamOutputBufferOffsetAddressEnable = true;
3799 sob.MOCS = iris_mocs(res->bo, &screen->isl_dev, 0);
3801 sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3802 sob.StreamOutputBufferOffsetAddress =
3803 rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
3804 tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
3805 sob.StreamOffset = 0xFFFFFFFF; /* not offset, see above */
3809 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3813 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3814 * 3DSTATE_STREAMOUT packets.
3816 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3817 * hardware to record. We can create it entirely based on the shader, with
3818 * no dynamic state dependencies.
3820 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3821 * state-based settings. We capture the shader-related ones here, and merge
3822 * the rest in at draw time.
3825 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3826 const struct brw_vue_map *vue_map)
3828 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3829 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3830 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3831 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3832 int max_decls = 0;
3833 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3835 memset(so_decl, 0, sizeof(so_decl));
3837 /* Construct the list of SO_DECLs to be emitted. The formatting of the
3838 * command feels strange -- each dword pair contains a SO_DECL per stream.
3840 for (unsigned i = 0; i < info->num_outputs; i++) {
3841 const struct pipe_stream_output *output = &info->output[i];
3842 const int buffer = output->output_buffer;
3843 const int varying = output->register_index;
3844 const unsigned stream_id = output->stream;
3845 assert(stream_id < MAX_VERTEX_STREAMS);
3847 buffer_mask[stream_id] |= 1 << buffer;
3849 assert(vue_map->varying_to_slot[varying] >= 0);
3851 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3852 * array. Instead, it simply increments DstOffset for the following
3853 * input by the number of components that should be skipped.
3855 * Our hardware is unusual in that it requires us to program SO_DECLs
3856 * for fake "hole" components, rather than simply taking the offset
3857 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
3858 * program as many size = 4 holes as we can, then a final hole to
3859 * accommodate the final 1, 2, or 3 remaining.
3860 */
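/* Worked example (illustrative): a 7-component gap between next_offset
 * and output->dst_offset becomes two hole SO_DECLs below: one with
 * ComponentMask (1 << 4) - 1 = 0xf, then, after skip_components drops
 * to 3, a final one with (1 << 3) - 1 = 0x7.
 */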
3861 int skip_components = output->dst_offset - next_offset[buffer];
3863 while (skip_components > 0) {
3864 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3865 .HoleFlag = 1,
3866 .OutputBufferSlot = output->output_buffer,
3867 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3869 skip_components -= 4;
3872 next_offset[buffer] = output->dst_offset + output->num_components;
3874 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3875 .OutputBufferSlot = output->output_buffer,
3876 .RegisterIndex = vue_map->varying_to_slot[varying],
3877 .ComponentMask =
3878 ((1 << output->num_components) - 1) << output->start_component,
3881 if (decls[stream_id] > max_decls)
3882 max_decls = decls[stream_id];
3885 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3886 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3887 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3889 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3890 int urb_entry_read_offset = 0;
3891 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3892 urb_entry_read_offset;
3894 /* We always read the whole vertex. This could be reduced at some
3895 * point by reading less and offsetting the register index in the
3896 * 3DSTATE_SO_DECLs.
3897 */
3898 sol.Stream0VertexReadOffset = urb_entry_read_offset;
3899 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3900 sol.Stream1VertexReadOffset = urb_entry_read_offset;
3901 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3902 sol.Stream2VertexReadOffset = urb_entry_read_offset;
3903 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3904 sol.Stream3VertexReadOffset = urb_entry_read_offset;
3905 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3907 /* Set buffer pitches; 0 means unbound. */
3908 sol.Buffer0SurfacePitch = 4 * info->stride[0];
3909 sol.Buffer1SurfacePitch = 4 * info->stride[1];
3910 sol.Buffer2SurfacePitch = 4 * info->stride[2];
3911 sol.Buffer3SurfacePitch = 4 * info->stride[3];
3914 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3915 list.DWordLength = 3 + 2 * max_decls - 2;
3916 list.StreamtoBufferSelects0 = buffer_mask[0];
3917 list.StreamtoBufferSelects1 = buffer_mask[1];
3918 list.StreamtoBufferSelects2 = buffer_mask[2];
3919 list.StreamtoBufferSelects3 = buffer_mask[3];
3920 list.NumEntries0 = decls[0];
3921 list.NumEntries1 = decls[1];
3922 list.NumEntries2 = decls[2];
3923 list.NumEntries3 = decls[3];
3926 for (int i = 0; i < max_decls; i++) {
3927 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3928 entry.Stream0Decl = so_decl[0][i];
3929 entry.Stream1Decl = so_decl[1][i];
3930 entry.Stream2Decl = so_decl[2][i];
3931 entry.Stream3Decl = so_decl[3][i];
3939 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3940 const struct brw_vue_map *last_vue_map,
3941 bool two_sided_color,
3942 unsigned *out_offset,
3943 unsigned *out_length)
3945 /* The compiler computes the first URB slot without considering COL/BFC
3946 * swizzling (because it doesn't know whether it's enabled), so we need
3947 * to do that here too. This may result in a smaller offset, which
3948 * should be safe.
3949 */
3950 const unsigned first_slot =
3951 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3953 /* This becomes the URB read offset (counted in pairs of slots). */
3954 assert(first_slot % 2 == 0);
3955 *out_offset = first_slot / 2;
3957 /* We need to adjust the inputs read to account for front/back color
3958 * swizzling, as it can make the URB length longer.
3960 for (int c = 0; c <= 1; c++) {
3961 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3962 /* If two sided color is enabled, the fragment shader's gl_Color
3963 * (COL0) input comes from either the gl_FrontColor (COL0) or
3964 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
3966 if (two_sided_color)
3967 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3969 /* If front color isn't written, we opt to give them back color
3970 * instead of an undefined value. Switch from COL to BFC.
3972 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3973 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3974 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3979 /* Compute the minimum URB Read Length necessary for the FS inputs.
3981 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3982 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3984 * "This field should be set to the minimum length required to read the
3985 * maximum source attribute. The maximum source attribute is indicated
3986 * by the maximum value of the enabled Attribute # Source Attribute if
3987 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3988 * enable is not set.
3989 * read_length = ceiling((max_source_attr + 1) / 2)
3991 * [errata] Corruption/Hang possible if length programmed larger than
3992 * recommended"
3994 * Similar text exists for Ivy Bridge.
3996 * We find the last URB slot that's actually read by the FS.
3998 unsigned last_read_slot = last_vue_map->num_slots - 1;
3999 while (last_read_slot > first_slot && !(fs_input_slots &
4000 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
4001 --last_read_slot;
4003 /* The URB read length is the difference of the two, counted in pairs. */
4004 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
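/* Worked example (illustrative): with first_slot = 4 and the last
 * FS-read slot at 9, *out_offset = 4 / 2 = 2 and
 * *out_length = DIV_ROUND_UP(9 - 4 + 1, 2) = 3 pairs of URB slots.
 */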
4008 iris_emit_sbe_swiz(struct iris_batch *batch,
4009 const struct iris_context *ice,
4010 const struct brw_vue_map *vue_map,
4011 unsigned urb_read_offset,
4012 unsigned sprite_coord_enables)
4014 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
4015 const struct brw_wm_prog_data *wm_prog_data = (void *)
4016 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4017 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4019 /* XXX: this should be generated when putting programs in place */
4021 for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
4022 const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
4023 const int input_index = wm_prog_data->urb_setup[fs_attr];
4024 if (input_index < 0 || input_index >= 16)
4025 continue;
4027 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
4028 &attr_overrides[input_index];
4029 int slot = vue_map->varying_to_slot[fs_attr];
4031 /* Viewport and Layer are stored in the VUE header. We need to override
4032 * them to zero if earlier stages didn't write them, as GL requires that
4033 * they read back as zero when not explicitly set.
4034 */
4035 switch (fs_attr) {
4036 case VARYING_SLOT_VIEWPORT:
4037 case VARYING_SLOT_LAYER:
4038 attr->ComponentOverrideX = true;
4039 attr->ComponentOverrideW = true;
4040 attr->ConstantSource = CONST_0000;
4042 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
4043 attr->ComponentOverrideY = true;
4044 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
4045 attr->ComponentOverrideZ = true;
4048 case VARYING_SLOT_PRIMITIVE_ID:
4049 /* Override if the previous shader stage didn't write gl_PrimitiveID. */
4051 attr->ComponentOverrideX = true;
4052 attr->ComponentOverrideY = true;
4053 attr->ComponentOverrideZ = true;
4054 attr->ComponentOverrideW = true;
4055 attr->ConstantSource = PRIM_ID;
4064 if (sprite_coord_enables & (1 << input_index))
4065 continue;
4067 /* If there was only a back color written but not front, use back
4068 * as the color instead of undefined.
4070 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
4071 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
4072 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
4073 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
4075 /* Not written by the previous stage - undefined. */
4076 if (slot == -1) {
4077 attr->ComponentOverrideX = true;
4078 attr->ComponentOverrideY = true;
4079 attr->ComponentOverrideZ = true;
4080 attr->ComponentOverrideW = true;
4081 attr->ConstantSource = CONST_0001_FLOAT;
4082 continue;
4083 }
4085 /* Compute the location of the attribute relative to the read offset,
4086 * which is counted in 256-bit increments (two 128-bit VUE slots).
4088 const int source_attr = slot - 2 * urb_read_offset;
4089 assert(source_attr >= 0 && source_attr <= 32);
4090 attr->SourceAttribute = source_attr;
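/* Worked example (illustrative): with urb_read_offset = 2, the read
 * starts at VUE slot 4, so a varying living in slot 6 is programmed as
 * SourceAttribute = 6 - 2 * 2 = 2, i.e. relative to the read start.
 */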
4092 /* If we are doing two-sided color, and the VUE slot following this one
4093 * represents a back-facing color, then we need to instruct the SF unit
4094 * to do back-facing swizzling.
4096 if (cso_rast->light_twoside &&
4097 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
4098 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
4099 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
4100 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
4101 attr->SwizzleSelect = INPUTATTR_FACING;
4104 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
4105 for (int i = 0; i < 16; i++)
4106 sbes.Attribute[i] = attr_overrides[i];
4111 iris_is_drawing_points(const struct iris_context *ice)
4113 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4115 if (cso_rast->fill_mode_point) {
4116 return true;
4117 }
4119 if (ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
4120 const struct brw_gs_prog_data *gs_prog_data =
4121 (void *) ice->shaders.prog[MESA_SHADER_GEOMETRY]->prog_data;
4122 return gs_prog_data->output_topology == _3DPRIM_POINTLIST;
4123 } else if (ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
4124 const struct brw_tes_prog_data *tes_data =
4125 (void *) ice->shaders.prog[MESA_SHADER_TESS_EVAL]->prog_data;
4126 return tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
4128 return ice->state.prim_mode == PIPE_PRIM_POINTS;
4133 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
4134 const struct iris_rasterizer_state *cso)
4136 unsigned overrides = 0;
4138 if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
4139 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
4141 for (int i = 0; i < 8; i++) {
4142 if ((cso->sprite_coord_enable & (1 << i)) &&
4143 prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
4144 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
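/* Worked example (illustrative): if sprite_coord_enable has bit 0 set
 * and the FS reads TEX0 at urb_setup slot 3, bit 3 is set in the
 * override mask, which feeds 3DSTATE_SBE's
 * PointSpriteTextureCoordinateEnable below.
 */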
4151 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
4153 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4154 const struct brw_wm_prog_data *wm_prog_data = (void *)
4155 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4156 const struct shader_info *fs_info =
4157 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
4158 const struct brw_vue_map *last_vue_map =
4159 &brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
4161 unsigned urb_read_offset, urb_read_length;
4162 iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
4164 cso_rast->light_twoside,
4165 &urb_read_offset, &urb_read_length);
4167 unsigned sprite_coord_overrides =
4168 iris_is_drawing_points(ice) ?
4169 iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast) : 0;
4171 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
4172 sbe.AttributeSwizzleEnable = true;
4173 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
4174 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
4175 sbe.VertexURBEntryReadOffset = urb_read_offset;
4176 sbe.VertexURBEntryReadLength = urb_read_length;
4177 sbe.ForceVertexURBEntryReadOffset = true;
4178 sbe.ForceVertexURBEntryReadLength = true;
4179 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
4180 sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
4182 for (int i = 0; i < 32; i++) {
4183 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
4188 iris_emit_sbe_swiz(batch, ice, last_vue_map, urb_read_offset,
4189 sprite_coord_overrides);
4192 /* ------------------------------------------------------------------- */
4195 * Populate VS program key fields based on the current state.
4198 iris_populate_vs_key(const struct iris_context *ice,
4199 const struct shader_info *info,
4200 gl_shader_stage last_stage,
4201 struct iris_vs_prog_key *key)
4203 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4205 if (info->clip_distance_array_size == 0 &&
4206 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4207 last_stage == MESA_SHADER_VERTEX)
4208 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4212 * Populate TCS program key fields based on the current state.
4215 iris_populate_tcs_key(const struct iris_context *ice,
4216 struct iris_tcs_prog_key *key)
4221 * Populate TES program key fields based on the current state.
4224 iris_populate_tes_key(const struct iris_context *ice,
4225 const struct shader_info *info,
4226 gl_shader_stage last_stage,
4227 struct iris_tes_prog_key *key)
4229 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4231 if (info->clip_distance_array_size == 0 &&
4232 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4233 last_stage == MESA_SHADER_TESS_EVAL)
4234 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4238 * Populate GS program key fields based on the current state.
4241 iris_populate_gs_key(const struct iris_context *ice,
4242 const struct shader_info *info,
4243 gl_shader_stage last_stage,
4244 struct iris_gs_prog_key *key)
4246 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4248 if (info->clip_distance_array_size == 0 &&
4249 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4250 last_stage == MESA_SHADER_GEOMETRY)
4251 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4255 * Populate FS program key fields based on the current state.
4258 iris_populate_fs_key(const struct iris_context *ice,
4259 const struct shader_info *info,
4260 struct iris_fs_prog_key *key)
4262 struct iris_screen *screen = (void *) ice->ctx.screen;
4263 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4264 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4265 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4266 const struct iris_blend_state *blend = ice->state.cso_blend;
4268 key->nr_color_regions = fb->nr_cbufs;
4270 key->clamp_fragment_color = rast->clamp_fragment_color;
4272 key->alpha_to_coverage = blend->alpha_to_coverage;
4274 key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha_enabled;
4276 key->flat_shade = rast->flatshade &&
4277 (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4279 key->persample_interp = rast->force_persample_interp;
4280 key->multisample_fbo = rast->multisample && fb->samples > 1;
4282 key->coherent_fb_fetch = GFX_VER >= 9;
4284 key->force_dual_color_blend =
4285 screen->driconf.dual_color_blend_by_location &&
4286 (blend->blend_enables & 1) && blend->dual_color_blending;
4288 /* TODO: Respect glHint for key->high_quality_derivatives */
4292 iris_populate_cs_key(const struct iris_context *ice,
4293 struct iris_cs_prog_key *key)
4298 KSP(const struct iris_compiled_shader *shader)
4300 struct iris_resource *res = (void *) shader->assembly.res;
4301 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4304 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
4305 pkt.KernelStartPointer = KSP(shader); \
4306 pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
4307 pkt.FloatingPointMode = prog_data->use_alt_mode; \
4309 pkt.DispatchGRFStartRegisterForURBData = \
4310 prog_data->dispatch_grf_start_reg; \
4311 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
4312 pkt.prefix##URBEntryReadOffset = 0; \
4314 pkt.StatisticsEnable = true; \
4315 pkt.Enable = true; \
4317 if (prog_data->total_scratch) { \
4318 pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
4321 #define MERGE_SCRATCH_ADDR(name) \
4323 uint32_t pkt2[GENX(name##_length)] = {0}; \
4324 _iris_pack_command(batch, GENX(name), pkt2, p) { \
4325 p.ScratchSpaceBasePointer = \
4326 rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE); \
4328 iris_emit_merge(batch, pkt, pkt2, GENX(name##_length)); \
4333 * Encode most of 3DSTATE_VS based on the compiled shader.
4336 iris_store_vs_state(const struct intel_device_info *devinfo,
4337 struct iris_compiled_shader *shader)
4339 struct brw_stage_prog_data *prog_data = shader->prog_data;
4340 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4342 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4343 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4344 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4345 vs.SIMD8DispatchEnable = true;
4346 vs.UserClipDistanceCullTestEnableBitmask =
4347 vue_prog_data->cull_distance_mask;
4352 * Encode most of 3DSTATE_HS based on the compiled shader.
4355 iris_store_tcs_state(const struct intel_device_info *devinfo,
4356 struct iris_compiled_shader *shader)
4358 struct brw_stage_prog_data *prog_data = shader->prog_data;
4359 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4360 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4362 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4363 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4368 * A hang occurs when the maximum number of threads is less than twice
4369 * the instance count; the maximum number of threads must be more than
4370 * twice the instance count.
4372 assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4373 hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4374 hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4377 hs.InstanceCount = tcs_prog_data->instances - 1;
4378 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4379 hs.IncludeVertexHandles = true;
4382 /* Patch Count threshold specifies the maximum number of patches that
4383 * will be accumulated before a thread dispatch is forced.
4385 hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4389 hs.DispatchMode = vue_prog_data->dispatch_mode;
4390 hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4396 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4399 iris_store_tes_state(const struct intel_device_info *devinfo,
4400 struct iris_compiled_shader *shader)
4402 struct brw_stage_prog_data *prog_data = shader->prog_data;
4403 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4404 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4406 uint32_t *te_state = (void *) shader->derived_data;
4407 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
4409 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4410 te.Partitioning = tes_prog_data->partitioning;
4411 te.OutputTopology = tes_prog_data->output_topology;
4412 te.TEDomain = tes_prog_data->domain;
4414 te.MaximumTessellationFactorOdd = 63.0;
4415 te.MaximumTessellationFactorNotOdd = 64.0;
4418 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4419 INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4421 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4422 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4423 ds.ComputeWCoordinateEnable =
4424 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4426 ds.UserClipDistanceCullTestEnableBitmask =
4427 vue_prog_data->cull_distance_mask;
4433 * Encode most of 3DSTATE_GS based on the compiled shader.
4436 iris_store_gs_state(const struct intel_device_info *devinfo,
4437 struct iris_compiled_shader *shader)
4439 struct brw_stage_prog_data *prog_data = shader->prog_data;
4440 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4441 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4443 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4444 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4446 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4447 gs.OutputTopology = gs_prog_data->output_topology;
4448 gs.ControlDataHeaderSize =
4449 gs_prog_data->control_data_header_size_hwords;
4450 gs.InstanceControl = gs_prog_data->invocations - 1;
4451 gs.DispatchMode = DISPATCH_MODE_SIMD8;
4452 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4453 gs.ControlDataFormat = gs_prog_data->control_data_format;
4454 gs.ReorderMode = TRAILING;
4455 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4456 gs.MaximumNumberofThreads =
4457 GFX_VER == 8 ? (devinfo->max_gs_threads / 2 - 1)
4458 : (devinfo->max_gs_threads - 1);
4460 if (gs_prog_data->static_vertex_count != -1) {
4461 gs.StaticOutput = true;
4462 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4464 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4466 gs.UserClipDistanceCullTestEnableBitmask =
4467 vue_prog_data->cull_distance_mask;
4469 const int urb_entry_write_offset = 1;
4470 const uint32_t urb_entry_output_length =
4471 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4472 urb_entry_write_offset;
4474 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4475 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4480 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4483 iris_store_fs_state(const struct intel_device_info *devinfo,
4484 struct iris_compiled_shader *shader)
4486 struct brw_stage_prog_data *prog_data = shader->prog_data;
4487 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4489 uint32_t *ps_state = (void *) shader->derived_data;
4490 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4492 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4493 ps.VectorMaskEnable = true;
4494 ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4495 ps.FloatingPointMode = prog_data->use_alt_mode;
4496 ps.MaximumNumberofThreadsPerPSD = 64 - (GFX_VER == 8 ? 2 : 1);
4498 ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4500 /* From the documentation for this packet:
4501 * "If the PS kernel does not need the Position XY Offsets to
4502 * compute a Position Value, then this field should be programmed
4503 * to POSOFFSET_NONE."
4505 * "SW Recommendation: If the PS kernel needs the Position Offsets
4506 * to compute a Position XY value, this field should match Position
4507 * ZW Interpolation Mode to ensure a consistent position.xyzw
4510 * We only require XY sample offsets, so this recommendation doesn't
4511 * look useful at the moment. We might need it in the future.
4513 ps.PositionXYOffsetSelect =
4514 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4516 if (prog_data->total_scratch)
4517 ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
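/* Encoding sketch (editorial): assuming total_scratch has been rounded
 * up to a power of two of at least 1KB, the field stores log2(size) - 10,
 * since ffs() on a power of two returns log2 + 1:
 *
 *    ffs(1024)  - 11 == 0  ->  1KB per thread
 *    ffs(2048)  - 11 == 1  ->  2KB per thread
 *    ffs(65536) - 11 == 6  -> 64KB per thread
 */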
4520 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4521 psx.PixelShaderValid = true;
4522 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4523 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4524 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4525 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4526 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4527 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4528 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4531 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4532 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4538 * Encode most of INTERFACE_DESCRIPTOR_DATA based on the compiled shader.
4543 iris_store_cs_state(const struct intel_device_info *devinfo,
4544 struct iris_compiled_shader *shader)
4546 struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4547 void *map = shader->derived_data;
4549 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4550 #if GFX_VERx10 < 125
4551 desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4552 desc.CrossThreadConstantDataReadLength =
4553 cs_prog_data->push.cross_thread.regs;
4555 assert(cs_prog_data->push.per_thread.regs == 0);
4556 assert(cs_prog_data->push.cross_thread.regs == 0);
4558 desc.BarrierEnable = cs_prog_data->uses_barrier;
4560 /* TODO: Check if we are missing workarounds and enable mid-thread
4561 * preemption.
4562 *
4563 * We still have issues with mid-thread preemption (it was already
4564 * disabled by the kernel on gfx11, due to missing workarounds). It's
4565 * possible that we are just missing some workarounds, and could enable
4566 * it later, but for now let's disable it to fix a GPU hang in compute
4567 * in Car Chase (and possibly more).
4569 desc.ThreadPreemptionDisable = true;
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
4575 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4577 assert(cache_id <= IRIS_CACHE_BLORP);
4579 static const unsigned dwords[] = {
4580 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4581 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4582 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4583 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4584 [IRIS_CACHE_FS] =
4585 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4586 [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4587 [IRIS_CACHE_BLORP] = 0,
4590 return sizeof(uint32_t) * dwords[cache_id];
4594 * Create any state packets corresponding to the given shader stage
4595 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4596 * This means that we can look up a program in the in-memory cache and
4597 * get most of the state packet without having to reconstruct it.
4600 iris_store_derived_program_state(const struct intel_device_info *devinfo,
4601 enum iris_program_cache_id cache_id,
4602 struct iris_compiled_shader *shader)
4604 switch (cache_id) {
4605 case IRIS_CACHE_VS:
4606 iris_store_vs_state(devinfo, shader);
4607 break;
4608 case IRIS_CACHE_TCS:
4609 iris_store_tcs_state(devinfo, shader);
4610 break;
4611 case IRIS_CACHE_TES:
4612 iris_store_tes_state(devinfo, shader);
4613 break;
4614 case IRIS_CACHE_GS:
4615 iris_store_gs_state(devinfo, shader);
4616 break;
4617 case IRIS_CACHE_FS:
4618 iris_store_fs_state(devinfo, shader);
4619 break;
4620 case IRIS_CACHE_CS:
4621 iris_store_cs_state(devinfo, shader);
4622 break;
4623 case IRIS_CACHE_BLORP:
4628 /* ------------------------------------------------------------------- */
4630 static const uint32_t push_constant_opcodes[] = {
4631 [MESA_SHADER_VERTEX] = 21,
4632 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4633 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4634 [MESA_SHADER_GEOMETRY] = 22,
4635 [MESA_SHADER_FRAGMENT] = 23,
4636 [MESA_SHADER_COMPUTE] = 0,
4640 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4642 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4644 iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4646 return ice->state.unbound_tex.offset;
4650 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4652 /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4653 if (!ice->state.null_fb.res)
4654 return use_null_surface(batch, ice);
4656 struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4658 iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4660 return ice->state.null_fb.offset;
4664 surf_state_offset_for_aux(struct iris_resource *res,
4665 unsigned aux_modes,
4666 enum isl_aux_usage aux_usage)
4668 assert(aux_modes & (1 << aux_usage));
4669 return SURFACE_STATE_ALIGNMENT *
4670 util_bitcount(aux_modes & ((1 << aux_usage) - 1));
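/* Worked example (illustrative): if aux_modes has, say, the bits for
 * ISL_AUX_USAGE_NONE, ISL_AUX_USAGE_HIZ, and ISL_AUX_USAGE_CCS_E set,
 * then for aux_usage == ISL_AUX_USAGE_CCS_E the two lower set bits are
 * counted and the state lives at 2 * SURFACE_STATE_ALIGNMENT from the
 * base of the surface state group.
 */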
4675 surf_state_update_clear_value(struct iris_batch *batch,
4676 struct iris_resource *res,
4677 struct iris_state_ref *state,
4678 unsigned aux_modes,
4679 enum isl_aux_usage aux_usage)
4681 struct isl_device *isl_dev = &batch->screen->isl_dev;
4682 struct iris_bo *state_bo = iris_resource_bo(state->res);
4683 uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4684 uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
4685 uint32_t clear_offset = offset_into_bo +
4686 isl_dev->ss.clear_value_offset +
4687 surf_state_offset_for_aux(res, aux_modes, aux_usage);
4688 uint32_t *color = res->aux.clear_color.u32;
4690 assert(isl_dev->ss.clear_value_size == 16);
4692 if (aux_usage == ISL_AUX_USAGE_HIZ) {
4693 iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4694 PIPE_CONTROL_WRITE_IMMEDIATE,
4695 state_bo, clear_offset, color[0]);
4697 iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4698 PIPE_CONTROL_WRITE_IMMEDIATE,
4699 state_bo, clear_offset,
4700 (uint64_t) color[0] |
4701 (uint64_t) color[1] << 32);
4702 iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4703 PIPE_CONTROL_WRITE_IMMEDIATE,
4704 state_bo, clear_offset + 8,
4705 (uint64_t) color[2] |
4706 (uint64_t) color[3] << 32);
4709 iris_emit_pipe_control_flush(batch,
4710 "update fast clear: state cache invalidate",
4711 PIPE_CONTROL_FLUSH_ENABLE |
4712 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4717 update_clear_value(struct iris_context *ice,
4718 struct iris_batch *batch,
4719 struct iris_resource *res,
4720 struct iris_surface_state *surf_state,
4721 unsigned all_aux_modes,
4722 struct isl_view *view)
4724 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4725 UNUSED unsigned aux_modes = all_aux_modes;
4727 /* We only need to update the clear color in the surface state for gfx8 and
4728 * gfx9. Newer gens can read it directly from the clear color state buffer.
4731 /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4732 aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4734 while (aux_modes) {
4735 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4737 surf_state_update_clear_value(batch, res, &surf_state->ref,
4738 all_aux_modes, aux_usage);
4741 /* TODO: Could update rather than re-filling */
4742 alloc_surface_states(surf_state, all_aux_modes);
4744 void *map = surf_state->cpu;
4746 while (aux_modes) {
4747 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4748 fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
4750 map += SURFACE_STATE_ALIGNMENT;
4753 upload_surface_states(ice->state.surface_uploader, surf_state);
4758 * Add a surface to the validation list, as well as the buffer containing
4759 * the corresponding SURFACE_STATE.
4761 * Returns the binding table entry (offset to SURFACE_STATE).
4764 use_surface(struct iris_context *ice,
4765 struct iris_batch *batch,
4766 struct pipe_surface *p_surf,
4767 bool writeable,
4768 enum isl_aux_usage aux_usage,
4769 bool is_read_surface,
4770 enum iris_domain access)
4772 struct iris_surface *surf = (void *) p_surf;
4773 struct iris_resource *res = (void *) p_surf->texture;
4774 uint32_t offset = 0;
4776 if (GFX_VER == 8 && is_read_surface && !surf->surface_state_read.ref.res) {
4777 upload_surface_states(ice->state.surface_uploader,
4778 &surf->surface_state_read);
4781 if (!surf->surface_state.ref.res) {
4782 upload_surface_states(ice->state.surface_uploader,
4783 &surf->surface_state);
4786 if (res->aux.bo) {
4787 iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
4788 if (res->aux.clear_color_bo)
4789 iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
4791 if (memcmp(&res->aux.clear_color, &surf->clear_color,
4792 sizeof(surf->clear_color)) != 0) {
4793 update_clear_value(ice, batch, res, &surf->surface_state,
4794 res->aux.possible_usages, &surf->view);
4796 update_clear_value(ice, batch, res, &surf->surface_state_read,
4797 res->aux.possible_usages, &surf->read_view);
4799 surf->clear_color = res->aux.clear_color;
4803 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture),
4804 writeable, access);
4805 if (GFX_VER == 8 && is_read_surface) {
4806 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
4809 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
4813 offset = (GFX_VER == 8 && is_read_surface)
4814 ? surf->surface_state_read.ref.offset
4815 : surf->surface_state.ref.offset;
4818 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4822 use_sampler_view(struct iris_context *ice,
4823 struct iris_batch *batch,
4824 struct iris_sampler_view *isv)
4826 enum isl_aux_usage aux_usage =
4827 iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4829 if (!isv->surface_state.ref.res)
4830 upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
4832 if (isv->res->aux.bo) {
4833 iris_use_pinned_bo(batch, isv->res->aux.bo,
4834 false, IRIS_DOMAIN_OTHER_READ);
4835 if (isv->res->aux.clear_color_bo)
4836 iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
4837 false, IRIS_DOMAIN_OTHER_READ);
4838 if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4839 sizeof(isv->clear_color)) != 0) {
4840 update_clear_value(ice, batch, isv->res, &isv->surface_state,
4841 isv->res->aux.sampler_usages, &isv->view);
4842 isv->clear_color = isv->res->aux.clear_color;
4846 iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
4847 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
4850 return isv->surface_state.ref.offset +
4851 surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4856 use_ubo_ssbo(struct iris_batch *batch,
4857 struct iris_context *ice,
4858 struct pipe_shader_buffer *buf,
4859 struct iris_state_ref *surf_state,
4860 bool writable, enum iris_domain access)
4862 if (!buf->buffer || !surf_state->res)
4863 return use_null_surface(batch, ice);
4865 iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
4866 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
4869 return surf_state->offset;
4873 use_image(struct iris_batch *batch, struct iris_context *ice,
4874 struct iris_shader_state *shs, const struct shader_info *info,
4875 unsigned i)
4877 struct iris_image_view *iv = &shs->image[i];
4878 struct iris_resource *res = (void *) iv->base.resource;
4880 if (!res)
4881 return use_null_surface(batch, ice);
4883 bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4885 iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
4886 iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
4887 false, IRIS_DOMAIN_NONE);
4889 if (res->aux.bo)
4890 iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
4892 enum isl_aux_usage aux_usage =
4893 iris_image_view_aux_usage(ice, &iv->base, info);
4895 return iv->surface_state.ref.offset +
4896 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4899 #define push_bt_entry(addr) \
4900 assert(addr >= binder_addr); \
4901 assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4902 if (!pin_only) bt_map[s++] = (addr) - binder_addr;
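/* Worked example (illustrative): binding table entries are stored
 * relative to the binder -- a SURFACE_STATE uploaded at
 * binder_addr + 0x180 is recorded in bt_map as 0x180. When pin_only is
 * set, the store is skipped and the referenced BOs are merely kept
 * resident.
 */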
4904 #define bt_assert(section) \
4905 if (!pin_only && shader->bt.used_mask[section] != 0) \
4906 assert(shader->bt.offsets[section] == s);
4909 * Populate the binding table for a given shader stage.
4911 * This fills out the table of pointers to surfaces required by the shader,
4912 * and also adds those buffers to the validation list so the kernel can make
4913 * resident before running our batch.
4916 iris_populate_binding_table(struct iris_context *ice,
4917 struct iris_batch *batch,
4918 gl_shader_stage stage,
4921 const struct iris_binder *binder = &ice->state.binder;
4922 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4926 struct iris_binding_table *bt = &shader->bt;
4927 UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4928 struct iris_shader_state *shs = &ice->state.shaders[stage];
4929 uint32_t binder_addr = binder->bo->gtt_offset;
4931 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4934 const struct shader_info *info = iris_get_shader_info(ice, stage);
4936 /* TCS passthrough doesn't need a binding table. */
4937 assert(stage == MESA_SHADER_TESS_CTRL);
4941 if (stage == MESA_SHADER_COMPUTE &&
4942 shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4943 /* surface for gl_NumWorkGroups */
4944 struct iris_state_ref *grid_data = &ice->state.grid_size;
4945 struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4946 iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
4947 IRIS_DOMAIN_OTHER_READ);
4948 iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
4950 push_bt_entry(grid_state->offset);
4953 if (stage == MESA_SHADER_FRAGMENT) {
4954 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4955 /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4956 if (cso_fb->nr_cbufs) {
4957 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4959 if (cso_fb->cbufs[i]) {
4960 addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4961 ice->state.draw_aux_usage[i], false,
4962 IRIS_DOMAIN_RENDER_WRITE);
4964 addr = use_null_fb_surface(batch, ice);
4966 push_bt_entry(addr);
4968 } else if (GFX_VER < 11) {
4969 uint32_t addr = use_null_fb_surface(batch, ice);
4970 push_bt_entry(addr);
4974 #define foreach_surface_used(index, group) \
4976 for (int index = 0; index < bt->sizes[group]; index++) \
4977 if (iris_group_index_to_bti(bt, group, index) != \
4978 IRIS_SURFACE_NOT_USED)
4980 foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
4981 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4983 if (cso_fb->cbufs[i]) {
4984 addr = use_surface(ice, batch, cso_fb->cbufs[i],
4985 false, ice->state.draw_aux_usage[i], true,
4986 IRIS_DOMAIN_OTHER_READ);
4987 push_bt_entry(addr);
4991 foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
4992 struct iris_sampler_view *view = shs->textures[i];
4993 uint32_t addr = view ? use_sampler_view(ice, batch, view)
4994 : use_null_surface(batch, ice);
4995 push_bt_entry(addr);
4998 foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
4999 uint32_t addr = use_image(batch, ice, shs, info, i);
5000 push_bt_entry(addr);
5003 foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
5004 uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
5005 &shs->constbuf_surf_state[i], false,
5006 IRIS_DOMAIN_OTHER_READ);
5007 push_bt_entry(addr);
5010 foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
5012 use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
5013 shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
5014 push_bt_entry(addr);
5018 /* XXX: YUV surfaces not implemented yet */
5019 bt_assert(plane_start[1], ...);
5020 bt_assert(plane_start[2], ...);
5025 iris_use_optional_res(struct iris_batch *batch,
5026 struct pipe_resource *res,
5028 enum iris_domain access)
5031 struct iris_bo *bo = iris_resource_bo(res);
5032 iris_use_pinned_bo(batch, bo, writeable, access);
5037 pin_depth_and_stencil_buffers(struct iris_batch *batch,
5038 struct pipe_surface *zsbuf,
5039 struct iris_depth_stencil_alpha_state *cso_zsa)
5044 struct iris_resource *zres, *sres;
5045 iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
5048 const enum iris_domain access = cso_zsa->depth_writes_enabled ?
5049 IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5050 iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
5053 iris_use_pinned_bo(batch, zres->aux.bo,
5054 cso_zsa->depth_writes_enabled, access);
5059 const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
5060 IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5061 iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
5067 pin_scratch_space(struct iris_context *ice,
5068 struct iris_batch *batch,
5069 const struct brw_stage_prog_data *prog_data,
5070 gl_shader_stage stage)
5072 uint32_t scratch_addr = 0;
5074 if (prog_data->total_scratch > 0) {
5075 struct iris_bo *scratch_bo =
5076 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5077 iris_use_pinned_bo(batch, scratch_bo, true, IRIS_DOMAIN_NONE);
5079 scratch_addr = scratch_bo->gtt_offset;
5082 return scratch_addr;
5085 /* ------------------------------------------------------------------- */
5088 * Pin any BOs which were installed by a previous batch, and restored
5089 * via the hardware logical context mechanism.
5091 * We don't need to re-emit all state every batch - the hardware context
5092 * mechanism will save and restore it for us. This includes pointers to
5093 * various BOs...which won't exist unless we ask the kernel to pin them
5094 * by adding them to the validation list.
5096 * We can skip buffers if we've re-emitted those packets, as we're
5097 * overwriting those stale pointers with new ones, and don't actually
5098 * refer to the old BOs.
5101 iris_restore_render_saved_bos(struct iris_context *ice,
5102 struct iris_batch *batch,
5103 const struct pipe_draw_info *draw)
5105 struct iris_genx_state *genx = ice->state.genx;
5107 const uint64_t clean = ~ice->state.dirty;
5108 const uint64_t stage_clean = ~ice->state.stage_dirty;
5110 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
5111 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
5115 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
5116 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
5120 if (clean & IRIS_DIRTY_BLEND_STATE) {
5121 iris_use_optional_res(batch, ice->state.last_res.blend, false,
5125 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
5126 iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
5130 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
5131 iris_use_optional_res(batch, ice->state.last_res.scissor, false,
5135 if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
5136 for (int i = 0; i < 4; i++) {
5137 struct iris_stream_output_target *tgt =
5138 (void *) ice->state.so_target[i];
5140 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5141 true, IRIS_DOMAIN_OTHER_WRITE);
5142 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5143 true, IRIS_DOMAIN_OTHER_WRITE);
5148 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5149 if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
5152 struct iris_shader_state *shs = &ice->state.shaders[stage];
5153 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5158 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5160 for (int i = 0; i < 4; i++) {
5161 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5163 if (range->length == 0)
5164 continue;
5166 /* Range block is a binding table index, map back to UBO index. */
5167 unsigned block_index = iris_bti_to_group_index(
5168 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5169 assert(block_index != IRIS_SURFACE_NOT_USED);
5171 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5172 struct iris_resource *res = (void *) cbuf->buffer;
5174 if (res)
5175 iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
5176 else
5177 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
5178 IRIS_DOMAIN_OTHER_READ);
5182 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5183 if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5184 /* Re-pin any buffers referred to by the binding table. */
5185 iris_populate_binding_table(ice, batch, stage, true);
5189 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5190 struct iris_shader_state *shs = &ice->state.shaders[stage];
5191 struct pipe_resource *res = shs->sampler_table.res;
5192 if (res)
5193 iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5197 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5198 if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
5199 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5202 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5203 iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5205 pin_scratch_space(ice, batch, shader->prog_data, stage);
5210 if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
5211 (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5212 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5213 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5216 iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
5217 IRIS_DOMAIN_OTHER_READ);
5219 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5220 uint64_t bound = ice->state.bound_vertex_buffers;
5221 while (bound) {
5222 const int i = u_bit_scan64(&bound);
5223 struct pipe_resource *res = genx->vertex_buffers[i].resource;
5224 iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5225 IRIS_DOMAIN_OTHER_READ);
5231 iris_restore_compute_saved_bos(struct iris_context *ice,
5232 struct iris_batch *batch,
5233 const struct pipe_grid_info *grid)
5235 const uint64_t stage_clean = ~ice->state.stage_dirty;
5237 const int stage = MESA_SHADER_COMPUTE;
5238 struct iris_shader_state *shs = &ice->state.shaders[stage];
5240 if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
5241 /* Re-pin any buffers referred to by the binding table. */
5242 iris_populate_binding_table(ice, batch, stage, true);
5245 struct pipe_resource *sampler_res = shs->sampler_table.res;
5246 if (sampler_res)
5247 iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
5250 if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
5251 (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
5252 (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
5253 (stage_clean & IRIS_STAGE_DIRTY_CS)) {
5254 iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
5258 if (stage_clean & IRIS_STAGE_DIRTY_CS) {
5259 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5262 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5263 iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5265 if (GFX_VERx10 < 125) {
5266 struct iris_bo *curbe_bo =
5267 iris_resource_bo(ice->state.last_res.cs_thread_ids);
5268 iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
5271 pin_scratch_space(ice, batch, shader->prog_data, stage);
5277 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5280 iris_update_surface_base_address(struct iris_batch *batch,
5281 struct iris_binder *binder)
5283 if (batch->last_surface_base_address == binder->bo->gtt_offset)
5286 struct isl_device *isl_dev = &batch->screen->isl_dev;
5287 uint32_t mocs = isl_mocs(isl_dev, 0, false);
5289 iris_batch_sync_region_start(batch);
5291 flush_before_state_base_change(batch);
5296 * Work around non-pipelined state not applying in MEDIA/GPGPU pipeline
5297 * mode by temporarily putting the pipeline in 3D mode.
5299 if (batch->name == IRIS_BATCH_COMPUTE)
5300 emit_pipeline_select(batch, _3D);
5303 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5304 sba.SurfaceStateBaseAddressModifyEnable = true;
5305 sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5307 /* The hardware appears to pay attention to the MOCS fields even
5308 * if you don't set the "Address Modify Enable" bit for the base.
5310 sba.GeneralStateMOCS = mocs;
5311 sba.StatelessDataPortAccessMOCS = mocs;
5312 sba.DynamicStateMOCS = mocs;
5313 sba.IndirectObjectMOCS = mocs;
5314 sba.InstructionMOCS = mocs;
5315 sba.SurfaceStateMOCS = mocs;
5317 sba.BindlessSurfaceStateMOCS = mocs;
5324 * Put the pipeline back into compute mode.
5326 if (batch->name == IRIS_BATCH_COMPUTE)
5327 emit_pipeline_select(batch, GPGPU);
5330 flush_after_state_base_change(batch);
5331 iris_batch_sync_region_end(batch);
5333 batch->last_surface_base_address = binder->bo->gtt_offset;
5337 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5338 bool window_space_position, float *zmin, float *zmax)
5340 if (window_space_position) {
5341 *zmin = 0.f;
5342 *zmax = 1.f;
5343 return;
5344 }
5345 util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
#if GFX_VER >= 12
void
genX(invalidate_aux_map_state)(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint32_t aux_map_state_num = intel_aux_map_get_state_num(aux_map_ctx);
   if (batch->last_aux_map_state != aux_map_state_num) {
      /* HSD 1209978178: docs say that before programming the aux table:
       *
       *    "Driver must ensure that the engine is IDLE but ensure it doesn't
       *     add extra flushes in the case it knows that the engine is already
       *     IDLE."
       *
       * An end of pipe sync is needed here, otherwise we see GPU hangs in
       * dEQP-GLES31.functional.copy_image.* tests.
       */
      iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
                                 PIPE_CONTROL_CS_STALL);

      /* If the aux-map state number increased, then we need to rewrite the
       * register.  Rewriting the register is used to both set the aux-map
       * translation table address, and also to invalidate any previously
       * cached translations.
       */
      iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
      batch->last_aux_map_state = aux_map_state_num;
   }
}
static void
init_aux_map_state(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint64_t base_addr = intel_aux_map_get_base(aux_map_ctx);
   assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
   iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
                            base_addr);
}
#endif
struct push_bos {
   struct {
      struct iris_address addr;
      uint32_t length;
   } buffers[4];
   int buffer_count;
   uint32_t max_length;
};
static void
setup_constant_buffers(struct iris_context *ice,
                       struct iris_batch *batch,
                       int stage,
                       struct push_bos *push_bos)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   uint32_t push_range_sum = 0;

   int n = 0;
   for (int i = 0; i < 4; i++) {
      const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

      if (range->length == 0)
         continue;

      push_range_sum += range->length;

      if (range->length > push_bos->max_length)
         push_bos->max_length = range->length;

      /* Range block is a binding table index, map back to UBO index. */
      unsigned block_index = iris_bti_to_group_index(
         &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
      assert(block_index != IRIS_SURFACE_NOT_USED);

      struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
      struct iris_resource *res = (void *) cbuf->buffer;

      assert(cbuf->buffer_offset % 32 == 0);

      push_bos->buffers[n].length = range->length;
      push_bos->buffers[n].addr =
         res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
             : batch->screen->workaround_address;
      n++;
   }

   /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
    *
    *    "The sum of all four read length fields must be less than or
    *     equal to the size of 64."
    */
   assert(push_range_sum <= 64);

   push_bos->buffer_count = n;
}
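/* A note on units for the function above: the range lengths are counted in
 * 256-bit (32-byte) registers - hence the "* 32" byte scaling and the
 * 32-byte offset alignment assert - so the documented limit of 64 works
 * out to 2KB of push constants per stage.  For example, ranges of length
 * 24 and 40 sum to exactly 64 and pass; 40 and 32 would trip the assert.
 */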
static void
emit_push_constant_packets(struct iris_context *ice,
                           struct iris_batch *batch,
                           int stage,
                           const struct push_bos *push_bos)
{
   UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
      pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
#if GFX_VER >= 12
      pkt.MOCS = isl_mocs(isl_dev, 0, false);
#endif
      if (prog_data) {
         /* The Skylake PRM contains the following restriction:
          *
          *    "The driver must ensure The following case does not occur
          *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
          *     buffer 3 read length equal to zero committed followed by a
          *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
          *     zero committed."
          *
          * To avoid this, we program the buffers in the highest slots.
          * This way, slot 0 is only used if slot 3 is also used.
          */
         int n = push_bos->buffer_count;
         assert(n <= 4);
         const unsigned shift = 4 - n;
         for (int i = 0; i < n; i++) {
            pkt.ConstantBody.ReadLength[i + shift] =
               push_bos->buffers[i].length;
            pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
         }
      }
   }
}
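/* For instance, with two push ranges the shift of 4 - 2 places them in
 * ConstantBody slots 2 and 3, leaving slots 0 and 1 empty.  Slot 0 is
 * therefore never in use while slot 3 reads zero, which is exactly the
 * case the PRM quote above forbids.
 */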
#if GFX_VER >= 12
static void
emit_push_constant_packet_all(struct iris_context *ice,
                              struct iris_batch *batch,
                              uint32_t shader_mask,
                              const struct push_bos *push_bos)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;

   if (!push_bos) {
      iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
         pc.ShaderUpdateEnable = shader_mask;
         pc.MOCS = isl_mocs(isl_dev, 0, false);
      }
      return;
   }

   const uint32_t n = push_bos->buffer_count;
   const uint32_t max_pointers = 4;
   const uint32_t num_dwords = 2 + 2 * n;
   uint32_t const_all[2 + 2 * max_pointers];
   uint32_t *dw = &const_all[0];

   assert(n <= max_pointers);
   iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
      all.DWordLength = num_dwords - 2;
      all.MOCS = isl_mocs(isl_dev, 0, false);
      all.ShaderUpdateEnable = shader_mask;
      all.PointerBufferMask = (1 << n) - 1;
   }
   dw += 2;

   for (int i = 0; i < n; i++) {
      _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
                       dw + i * 2, data) {
         data.PointerToConstantBuffer = push_bos->buffers[i].addr;
         data.ConstantBufferReadLength = push_bos->buffers[i].length;
      }
   }
   iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
}
#endif
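/* Size sketch for the packet above: 2 header DWords plus one 2-DWord
 * CONSTANT_ALL_DATA entry per buffer, so n = 3 buffers emits
 * 2 + 2*3 = 8 DWords, with DWordLength encoded as num_dwords - 2 = 6.
 */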
static void
iris_upload_dirty_render_state(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_draw_info *draw)
{
   const uint64_t dirty = ice->state.dirty;
   const uint64_t stage_dirty = ice->state.stage_dirty;

   if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
       !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
      return;

   struct iris_genx_state *genx = ice->state.genx;
   struct iris_binder *binder = &ice->state.binder;
   struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;

   if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
      const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      uint32_t cc_vp_address;

      /* XXX: could avoid streaming for depth_clip [0,1] case. */
      uint32_t *cc_vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cc_vp,
                      4 * ice->state.num_viewports *
                      GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
      for (int i = 0; i < ice->state.num_viewports; i++) {
         float zmin, zmax;
         iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
                                 ice->state.window_space_position,
                                 &zmin, &zmax);
         if (cso_rast->depth_clip_near)
            zmin = 0.0;
         if (cso_rast->depth_clip_far)
            zmax = 1.0;

         iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
            ccv.MinimumDepth = zmin;
            ccv.MaximumDepth = zmax;
         }

         cc_vp_map += GENX(CC_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
         ptr.CCViewportPointer = cc_vp_address;
      }
   }
   if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      uint32_t sf_cl_vp_address;
      uint32_t *vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.sf_cl_vp,
                      4 * ice->state.num_viewports *
                      GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);

      for (unsigned i = 0; i < ice->state.num_viewports; i++) {
         const struct pipe_viewport_state *state = &ice->state.viewports[i];
         float gb_xmin, gb_xmax, gb_ymin, gb_ymax;

         float vp_xmin = viewport_extent(state, 0, -1.0f);
         float vp_xmax = viewport_extent(state, 0, 1.0f);
         float vp_ymin = viewport_extent(state, 1, -1.0f);
         float vp_ymax = viewport_extent(state, 1, 1.0f);

         intel_calculate_guardband_size(cso_fb->width, cso_fb->height,
                                        state->scale[0], state->scale[1],
                                        state->translate[0], state->translate[1],
                                        &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

         iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
            vp.ViewportMatrixElementm00 = state->scale[0];
            vp.ViewportMatrixElementm11 = state->scale[1];
            vp.ViewportMatrixElementm22 = state->scale[2];
            vp.ViewportMatrixElementm30 = state->translate[0];
            vp.ViewportMatrixElementm31 = state->translate[1];
            vp.ViewportMatrixElementm32 = state->translate[2];
            vp.XMinClipGuardband = gb_xmin;
            vp.XMaxClipGuardband = gb_xmax;
            vp.YMinClipGuardband = gb_ymin;
            vp.YMaxClipGuardband = gb_ymax;
            vp.XMinViewPort = MAX2(vp_xmin, 0);
            vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
            vp.YMinViewPort = MAX2(vp_ymin, 0);
            vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
         }

         vp_map += GENX(SF_CLIP_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
         ptr.SFClipViewportPointer = sf_cl_vp_address;
      }
   }
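   /* Roughly, vp_xmin/vp_xmax above are the viewport's screen-space X
    * extent (translate[0] -/+ scale[0] for an unflipped viewport): e.g. a
    * full-width viewport with scale = width/2 and translate = width/2
    * spans 0..width, which XMin/XMaxViewPort then clamp to the
    * framebuffer bounds.
    */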
   if (dirty & IRIS_DIRTY_URB) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         if (!ice->shaders.prog[i]) {
            ice->shaders.urb.size[i] = 1;
         } else {
            struct brw_vue_prog_data *vue_prog_data =
               (void *) ice->shaders.prog[i]->prog_data;
            ice->shaders.urb.size[i] = vue_prog_data->urb_entry_size;
         }
         assert(ice->shaders.urb.size[i] != 0);
      }

      intel_get_urb_config(&batch->screen->devinfo,
                           batch->screen->l3_config_3d,
                           ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
                           ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
                           ice->shaders.urb.size,
                           ice->shaders.urb.entries,
                           ice->shaders.urb.start,
                           &ice->state.urb_deref_block_size,
                           &ice->shaders.urb.constrained);

      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
            urb._3DCommandSubOpcode += i;
            urb.VSURBStartingAddress = ice->shaders.urb.start[i];
            urb.VSURBEntryAllocationSize = ice->shaders.urb.size[i] - 1;
            urb.VSNumberofURBEntries = ice->shaders.urb.entries[i];
         }
      }
   }
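   /* The four URB packets (VS/HS/DS/GS) share one layout and have
    * consecutive 3D command sub-opcodes, so bumping _3DCommandSubOpcode by
    * the stage index turns the 3DSTATE_URB_VS template above into the
    * matching HS, DS, or GS packet.
    */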
   if (dirty & IRIS_DIRTY_BLEND_STATE) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const int header_dwords = GENX(BLEND_STATE_length);

      /* Always write at least one BLEND_STATE - the final RT message will
       * reference BLEND_STATE[0] even if there aren't color writes.  There
       * may still be alpha testing, computed depth, and so on.
       */
      const int rt_dwords =
         MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);

      uint32_t blend_offset;
      uint32_t *blend_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.blend,
                      4 * (header_dwords + rt_dwords), 64, &blend_offset);

      uint32_t blend_state_header;
      iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
         bs.AlphaTestEnable = cso_zsa->alpha_enabled;
         bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha_func);
      }

      blend_map[0] = blend_state_header | cso_blend->blend_state[0];
      memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);

      iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
         ptr.BlendStatePointer = blend_offset;
         ptr.BlendStatePointerValid = true;
      }
   }
   if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t cc_offset;
      void *cc_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.color_calc,
                      sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
                      64, &cc_offset);
      iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
         cc.AlphaTestFormat = ALPHATEST_FLOAT32;
         cc.AlphaReferenceValueAsFLOAT32 = cso->alpha_ref_value;
         cc.BlendConstantColorRed = ice->state.blend_color.color[0];
         cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
         cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
         cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
#if GFX_VER == 8
         cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
         cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
#endif
      }
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
         ptr.ColorCalcStatePointer = cc_offset;
         ptr.ColorCalcStatePointerValid = true;
      }
   }
   /*
    * 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
    *
    * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
    * any stage has a dirty binding table.
    */
   const bool emit_const_wa = GFX_VER >= 11 &&
      ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
       (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));

#if GFX_VER >= 12
   uint32_t nobuffer_stages = 0;
#endif

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
          !emit_const_wa)
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      if (shs->sysvals_need_upload)
         upload_sysvals(ice, stage, NULL);

      struct push_bos push_bos = {};
      setup_constant_buffers(ice, batch, stage, &push_bos);

#if GFX_VER >= 12
      /* If this stage doesn't have any push constants, emit it later in a
       * single CONSTANT_ALL packet with all the other stages.
       */
      if (push_bos.buffer_count == 0) {
         nobuffer_stages |= 1 << stage;
         continue;
      }

      /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
       * contains only 5 bits, so we can only use it for buffers smaller
       * than 32.
       */
      if (push_bos.max_length < 32) {
         emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
         continue;
      }
#endif
      emit_push_constant_packets(ice, batch, stage, &push_bos);
   }

#if GFX_VER >= 12
   if (nobuffer_stages)
      emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
#endif
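   /* Worked example of the Gfx12 path above: if the VS pushes one
    * 20-register range and the FS pushes nothing, the VS gets its own
    * CONSTANT_ALL packet (20 < 32 fits the 5-bit read length field) while
    * the FS is accumulated into nobuffer_stages and flushed by the single
    * mask-only CONSTANT_ALL just above.
    */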
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      /* Gfx9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
       * in order to commit constants.  TODO: Investigate "Disable Gather
       * at Set Shader" to go back to legacy mode...
       */
      if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
                          (GFX_VER == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
                         << stage)) {
         iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
            ptr._3DCommandSubOpcode = 38 + stage;
            ptr.PointertoVSBindingTable = binder->bt_offset[stage];
         }
      }
   }
   if (GFX_VER >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
      // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
      // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6

      /* The PIPE_CONTROL command description says:
       *
       *    "Whenever a Binding Table Index (BTI) used by a Render Target
       *     Message points to a different RENDER_SURFACE_STATE, SW must issue
       *     a Render Target Cache Flush by enabling this bit.  When render
       *     target flush is set due to new association of BTI, PS Scoreboard
       *     Stall bit must be set in this packet."
       */
      // XXX: does this need to happen at 3DSTATE_BTP_PS time?
      iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
         iris_populate_binding_table(ice, batch, stage, false);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
          !ice->shaders.prog[stage])
         continue;

      iris_upload_sampler_states(ice, stage);

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_NONE);

      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
         ptr._3DCommandSubOpcode = 43 + stage;
         ptr.PointertoVSSamplerState = shs->sampler_table.offset;
      }
   }

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);
   if (dirty & IRIS_DIRTY_MULTISAMPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
         ms.PixelLocation =
            ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
         if (ice->state.framebuffer.samples > 0)
            ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
      }
   }

   if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
         ms.SampleMask = ice->state.sample_mask;
      }
   }
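   /* For the power-of-two sample counts the hardware supports,
    * ffs(samples) - 1 equals log2(samples): 4x MSAA programs
    * NumberofMultisamples to 2, and single-sampling programs 0.
    */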
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
         continue;

      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct brw_stage_prog_data *prog_data = shader->prog_data;
         struct iris_resource *cache = (void *) shader->assembly.res;
         iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);

         uint32_t scratch_addr =
            pin_scratch_space(ice, batch, prog_data, stage);

         if (stage == MESA_SHADER_FRAGMENT) {
            UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
            struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

            uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
            _iris_pack_command(batch, GENX(3DSTATE_PS), ps_state, ps) {
               ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
               ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
               ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;

               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
                *
                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
                *     mode."
                *
                * 16x MSAA only exists on Gfx9+, so we can skip this on Gfx8.
                */
               if (GFX_VER >= 9 && cso_fb->samples == 16 &&
                   !wm_prog_data->persample_dispatch) {
                  assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
                  ps._32PixelDispatchEnable = false;
               }

               ps.DispatchGRFStartRegisterForConstantSetupData0 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
               ps.DispatchGRFStartRegisterForConstantSetupData1 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
               ps.DispatchGRFStartRegisterForConstantSetupData2 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);

               ps.KernelStartPointer0 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
               ps.KernelStartPointer1 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
               ps.KernelStartPointer2 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);

               ps.ScratchSpaceBasePointer =
                  rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
            }

            uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
            iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
#if GFX_VER >= 9
               if (!wm_prog_data->uses_sample_mask)
                  psx.InputCoverageMaskState = ICMS_NONE;
               else if (wm_prog_data->post_depth_coverage)
                  psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
               else if (wm_prog_data->inner_coverage &&
                        cso->conservative_rasterization)
                  psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
               else
                  psx.InputCoverageMaskState = ICMS_NORMAL;
#else
               psx.PixelShaderUsesInputCoverageMask =
                  wm_prog_data->uses_sample_mask;
#endif
            }

            uint32_t *shader_ps = (uint32_t *) shader->derived_data;
            uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
            iris_emit_merge(batch, shader_ps, ps_state,
                            GENX(3DSTATE_PS_length));
            iris_emit_merge(batch, shader_psx, psx_state,
                            GENX(3DSTATE_PS_EXTRA_length));
         } else if (scratch_addr) {
            uint32_t *pkt = (uint32_t *) shader->derived_data;
            switch (stage) {
            case MESA_SHADER_VERTEX:    MERGE_SCRATCH_ADDR(3DSTATE_VS); break;
            case MESA_SHADER_TESS_CTRL: MERGE_SCRATCH_ADDR(3DSTATE_HS); break;
            case MESA_SHADER_TESS_EVAL: MERGE_SCRATCH_ADDR(3DSTATE_DS); break;
            case MESA_SHADER_GEOMETRY:  MERGE_SCRATCH_ADDR(3DSTATE_GS); break;
            }
         } else {
            iris_batch_emit(batch, shader->derived_data,
                            iris_derived_program_state_size(stage));
         }
      } else {
         if (stage == MESA_SHADER_TESS_EVAL) {
            iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
            iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
            iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
         } else if (stage == MESA_SHADER_GEOMETRY) {
            iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
         }
      }
   }
   if (ice->state.streamout_active) {
      if (dirty & IRIS_DIRTY_SO_BUFFERS) {
         for (int i = 0; i < 4; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            const uint32_t dwords = GENX(3DSTATE_SO_BUFFER_length);
            uint32_t *so_buffers = genx->so_buffers + i * dwords;
            bool zero_offset = false;

            if (tgt) {
               zero_offset = tgt->zero_offset;
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
            }

            if (zero_offset) {
               /* Skip the last DWord which contains "Stream Offset" of
                * 0xFFFFFFFF and instead emit a dword of zero directly.
                */
               STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_StreamOffset_start) ==
                             32 * (dwords - 1));
               const uint32_t zero = 0;
               iris_batch_emit(batch, so_buffers, 4 * (dwords - 1));
               iris_batch_emit(batch, &zero, sizeof(zero));
               tgt->zero_offset = false;
            } else {
               iris_batch_emit(batch, so_buffers, 4 * dwords);
            }
         }
      }
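      /* A Stream Offset of 0xFFFFFFFF tells the hardware to reload the
       * previous write offset from the target's offset buffer (append
       * mode); replacing that final DWord with a literal 0, as above,
       * restarts writing at the beginning of the buffer instead.
       */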
      if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
         uint32_t *decl_list =
            ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
         iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
      }

      if (dirty & IRIS_DIRTY_STREAMOUT) {
         const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

         uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
         iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
            sol.SOFunctionEnable = true;
            sol.SOStatisticsEnable = true;

            sol.RenderingDisable = cso_rast->rasterizer_discard &&
                                   !ice->state.prims_generated_query_active;
            sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
         }

         assert(ice->state.streamout);

         iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
                         GENX(3DSTATE_STREAMOUT_length));
      }
   } else {
      if (dirty & IRIS_DIRTY_STREAMOUT) {
         iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
      }
   }
   if (dirty & IRIS_DIRTY_CLIP) {
      struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

      bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
                       ice->shaders.prog[MESA_SHADER_TESS_EVAL];
      bool points_or_lines = cso_rast->fill_mode_point_or_line ||
         (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
                    : ice->state.prim_is_points_or_lines);

      uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
      iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
         cl.StatisticsEnable = ice->state.statistics_counters_enabled;
         if (cso_rast->rasterizer_discard)
            cl.ClipMode = CLIPMODE_REJECT_ALL;
         else if (ice->state.window_space_position)
            cl.ClipMode = CLIPMODE_ACCEPT_ALL;
         else
            cl.ClipMode = CLIPMODE_NORMAL;

         cl.PerspectiveDivideDisable = ice->state.window_space_position;
         cl.ViewportXYClipTestEnable = !points_or_lines;

         if (wm_prog_data->barycentric_interp_modes &
             BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
            cl.NonPerspectiveBarycentricEnable = true;

         cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
         cl.MaximumVPIndex = ice->state.num_viewports - 1;
      }
      iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
                      ARRAY_SIZE(cso_rast->clip));
   }
   if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->raster, sizeof(cso->raster));

      uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
      iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
         sf.ViewportTransformEnable = !ice->state.window_space_position;

#if GFX_VER >= 12
         sf.DerefBlockSize = ice->state.urb_deref_block_size;
#endif
      }
      iris_emit_merge(batch, cso->sf, dynamic_sf,
                      ARRAY_SIZE(dynamic_sf));
   }
   if (dirty & IRIS_DIRTY_WM) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];

      iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
         wm.StatisticsEnable = ice->state.statistics_counters_enabled;

         wm.BarycentricInterpolationMode =
            wm_prog_data->barycentric_interp_modes;

         if (wm_prog_data->early_fragment_tests)
            wm.EarlyDepthStencilControl = EDSC_PREPS;
         else if (wm_prog_data->has_side_effects)
            wm.EarlyDepthStencilControl = EDSC_PSEXEC;

         /* We could skip this bit if color writes are enabled. */
         if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
            wm.ForceThreadDispatchEnable = ForceON;
      }
      iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
   }
   if (dirty & IRIS_DIRTY_SBE) {
      iris_emit_sbe(batch, ice);
   }

   if (dirty & IRIS_DIRTY_PS_BLEND) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const struct shader_info *fs_info =
         iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

      uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
      iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
         pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
         pb.AlphaTestEnable = cso_zsa->alpha_enabled;

         /* The dual source blending docs caution against using SRC1 factors
          * when the shader doesn't use a dual source render target write.
          * Empirically, this can lead to GPU hangs, and the results are
          * undefined anyway, so simply disable blending to avoid the hang.
          */
         pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
            (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
      }

      iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
                      ARRAY_SIZE(cso_blend->ps_blend));
   }
   if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
#if GFX_VER >= 9 && GFX_VER < 12
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
      }
      iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
#else
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
#endif

#if GFX_VER >= 12
      iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
#endif
   }

   if (dirty & IRIS_DIRTY_STENCIL_REF) {
#if GFX_VER >= 12
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
         wmds.StencilTestMaskModifyDisable = true;
         wmds.StencilWriteMaskModifyDisable = true;
         wmds.StencilStateModifyDisable = true;
         wmds.DepthStateModifyDisable = true;
      }
      iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
#endif
   }
   if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
      /*
       * "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
       *  stored as an array of up to 16 elements. The location of first
       *  element of the array, as specified by Pointer to SCISSOR_RECT,
       *  should be aligned to a 64-byte boundary."
       */
      uint32_t alignment = 64;
      uint32_t scissor_offset =
         emit_state(batch, ice->state.dynamic_uploader,
                    &ice->state.last_res.scissor,
                    ice->state.scissors,
                    sizeof(struct pipe_scissor_state) *
                    ice->state.num_viewports, alignment);

      iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
         ptr.ScissorRectPointer = scissor_offset;
      }
   }
   if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

      /* Do not emit the clear params yet.  We need to update the clear value
       * first.
       */
      uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
      uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;

#if GFX_VERx10 == 120
      /* Wa_14010455700
       *
       * ISL will change some CHICKEN registers depending on the depth surface
       * format, along with emitting the depth and stencil packets. In that
       * case, we want to do a depth flush and stall, so the pipeline is not
       * using these settings while we change the registers.
       */
      iris_emit_end_of_pipe_sync(batch,
                                 "Workaround: Stop pipeline for 14010455700",
                                 PIPE_CONTROL_DEPTH_STALL |
                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
#endif

      iris_batch_emit(batch, cso_z->packets, cso_z_size);

      if (GFX_VER >= 12) {
         /*
          * Workaround: Gfx12LP Astep only An additional pipe control with
          * post-sync = store dword operation would be required.( w/a is to
          * have an additional pipe control after the stencil state whenever
          * the surface state bits of this state is changing).
          */
         iris_emit_pipe_control_write(batch, "WA for stencil state",
                                      PIPE_CONTROL_WRITE_IMMEDIATE,
                                      batch->screen->workaround_address.bo,
                                      batch->screen->workaround_address.offset,
                                      0);
      }

      union isl_color_value clear_value = { .f32 = { 0, } };

      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      if (cso_fb->zsbuf) {
         struct iris_resource *zres, *sres;
         iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                          &zres, &sres);
         if (zres && zres->aux.bo)
            clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
      }

      uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
      iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
         clear.DepthClearValueValid = true;
         clear.DepthClearValue = clear_value.f32[0];
      }
      iris_batch_emit(batch, clear_params, clear_length);
   }

   if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
      /* Listen for buffer changes, and also write enable changes. */
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
   }
   if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
         for (int i = 0; i < 32; i++) {
            poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
         }
      }
   }

   if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
   }

   if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
         topo.PrimitiveTopologyType =
            translate_prim_type(draw->mode, draw->vertices_per_patch);
      }
   }
   if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
      int count = util_bitcount64(ice->state.bound_vertex_buffers);
      uint64_t dynamic_bound = ice->state.bound_vertex_buffers;

      if (ice->state.vs_uses_draw_params) {
         assert(ice->draw.draw_params.res);

         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
         struct iris_resource *res = (void *) state->resource;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
                                ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
#if GFX_VER >= 12
            vb.L3BypassDisable = true;
#endif
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (ice->state.vs_uses_derived_draw_params) {
         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource,
                                 ice->draw.derived_draw_params.res);
         struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize =
               res->bo->size - ice->draw.derived_draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.derived_draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
                                ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
#if GFX_VER >= 12
            vb.L3BypassDisable = true;
#endif
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (count) {
#if GFX_VER >= 11
         /* Gfx11+ doesn't need the cache workaround below */
         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
                                  false, IRIS_DOMAIN_OTHER_READ);
         }
#else
         /* The VF cache designers cut corners, and made the cache key's
          * <VertexBufferIndex, Memory Address> tuple only consider the bottom
          * 32 bits of the address.  If you have two vertex buffers which get
          * placed exactly 4 GiB apart and use them in back-to-back draw calls,
          * you can get collisions (even within a single batch).
          *
          * So, we need to do a VF cache invalidate if the buffer for a VB
          * slot changes [48:32] address bits from the previous time.
          */
         unsigned flush_flags = 0;

         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            uint16_t high_bits = 0;

            struct iris_resource *res =
               (void *) genx->vertex_buffers[i].resource;
            if (res) {
               iris_use_pinned_bo(batch, res->bo, false,
                                  IRIS_DOMAIN_OTHER_READ);

               high_bits = res->bo->gtt_offset >> 32ull;
               if (high_bits != ice->state.last_vbo_high_bits[i]) {
                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                 PIPE_CONTROL_CS_STALL;
                  ice->state.last_vbo_high_bits[i] = high_bits;
               }
            }
         }

         if (flush_flags) {
            iris_emit_pipe_control_flush(batch,
                                         "workaround: VF cache 32-bit key [VB]",
                                         flush_flags);
         }
#endif

         const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);

         uint32_t *map =
            iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
         _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
            vb.DWordLength = (vb_dwords * count + 1) - 2;
         }
         map += 1;

         bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            memcpy(map, genx->vertex_buffers[i].state,
                   sizeof(uint32_t) * vb_dwords);
            map += vb_dwords;
         }
      }
   }
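   /* Example of the aliasing described above: buffers at 0x00001000 and
    * 0x100001000 differ only above bit 31, so both map to the same VF
    * cache key.  Tracking last_vbo_high_bits means we only pay for the
    * invalidate when a slot's upper address bits actually change.
    */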
   if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
      const unsigned entries = MAX2(cso->count, 1);
      if (!(ice->state.vs_needs_sgvs_element ||
            ice->state.vs_uses_derived_draw_params ||
            ice->state.vs_needs_edge_flag)) {
         iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
                         (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
      } else {
         uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
         const unsigned dyn_count = cso->count +
            ice->state.vs_needs_sgvs_element +
            ice->state.vs_uses_derived_draw_params;

         iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
                           &dynamic_ves[0], ve) {
            ve.DWordLength =
               1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
         }
         memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
                (cso->count - ice->state.vs_needs_edge_flag) *
                GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
         uint32_t *ve_pack_dest =
            &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
                         GENX(VERTEX_ELEMENT_STATE_length)];

         if (ice->state.vs_needs_sgvs_element) {
            uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
                                 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers);
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = base_ctrl;
               ve.Component1Control = base_ctrl;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_uses_derived_draw_params) {
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers) +
                  ice->state.vs_uses_draw_params;
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = VFCOMP_STORE_SRC;
               ve.Component1Control = VFCOMP_STORE_SRC;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_needs_edge_flag) {
            for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
               ve_pack_dest[i] = cso->edgeflag_ve[i];
         }

         iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
                         (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
      }

      if (!ice->state.vs_needs_edge_flag) {
         iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      } else {
         assert(cso->count > 0);
         const unsigned edgeflag_index = cso->count - 1;
         uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
         memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
                GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));

         uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
            edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
         iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
            vi.VertexElementIndex = edgeflag_index +
               ice->state.vs_needs_sgvs_element +
               ice->state.vs_uses_derived_draw_params;
         }
         for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
            vfi_pack_dest[i] |= cso->edgeflag_vfi[i];

         iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      }
   }
   if (dirty & IRIS_DIRTY_VF_SGVS) {
      const struct brw_vs_prog_data *vs_prog_data = (void *)
         ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;

      iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
         if (vs_prog_data->uses_vertexid) {
            sgv.VertexIDEnable = true;
            sgv.VertexIDComponentNumber = 2;
            sgv.VertexIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }

         if (vs_prog_data->uses_instanceid) {
            sgv.InstanceIDEnable = true;
            sgv.InstanceIDComponentNumber = 3;
            sgv.InstanceIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
         if (draw->primitive_restart) {
            vf.IndexedDrawCutIndexEnable = true;
            vf.CutIndex = draw->restart_index;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF_STATISTICS) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
         vf.StatisticsEnable = true;
      }
   }

#if GFX_VER == 8
   if (dirty & IRIS_DIRTY_PMA_FIX) {
      bool enable = want_pma_fix(ice);
      genX(update_pma_fix)(ice, batch, enable);
   }
#endif

   if (ice->state.current_hash_scale != 1)
      genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);

#if GFX_VER >= 12
   genX(invalidate_aux_map_state)(batch);
#endif
}
static void
iris_upload_render_state(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_draw_info *draw,
                         unsigned drawid_offset,
                         const struct pipe_draw_indirect_info *indirect,
                         const struct pipe_draw_start_count_bias *sc)
{
   bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;

   iris_batch_sync_region_start(batch);

   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false,
                      IRIS_DOMAIN_NONE);

   if (!batch->contains_draw) {
      if (GFX_VER == 12) {
         /* Re-emit constants when starting a new batch buffer in order to
          * work around push constant corruption on context switch.
          *
          * XXX - Provide hardware spec quotation when available.
          */
         ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_CONSTANTS_VS |
                                    IRIS_STAGE_DIRTY_CONSTANTS_TCS |
                                    IRIS_STAGE_DIRTY_CONSTANTS_TES |
                                    IRIS_STAGE_DIRTY_CONSTANTS_GS |
                                    IRIS_STAGE_DIRTY_CONSTANTS_FS);
      }
      batch->contains_draw = true;
   }

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_render_saved_bos(ice, batch, draw);
      batch->contains_draw_with_next_seqno = true;
   }

   iris_upload_dirty_render_state(ice, batch, draw);
   if (draw->index_size > 0) {
      unsigned offset;

      if (draw->has_user_indices) {
         unsigned start_offset = draw->index_size * sc->start;

         u_upload_data(ice->ctx.const_uploader, start_offset,
                       sc->count * draw->index_size, 4,
                       (char*)draw->index.user + start_offset,
                       &offset, &ice->state.last_res.index_buffer);
         offset -= start_offset;
      } else {
         struct iris_resource *res = (void *) draw->index.resource;
         res->bind_history |= PIPE_BIND_INDEX_BUFFER;

         pipe_resource_reference(&ice->state.last_res.index_buffer,
                                 draw->index.resource);
         offset = 0;
      }

      struct iris_genx_state *genx = ice->state.genx;
      struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);

      uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
      iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
         ib.IndexFormat = draw->index_size >> 1;
         ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
                             ISL_SURF_USAGE_INDEX_BUFFER_BIT);
         ib.BufferSize = bo->size - offset;
         ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
#if GFX_VER >= 12
         ib.L3BypassDisable = true;
#endif
      }

      if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
         memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
         iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
         iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_OTHER_READ);
      }

#if GFX_VER < 11
      /* The VF cache key only uses 32-bits, see vertex buffer comment above */
      uint16_t high_bits = bo->gtt_offset >> 32ull;
      if (high_bits != ice->state.last_index_bo_high_bits) {
         iris_emit_pipe_control_flush(batch,
                                      "workaround: VF cache 32-bit key [IB]",
                                      PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                      PIPE_CONTROL_CS_STALL);
         ice->state.last_index_bo_high_bits = high_bits;
      }
#endif
   }
#define _3DPRIM_END_OFFSET          0x2420
#define _3DPRIM_START_VERTEX        0x2430
#define _3DPRIM_VERTEX_COUNT        0x2434
#define _3DPRIM_INSTANCE_COUNT      0x2438
#define _3DPRIM_START_INSTANCE      0x243C
#define _3DPRIM_BASE_VERTEX         0x2440
   if (indirect && !indirect->count_from_stream_output) {
      if (indirect->indirect_draw_count) {
         use_predicate = true;

         struct iris_bo *draw_count_bo =
            iris_resource_bo(indirect->indirect_draw_count);
         unsigned draw_count_offset =
            indirect->indirect_draw_count_offset;

         iris_emit_pipe_control_flush(batch,
                                      "ensure indirect draw buffer is flushed",
                                      PIPE_CONTROL_FLUSH_ENABLE);

         if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
            struct mi_builder b;
            mi_builder_init(&b, &batch->screen->devinfo, batch);

            /* comparison = draw id < draw count */
            struct mi_value comparison =
               mi_ult(&b, mi_imm(drawid_offset),
                          mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));

            /* predicate = comparison & conditional rendering predicate */
            mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
                     mi_iand(&b, comparison, mi_reg32(CS_GPR(15))));
         } else {
            uint32_t mi_predicate;

            /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
            iris_load_register_imm64(batch, MI_PREDICATE_SRC1, drawid_offset);
            /* Upload the current draw count from the draw parameters buffer
             * to MI_PREDICATE_SRC0.
             */
            iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
                                     draw_count_bo, draw_count_offset);
            /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
            iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);

            if (drawid_offset == 0) {
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                              MI_PREDICATE_COMBINEOP_SET |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            } else {
               /* While draw_index < draw_count the predicate's result will be
                *  (draw_index == draw_count) ^ TRUE = TRUE
                * When draw_index == draw_count the result is
                *  (TRUE) ^ TRUE = FALSE
                * After this all results will be:
                *  (FALSE) ^ FALSE = FALSE
                */
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
                              MI_PREDICATE_COMBINEOP_XOR |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            }
            iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
         }
      }

      struct iris_bo *bo = iris_resource_bo(indirect->buffer);

      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
         lrm.MemoryAddress = ro_bo(bo, indirect->offset + 0);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
         lrm.MemoryAddress = ro_bo(bo, indirect->offset + 4);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_START_VERTEX;
         lrm.MemoryAddress = ro_bo(bo, indirect->offset + 8);
      }
      if (draw->index_size) {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
            lrm.MemoryAddress = ro_bo(bo, indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, indirect->offset + 16);
         }
      } else {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
            lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
         }
      }
   } else if (indirect && indirect->count_from_stream_output) {
      struct iris_stream_output_target *so =
         (void *) indirect->count_from_stream_output;

      /* XXX: Replace with actual cache tracking */
      iris_emit_pipe_control_flush(batch,
                                   "draw count from stream output stall",
                                   PIPE_CONTROL_CS_STALL);

      struct mi_builder b;
      mi_builder_init(&b, &batch->screen->devinfo, batch);

      struct iris_address addr =
         ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
      struct mi_value offset =
         mi_iadd_imm(&b, mi_mem32(addr), -so->base.buffer_offset);

      mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
                   mi_udiv32_imm(&b, offset, so->stride));

      _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
      _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
   }
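   /* The stream-output path above computes, entirely in GPU commands:
    *    vertex_count = (so_write_offset - buffer_offset) / stride
    * e.g. a write offset 96 bytes past the buffer start with a 12-byte
    * vertex stride replays 8 vertices.
    */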
   iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_DRAW, draw, indirect, sc);

   iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
      prim.PredicateEnable = use_predicate;

      if (indirect) {
         prim.IndirectParameterEnable = true;
      } else {
         prim.StartInstanceLocation = draw->start_instance;
         prim.InstanceCount = draw->instance_count;
         prim.VertexCountPerInstance = sc->count;

         prim.StartVertexLocation = sc->start;

         if (draw->index_size) {
            prim.BaseVertexLocation += sc->index_bias;
         }
      }
   }

   iris_batch_sync_region_end(batch);
}
static void
iris_load_indirect_location(struct iris_context *ice,
                            struct iris_batch *batch,
                            const struct pipe_grid_info *grid)
{
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

   assert(grid->indirect);

   struct iris_state_ref *grid_size = &ice->state.grid_size;
   struct iris_bo *bo = iris_resource_bo(grid_size->res);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
   }
}
#if GFX_VERx10 >= 125

static void
iris_upload_compute_walker(struct iris_context *ice,
                           struct iris_batch *batch,
                           const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_screen *screen = batch->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
   const struct brw_cs_dispatch_info dispatch =
      brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);

   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      iris_emit_cmd(batch, GENX(CFE_STATE), cfe) {
         /* TODO: Enable gfx12-hp scratch support */
         assert(prog_data->total_scratch == 0);

         cfe.MaximumNumberofThreads =
            devinfo->max_cs_threads * screen->subslice_total - 1;
      }
   }

   if (grid->indirect)
      iris_load_indirect_location(ice, batch, grid);

   iris_emit_cmd(batch, GENX(COMPUTE_WALKER), cw) {
      cw.IndirectParameterEnable = grid->indirect;
      cw.SIMDSize = dispatch.simd_size / 16;
      cw.LocalXMaximum = grid->block[0] - 1;
      cw.LocalYMaximum = grid->block[1] - 1;
      cw.LocalZMaximum = grid->block[2] - 1;
      cw.ThreadGroupIDXDimension = grid->grid[0];
      cw.ThreadGroupIDYDimension = grid->grid[1];
      cw.ThreadGroupIDZDimension = grid->grid[2];
      cw.ExecutionMask = dispatch.right_mask;

      cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
         .KernelStartPointer = KSP(shader),
         .NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
         .SharedLocalMemorySize =
            encode_slm_size(GFX_VER, prog_data->total_shared),
         .BarrierEnable = cs_prog_data->uses_barrier,
         .SamplerStatePointer = shs->sampler_table.offset,
         .BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE],
      };

      assert(brw_cs_push_const_total_size(cs_prog_data, dispatch.threads) == 0);
   }
}
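/* Note on the walker above: SIMDSize is an encoded field, not a lane count.
 * dispatch.simd_size is 8, 16, or 32, and the integer division by 16 maps
 * those to the hardware's 0/1/2 encoding.
 */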
#else /* #if GFX_VERx10 >= 125 */

static void
iris_upload_gpgpu_walker(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_screen *screen = batch->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
   const struct brw_cs_dispatch_info dispatch =
      brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);

   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      /* The MEDIA_VFE_STATE documentation for Gfx8+ says:
       *
       *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      iris_emit_pipe_control_flush(batch,
                                   "workaround: stall before MEDIA_VFE_STATE",
                                   PIPE_CONTROL_CS_STALL);

      iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
         if (prog_data->total_scratch) {
            uint32_t scratch_addr =
               pin_scratch_space(ice, batch, prog_data, MESA_SHADER_COMPUTE);

            vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
            vfe.ScratchSpaceBasePointer =
               rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
         }

         vfe.MaximumNumberofThreads =
            devinfo->max_cs_threads * screen->subslice_total - 1;
#if GFX_VER < 11
         vfe.ResetGatewayTimer =
            Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GFX_VER == 8
         vfe.BypassGatewayControl = true;
#endif
         vfe.NumberofURBEntries = 2;
         vfe.URBEntryAllocationSize = 2;

         vfe.CURBEAllocationSize =
            ALIGN(cs_prog_data->push.per_thread.regs * dispatch.threads +
                  cs_prog_data->push.cross_thread.regs, 2);
      }
   }

   /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
   if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
       cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
      uint32_t curbe_data_offset = 0;
      assert(cs_prog_data->push.cross_thread.dwords == 0 &&
             cs_prog_data->push.per_thread.dwords == 1 &&
             cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
      const unsigned push_const_size =
         brw_cs_push_const_total_size(cs_prog_data, dispatch.threads);
      uint32_t *curbe_data_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cs_thread_ids,
                      ALIGN(push_const_size, 64), 64,
                      &curbe_data_offset);
      assert(curbe_data_map);
      memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
      iris_fill_cs_push_const_buffer(cs_prog_data, dispatch.threads,
                                     curbe_data_map);

      iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
         curbe.CURBEDataStartAddress = curbe_data_offset;
      }
   }

   for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
      struct pipe_resource *res = ice->state.global_bindings[i];
      if (!res)
         continue;

      iris_use_pinned_bo(batch, iris_resource_bo(res),
                         true, IRIS_DOMAIN_NONE);
   }

   if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
                      IRIS_STAGE_DIRTY_BINDINGS_CS |
                      IRIS_STAGE_DIRTY_CONSTANTS_CS |
                      IRIS_STAGE_DIRTY_CS)) {
      uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];

      iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
         idd.SharedLocalMemorySize =
            encode_slm_size(GFX_VER, ish->kernel_shared_size);
         idd.KernelStartPointer =
            KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data,
                                                       dispatch.simd_size);
         idd.SamplerStatePointer = shs->sampler_table.offset;
         idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
         idd.NumberofThreadsinGPGPUThreadGroup = dispatch.threads;
      }

      for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
         desc[i] |= ((uint32_t *) shader->derived_data)[i];

      iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
         load.InterfaceDescriptorTotalLength =
            GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
         load.InterfaceDescriptorDataStartAddress =
            emit_state(batch, ice->state.dynamic_uploader,
                       &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
      }
   }

   if (grid->indirect)
      iris_load_indirect_location(ice, batch, grid);

   iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_COMPUTE, NULL, NULL, NULL);

   iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable = grid->indirect != NULL;
      ggw.SIMDSize = dispatch.simd_size / 16;
      ggw.ThreadDepthCounterMaximum = 0;
      ggw.ThreadHeightCounterMaximum = 0;
      ggw.ThreadWidthCounterMaximum = dispatch.threads - 1;
      ggw.ThreadGroupIDXDimension = grid->grid[0];
      ggw.ThreadGroupIDYDimension = grid->grid[1];
      ggw.ThreadGroupIDZDimension = grid->grid[2];
      ggw.RightExecutionMask = dispatch.right_mask;
      ggw.BottomExecutionMask = 0xffffffff;
   }

   iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
}

#endif /* #if GFX_VERx10 >= 125 */
static void
iris_upload_compute_state(struct iris_context *ice,
                          struct iris_batch *batch,
                          const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];

   iris_batch_sync_region_start(batch);

   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);

   if (((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
        shs->sysvals_need_upload) ||
       shader->kernel_input_size > 0)
      upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);

   if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);

   if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
      iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);

   iris_use_optional_res(batch, shs->sampler_table.res, false,
                         IRIS_DOMAIN_NONE);
   iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
                      IRIS_DOMAIN_NONE);

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);

#if GFX_VER >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

#if GFX_VERx10 >= 125
   iris_upload_compute_walker(ice, batch, grid);
#else
   iris_upload_gpgpu_walker(ice, batch, grid);
#endif

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_compute_saved_bos(ice, batch, grid);
      batch->contains_draw_with_next_seqno = batch->contains_draw = true;
   }

   iris_batch_sync_region_end(batch);
}

/**
 * State module teardown.
 */
static void
iris_destroy_state(struct iris_context *ice)
{
   struct iris_genx_state *genx = ice->state.genx;

   pipe_resource_reference(&ice->draw.draw_params.res, NULL);
   pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);

   /* Loop over all VBOs, including ones for draw parameters */
   for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
      pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
   }

   free(ice->state.genx);

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i], NULL);
   }

   for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
      pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
   }
   pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);

   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      pipe_resource_reference(&shs->sampler_table.res, NULL);
      for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
         pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
         pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
         pipe_resource_reference(&shs->image[i].base.resource, NULL);
         pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
         free(shs->image[i].surface_state.cpu);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
         pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
         pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
      }
      for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
         pipe_sampler_view_reference((struct pipe_sampler_view **)
                                     &shs->textures[i], NULL);
      }
   }

   pipe_resource_reference(&ice->state.grid_size.res, NULL);
   pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);

   pipe_resource_reference(&ice->state.null_fb.res, NULL);
   pipe_resource_reference(&ice->state.unbound_tex.res, NULL);

   pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
   pipe_resource_reference(&ice->state.last_res.scissor, NULL);
   pipe_resource_reference(&ice->state.last_res.blend, NULL);
   pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
}

/* ------------------------------------------------------------------- */

static void
iris_rebind_buffer(struct iris_context *ice,
                   struct iris_resource *res)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_genx_state *genx = ice->state.genx;

   assert(res->base.b.target == PIPE_BUFFER);

   /* Buffers can't be framebuffer attachments, nor display related,
    * and we don't have upstream Clover support.
    */
   assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
                                 PIPE_BIND_RENDER_TARGET |
                                 PIPE_BIND_BLENDABLE |
                                 PIPE_BIND_DISPLAY_TARGET |
                                 PIPE_BIND_CURSOR |
                                 PIPE_BIND_COMPUTE_RESOURCE |
                                 PIPE_BIND_GLOBAL)));

   if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
      uint64_t bound_vbs = ice->state.bound_vertex_buffers;
      while (bound_vbs) {
         const int i = u_bit_scan64(&bound_vbs);
         struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];

         /* Update the CPU struct */
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
         uint64_t *addr = (uint64_t *) &state->state[1];
         struct iris_bo *bo = iris_resource_bo(state->resource);

         if (*addr != bo->gtt_offset + state->offset) {
            *addr = bo->gtt_offset + state->offset;
            ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
         }
      }
   }
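
   /* The fixup above patches a packed state blob in place: since
    * BufferStartingAddress occupies bits [95:32] of VERTEX_BUFFER_STATE
    * (checked by the STATIC_ASSERTs), dwords 1-2 of the packed array can
    * be aliased as a single uint64_t and rewritten without repacking.
    * A minimal sketch of the idea, for a hypothetical field spanning
    * dwords 1-2 of some packed state:
    *
    *    uint32_t packed[4];                       // filled at bind time
    *    uint64_t *field = (uint64_t *) &packed[1];
    *    *field = new_address;                     // other dwords untouched
    *
    * The STATIC_ASSERTs guard the layout, so a genxml change that moves
    * the field would fail to build rather than corrupt state.
    */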

   /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
    * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
    *
    * There is also no need to handle these:
    * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
    * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
    */

   if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
      uint32_t *so_buffers = genx->so_buffers;
      for (unsigned i = 0; i < 4; i++,
           so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
         /* There are no other fields in bits 127:64 */
         uint64_t *addr = (uint64_t *) &so_buffers[2];
         STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_start) == 66);
         STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_bits) == 46);

         struct pipe_stream_output_target *tgt = ice->state.so_target[i];
         if (tgt) {
            struct iris_bo *bo = iris_resource_bo(tgt->buffer);
            if (*addr != bo->gtt_offset + tgt->buffer_offset) {
               *addr = bo->gtt_offset + tgt->buffer_offset;
               ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
            }
         }
      }
   }

   for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
      struct iris_shader_state *shs = &ice->state.shaders[s];
      enum pipe_shader_type p_stage = stage_to_pipe(s);

      if (!(res->bind_stages & (1 << s)))
         continue;

      if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
         /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
         uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
         while (bound_cbufs) {
            const int i = u_bit_scan(&bound_cbufs);
            struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
            struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];

            if (res->bo == iris_resource_bo(cbuf->buffer)) {
               pipe_resource_reference(&surf_state->res, NULL);
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
            }
         }
      }
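
      /* Each of these rebind paths walks a bitmask of bound slots rather
       * than scanning every slot. The idiom, in a minimal sketch
       * (u_bit_scan() is the real u_math helper; the mask value and
       * process_slot() are made up):
       *
       *    uint32_t mask = 0xa;                  // slots 1 and 3 bound
       *    while (mask) {
       *       const int i = u_bit_scan(&mask);   // returns and clears the
       *       process_slot(i);                   // lowest set bit: 1, then 3
       *    }
       *
       * Hence the copies into locals above: u_bit_scan() destroys the
       * mask, leaving the bookkeeping bitfield itself intact.
       */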

      if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
         uint32_t bound_ssbos = shs->bound_ssbos;
         while (bound_ssbos) {
            const int i = u_bit_scan(&bound_ssbos);
            struct pipe_shader_buffer *ssbo = &shs->ssbo[i];

            if (res->bo == iris_resource_bo(ssbo->buffer)) {
               struct pipe_shader_buffer buf = {
                  .buffer = &res->base.b,
                  .buffer_offset = ssbo->buffer_offset,
                  .buffer_size = ssbo->buffer_size,
               };
               iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
                                       (shs->writable_ssbos >> i) & 1);
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
         uint32_t bound_sampler_views = shs->bound_sampler_views;
         while (bound_sampler_views) {
            const int i = u_bit_scan(&bound_sampler_views);
            struct iris_sampler_view *isv = shs->textures[i];
            struct iris_bo *bo = isv->res->bo;

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &isv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
         uint32_t bound_image_views = shs->bound_image_views;
         while (bound_image_views) {
            const int i = u_bit_scan(&bound_image_views);
            struct iris_image_view *iv = &shs->image[i];
            struct iris_bo *bo = iris_resource_bo(iv->base.resource);

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &iv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }
   }
}

/* ------------------------------------------------------------------- */

/**
 * Introduce a batch synchronization boundary, and update its cache coherency
 * status to reflect the execution of a PIPE_CONTROL command with the
 * specified flags.
 */
static void
batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
{
   iris_batch_sync_boundary(batch);

   if ((flags & PIPE_CONTROL_CS_STALL)) {
      if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

      if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

      if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

      if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
                    PIPE_CONTROL_STALL_AT_SCOREBOARD)))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
   }

   if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

   if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

   if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

   if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
       (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
}
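
/* As an illustrative call (not quoted from any caller): marking for a
 * render-target flush with a CS stall,
 *
 *    batch_mark_sync_for_pipe_control(batch,
 *                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                     PIPE_CONTROL_CS_STALL);
 *
 * records IRIS_DOMAIN_RENDER_WRITE as both flushed (the stall guarantees
 * completion) and invalidated, whereas the same flags without CS_STALL
 * would record only the invalidate, since nothing waits for the flush to
 * actually finish.
 */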

static unsigned
flags_to_post_sync_op(uint32_t flags)
{
   if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
      return WriteImmediateData;

   if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
      return WritePSDepthCount;

   if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
      return WriteTimestamp;

   return 0;
}

/**
 * Do the given flags have a Post Sync or LRI Post Sync operation?
 */
static enum pipe_control_flags
get_post_sync_flags(enum pipe_control_flags flags)
{
   flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
            PIPE_CONTROL_WRITE_DEPTH_COUNT |
            PIPE_CONTROL_WRITE_TIMESTAMP |
            PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
    * "LRI Post Sync Operation". So more than one bit set would be illegal.
    */
   assert(util_bitcount(flags) <= 1);

   return flags;
}
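
/* For example, flags of (PIPE_CONTROL_WRITE_IMMEDIATE |
 * PIPE_CONTROL_CS_STALL) reduce to the single bit
 * PIPE_CONTROL_WRITE_IMMEDIATE here, while a (hypothetical, illegal)
 * request for both PIPE_CONTROL_WRITE_IMMEDIATE and
 * PIPE_CONTROL_WRITE_TIMESTAMP would leave two bits set and trip the
 * assert: the hardware has a single Post Sync Operation field, so only
 * one such operation can be encoded per PIPE_CONTROL.
 */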

#define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)

/**
 * Emit a series of PIPE_CONTROL commands, taking into account any
 * workarounds necessary to actually accomplish the caller's request.
 *
 * Unless otherwise noted, spec quotations in this function come from:
 *
 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
 * Restrictions for PIPE_CONTROL.
 *
 * You should not use this function directly. Use the helpers in
 * iris_pipe_control.c instead, which may split the pipe control further.
 */
static void
iris_emit_raw_pipe_control(struct iris_batch *batch,
                           const char *reason,
                           uint32_t flags,
                           struct iris_bo *bo,
                           uint32_t offset,
                           uint64_t imm)
{
   UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
   enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
   enum pipe_control_flags non_lri_post_sync_flags =
      post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Recursive PIPE_CONTROL workarounds --------------------------------
    * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
    *
    * We do these first because we want to look at the original operation,
    * rather than any workarounds we set.
    */
   if (GFX_VER == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
      /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
       * lists several workarounds:
       *
       *    "Project: SKL, KBL, BXT
       *
       *     If the VF Cache Invalidation Enable is set to a 1 in a
       *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
       *     sets to 0, with the VF Cache Invalidation Enable set to 0
       *     needs to be sent prior to the PIPE_CONTROL with VF Cache
       *     Invalidation Enable set to a 1."
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: recursive VF cache invalidate",
                                 0, NULL, 0, 0);
   }

   /* Wa_1409226450, Wait for EU to be idle before pipe control which
    * invalidates the instruction cache
    */
   if (GFX_VER == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before instruction "
                                 "cache invalidate",
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
                                 imm);
   }

   if ((GFX_VER == 9 || (GFX_VER == 12 && devinfo->revision == 0 /* A0 */)) &&
       IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
      /* Project: SKL / Argument: LRI Post Sync Operation [23]
       *
       * "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *  programmed prior to programming a PIPECONTROL command with "LRI
       *  Post Sync Operation" in GPGPU mode of operation (i.e when
       *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
       *
       * The same text exists a few rows below for Post Sync Op.
       *
       * On Gfx12 this is Wa_1607156449.
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before gpgpu post-sync",
                                 PIPE_CONTROL_CS_STALL, bo, offset, imm);
   }
7432 /* "Flush Types" workarounds ---------------------------------------------
7433 * We do these now because they may add post-sync operations or CS stalls.
7436 if (GFX_VER < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
7437 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7439 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7440 * 'Write PS Depth Count' or 'Write Timestamp'."
7443 flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7444 post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7445 non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7446 bo = batch->screen->workaround_address.bo;
7447 offset = batch->screen->workaround_address.offset;
7451 if (flags & PIPE_CONTROL_DEPTH_STALL) {
7452 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7454 * "This bit must be DISABLED for operations other than writing
7457 * This seems like nonsense. An Ivybridge workaround requires us to
7458 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7459 * operation. Gfx8+ requires us to emit depth stalls and depth cache
7460 * flushes together. So, it's hard to imagine this means anything other
7461 * than "we originally intended this to be used for PS_DEPTH_COUNT".
7463 * We ignore the supposed restriction and do nothing.
7467 if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
7468 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7469 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7471 * "This bit must be DISABLED for End-of-pipe (Read) fences,
7472 * PS_DEPTH_COUNT or TIMESTAMP queries."
7474 * TODO: Implement end-of-pipe checking.
7476 assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
7477 PIPE_CONTROL_WRITE_TIMESTAMP)));
7480 if (GFX_VER < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7481 /* From the PIPE_CONTROL instruction table, bit 1:
7483 * "This bit is ignored if Depth Stall Enable is set.
7484 * Further, the render cache is not flushed even if Write Cache
7485 * Flush Enable bit is set."
7487 * We assert that the caller doesn't do this combination, to try and
7488 * prevent mistakes. It shouldn't hurt the GPU, though.
7490 * We skip this check on Gfx11+ as the "Stall at Pixel Scoreboard"
7491 * and "Render Target Flush" combo is explicitly required for BTI
7492 * update workarounds.
7494 assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7495 PIPE_CONTROL_RENDER_TARGET_FLUSH)));

   /* PIPE_CONTROL page workarounds ------------------------------------- */

   if (GFX_VER <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
      /* From the PIPE_CONTROL page itself:
       *
       *    "IVB, HSW, BDW
       *     Restriction: Pipe_control with CS-stall bit set must be issued
       *     before a pipe-control command that has the State Cache
       *     Invalidate bit set."
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_FLUSH_LLC) {
      /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
       *
       *    "Project: ALL
       *     SW must always program Post-Sync Operation to "Write Immediate
       *     Data" when Flush LLC is set."
       *
       * For now, we just require the caller to do it.
       */
      assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
   }

   /* "Post-Sync Operation" workarounds -------------------------------- */

   /* Project: All / Argument: Global Snapshot Count Reset [19]
    *
    * "This bit must not be exercised on any product.
    *  Requires stall bit ([20] of DW1) set."
    *
    * We don't use this, so we just assert that it isn't used. The
    * PIPE_CONTROL instruction page indicates that they intended this
    * as a debug feature and don't think it is useful in production,
    * but it may actually be usable, should we ever want to.
    */
   assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);

   if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
                PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
      /* Project: All / Arguments:
       *
       * - Generic Media State Clear [16]
       * - Indirect State Pointers Disable [16]
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
       * State Clear) says:
       *
       *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *     programmed prior to programming a PIPECONTROL command with "Media
       *     State Clear" set in GPGPU mode of operation"
       *
       * This is a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
      /* Project: All / Argument: Store Data Index
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0'."
       *
       * For now, we just assert that the caller does this. We might want to
       * automatically add a write to the workaround BO...
       */
      assert(non_lri_post_sync_flags != 0);
   }
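
   /* A sketch of what that automatic fallback might look like, mirroring
    * the VF cache invalidate workaround earlier in this function (not
    * something the driver currently does, just an illustration):
    *
    *    if (!non_lri_post_sync_flags) {
    *       flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
    *       post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
    *       non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
    *       bo = batch->screen->workaround_address.bo;
    *       offset = batch->screen->workaround_address.offset;
    *    }
    */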

   if (flags & PIPE_CONTROL_SYNC_GFDT) {
      /* Project: All / Argument: Sync GFDT
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0' or 0x2520[13] must be set."
       *
       * For now, we just assert that the caller does this.
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
      /* Project: IVB+ / Argument: TLB inv
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, from the PIPE_CONTROL instruction table:
       *
       *    "Project: SKL+
       *     Post Sync Operation or CS stall must be set to ensure a TLB
       *     invalidation occurs. Otherwise no cycle will occur to the TLB
       *     cache to invalidate."
       *
       * This is not a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (GFX_VER == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }
7602 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7604 if (IS_COMPUTE_PIPELINE(batch)) {
7605 if (GFX_VER >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7606 /* Project: SKL+ / Argument: Tex Invalidate
7607 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7609 flags |= PIPE_CONTROL_CS_STALL;
7612 if (GFX_VER == 8 && (post_sync_flags ||
7613 (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7614 PIPE_CONTROL_DEPTH_STALL |
7615 PIPE_CONTROL_RENDER_TARGET_FLUSH |
7616 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7617 PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7618 /* Project: BDW / Arguments:
7620 * - LRI Post Sync Operation [23]
7621 * - Post Sync Op [15:14]
7623 * - Depth Stall [13]
7624 * - Render Target Cache Flush [12]
7625 * - Depth Cache Flush [0]
7626 * - DC Flush Enable [5]
7628 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
7631 flags |= PIPE_CONTROL_CS_STALL;
7633 /* Also, from the PIPE_CONTROL instruction table, bit 20:
7636 * This bit must be always set when PIPE_CONTROL command is
7637 * programmed by GPGPU and MEDIA workloads, except for the cases
7638 * when only Read Only Cache Invalidation bits are set (State
7639 * Cache Invalidation Enable, Instruction cache Invalidation
7640 * Enable, Texture Cache Invalidation Enable, Constant Cache
7641 * Invalidation Enable). This is to WA FFDOP CG issue, this WA
7642 * need not implemented when FF_DOP_CG is disable via "Fixed
7643 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7645 * It sounds like we could avoid CS stalls in some cases, but we
7646 * don't currently bother. This list isn't exactly the list above,
7652 /* "Stall" workarounds ----------------------------------------------
7653 * These have to come after the earlier ones because we may have added
7654 * some additional CS stalls above.
7657 if (GFX_VER < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
7658 /* Project: PRE-SKL, VLV, CHV
7660 * "[All Stepping][All SKUs]:
7662 * One of the following must also be set:
7664 * - Render Target Cache Flush Enable ([12] of DW1)
7665 * - Depth Cache Flush Enable ([0] of DW1)
7666 * - Stall at Pixel Scoreboard ([1] of DW1)
7667 * - Depth Stall ([13] of DW1)
7668 * - Post-Sync Operation ([13] of DW1)
7669 * - DC Flush Enable ([5] of DW1)"
7671 * If we don't already have one of those bits set, we choose to add
7672 * "Stall at Pixel Scoreboard". Some of the other bits require a
7673 * CS stall as a workaround (see above), which would send us into
7674 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
7675 * appears to be safe, so we choose that.
7677 const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
7678 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7679 PIPE_CONTROL_WRITE_IMMEDIATE |
7680 PIPE_CONTROL_WRITE_DEPTH_COUNT |
7681 PIPE_CONTROL_WRITE_TIMESTAMP |
7682 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7683 PIPE_CONTROL_DEPTH_STALL |
7684 PIPE_CONTROL_DATA_CACHE_FLUSH;
7685 if (!(flags & wa_bits))
7686 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
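
   /* For instance, a caller requesting nothing but a CS stall on these
    * platforms,
    *
    *    iris_emit_raw_pipe_control(batch, "example: bare CS stall",
    *                               PIPE_CONTROL_CS_STALL, NULL, 0, 0);
    *
    * reaches this point with none of the wa_bits set, so the packet goes
    * out as CS_STALL | STALL_AT_SCOREBOARD instead.
    */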

   if (GFX_VER >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
      /*
       * "PIPE_CONTROL with Depth Stall Enable bit must be set
       *  with any PIPE_CONTROL with Depth Flush Enable bit set."
       */
      flags |= PIPE_CONTROL_DEPTH_STALL;
   }

   /* Emit --------------------------------------------------------------- */

   if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
      fprintf(stderr,
              " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
              (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
              (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
              (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
              (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
              (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
              (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
              (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
              (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
              (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
              (flags & PIPE_CONTROL_TILE_CACHE_FLUSH) ? "Tile " : "",
              (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
              (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
              (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
              (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
              (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
              (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
              (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
                 "SnapRes" : "",
              (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
                 "ISPDis" : "",
              (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
              (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
              (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
              (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
              imm, reason);
   }
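
   /* As an illustration (a made-up line, not captured from a real run),
    * a PIPE_CONTROL carrying CS_STALL and WRITE_IMMEDIATE with imm == 1
    * and reason "example" would print roughly:
    *
    *    PC [CS WriteImm 1]: example
    *
    * which makes the stream of pipe controls a workload emits easy to
    * grep.
    */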

   batch_mark_sync_for_pipe_control(batch, flags);
   iris_batch_sync_region_start(batch);

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
#if GFX_VER >= 12
      pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
#endif
#if GFX_VER >= 11
      pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
#endif
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      pc.ImmediateData = imm;
   }

   iris_batch_sync_region_end(batch);
}

/**
 * Preemption on Gfx9 has to be enabled or disabled in various cases.
 *
 * See these workarounds for preemption:
 *  - WaDisableMidObjectPreemptionForGSLineStripAdj
 *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
 *  - WaDisableMidObjectPreemptionForLineLoop
 *  - WaDisablePreemptionForPolygon
 *
 * We don't put this in the vtable because it's only used on Gfx9.
 */
static void
gfx9_toggle_preemption(struct iris_context *ice,
                       struct iris_batch *batch,
                       const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;
   bool object_preemption = true;

   /* WaDisableMidObjectPreemptionForGSLineStripAdj
    *
    * "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
    *  and GS is enabled."
    */
   if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
       ice->shaders.prog[MESA_SHADER_GEOMETRY])
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
    *
    * "TriFan miscompare in Execlist Preemption test. Cut index that is
    *  on a previous context. End the previous, then resume another context
    *  with a tri-fan or polygon, and the vertex count is corrupted. If we
    *  preempt again we will cause corruption.
    *
    *  WA: Disable mid-draw preemption when draw-call has a tri-fan."
    */
   if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForLineLoop
    *
    * "VF Stats Counters Missing a vertex when preemption enabled.
    *
    *  WA: Disable mid-draw preemption when the draw uses a lineloop
    *  topology."
    */
   if (draw->mode == PIPE_PRIM_LINE_LOOP)
      object_preemption = false;

   /* WaDisablePreemptionForPolygon
    *
    * "VF is corrupting GAFS data when preempted on an instance boundary
    *  and replayed with instancing enabled.
    *
    *  WA: Disable preemption when using instancing."
    */
   if (draw->instance_count > 1)
      object_preemption = false;

   if (genx->object_preemption != object_preemption) {
      iris_enable_obj_preemption(batch, object_preemption);
      genx->object_preemption = object_preemption;
   }
}

static void
iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
{
   struct iris_genx_state *genx = ice->state.genx;

   memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
}

static void
iris_emit_mi_report_perf_count(struct iris_batch *batch,
                               struct iris_bo *bo,
                               uint32_t offset_in_bytes,
                               uint32_t report_id)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
                                   IRIS_DOMAIN_OTHER_WRITE);
      mi_rpc.ReportID = report_id;
   }
   iris_batch_sync_region_end(batch);
}

/**
 * Update the pixel hashing modes that determine the balancing of PS threads
 * across subslices and slices.
 *
 * \param width Width bound of the rendering area (already scaled down if \p
 *              scale is greater than 1).
 * \param height Height bound of the rendering area (already scaled down if \p
 *               scale is greater than 1).
 * \param scale The number of framebuffer samples that could potentially be
 *              affected by an individual channel of the PS thread. This is
 *              typically one for single-sampled rendering, but for operations
 *              like CCS resolves and fast clears a single PS invocation may
 *              update a huge number of pixels, in which case a finer
 *              balancing is desirable in order to maximally utilize the
 *              bandwidth available. UINT_MAX can be used as shorthand for
 *              "finest hashing mode available".
 */
void
genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
                        unsigned width, unsigned height, unsigned scale)
{
#if GFX_VER == 9
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   const unsigned slice_hashing[] = {
      /* Because all Gfx9 platforms with more than one slice require
       * three-way subslice hashing, a single "normal" 16x16 slice hashing
       * block is guaranteed to suffer from substantial imbalance, with one
       * subslice receiving twice as much work as the other two in the
       * slice.
       *
       * The performance impact of that would be particularly severe when
       * three-way hashing is also in use for slice balancing (which is the
       * case for all Gfx9 GT4 platforms), because one of the slices
       * receives one every three 16x16 blocks in either direction, which
       * is roughly the periodicity of the underlying subslice imbalance
       * pattern ("roughly" because in reality the hardware's
       * implementation of three-way hashing doesn't do exact modulo 3
       * arithmetic, which somewhat decreases the magnitude of this effect
       * in practice). This leads to a systematic subslice imbalance
       * within that slice regardless of the size of the primitive. The
       * 32x32 hashing mode guarantees that the subslice imbalance within a
       * single slice hashing block is minimal, largely eliminating this
       * issue.
       */
      _32x32,
      /* Finest slice hashing mode available. */
      NORMAL
   };
   const unsigned subslice_hashing[] = {
      /* 16x16 would provide a slight cache locality benefit especially
       * visible in the sampler L1 cache efficiency of low-bandwidth
       * non-LLC platforms, but it comes at the cost of greater subslice
       * imbalance for primitives of dimensions approximately intermediate
       * between 16x4 and 16x16.
       */
      _16x4,
      /* Finest subslice hashing mode available. */
      _8x4
   };
   /* Dimensions of the smallest hashing block of a given hashing mode. If
    * the rendering area is smaller than this there can't possibly be any
    * benefit from switching to this mode, so we optimize out the
    * transition.
    */
   const unsigned min_size[][2] = {
      { 16, 4 },
      { 8, 4 }
   };
   const unsigned idx = scale > 1;

   if (width > min_size[idx][0] || height > min_size[idx][1]) {
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before GT_MODE LRI",
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                 PIPE_CONTROL_CS_STALL,
                                 NULL, 0, 0);

      iris_emit_reg(batch, GENX(GT_MODE), reg) {
         reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
         reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
         reg.SubsliceHashing = subslice_hashing[idx];
         reg.SubsliceHashingMask = -1;
      }

      ice->state.current_hash_scale = scale;
   }
#endif
}
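
/* As a usage sketch (illustrative values, not a quote of any caller):
 *
 *    genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
 *
 * selects the coarse, cache-friendly modes for ordinary rendering, while
 * passing the scaled-down clear rectangle with scale > 1 before something
 * like a fast clear biases toward the finest hashing available.
 */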

static void
iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }
}

void
genX(init_screen_state)(struct iris_screen *screen)
{
   assert(screen->devinfo.verx10 == GFX_VERx10);
   screen->vtbl.destroy_state = iris_destroy_state;
   screen->vtbl.init_render_context = iris_init_render_context;
   screen->vtbl.init_compute_context = iris_init_compute_context;
   screen->vtbl.upload_render_state = iris_upload_render_state;
   screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
   screen->vtbl.upload_compute_state = iris_upload_compute_state;
   screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
   screen->vtbl.rebind_buffer = iris_rebind_buffer;
   screen->vtbl.load_register_reg32 = iris_load_register_reg32;
   screen->vtbl.load_register_reg64 = iris_load_register_reg64;
   screen->vtbl.load_register_imm32 = iris_load_register_imm32;
   screen->vtbl.load_register_imm64 = iris_load_register_imm64;
   screen->vtbl.load_register_mem32 = iris_load_register_mem32;
   screen->vtbl.load_register_mem64 = iris_load_register_mem64;
   screen->vtbl.store_register_mem32 = iris_store_register_mem32;
   screen->vtbl.store_register_mem64 = iris_store_register_mem64;
   screen->vtbl.store_data_imm32 = iris_store_data_imm32;
   screen->vtbl.store_data_imm64 = iris_store_data_imm64;
   screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
   screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
   screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
   screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
   screen->vtbl.populate_vs_key = iris_populate_vs_key;
   screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
   screen->vtbl.populate_tes_key = iris_populate_tes_key;
   screen->vtbl.populate_gs_key = iris_populate_gs_key;
   screen->vtbl.populate_fs_key = iris_populate_fs_key;
   screen->vtbl.populate_cs_key = iris_populate_cs_key;
   screen->vtbl.lost_genx_state = iris_lost_genx_state;
}

void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_compute_resources = iris_set_compute_resources;
   ctx->set_global_binding = iris_set_global_binding;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;
   ctx->set_frontend_noop = iris_set_frontend_noop;

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;

   ice->state.statistics_counters_enabled = true;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.prim_mode = PIPE_PRIM_MAX;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
   ice->draw.derived_params.drawid = -1;

   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map,
                       .size = isl_extent3d(1, 1, 1));
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));

   /* Default all scissor rectangles to be empty regions. */
   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
      ice->state.scissors[i] = (struct pipe_scissor_state) {
         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,