// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gen8_engine_cs.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"

int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (GRAPHICS_VER(rq->i915) == 9)
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
			dc_flush_wa = true;
	}

	len = 6;
	if (vf_flush_wa)
		len += 6;
	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(rq, cs);

	return 0;
}
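
/*
 * Non-render engines lack PIPE_CONTROL; flush instead via MI_FLUSH_DW,
 * using a post-sync dword write to the PPHWSP scratch as the barrier.
 */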
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(rq, cs);

	return 0;
}
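
/*
 * Unlike gen8, gen11 splits the work into two PIPE_CONTROLs: the
 * write-back caches are flushed first, then the read caches are
 * invalidated in a separate command.
 */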
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	if (mode & EMIT_FLUSH) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}
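
/*
 * On gen12+, MI_ARB_CHECK doubles as the pre-parser control: bit 8
 * selects the pre-parser-disable field for update and bit 0 carries
 * the new state (1 = prefetching disabled, 0 = re-enabled).
 */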
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | 1 << 8 | state;
}
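
/*
 * Map an engine to its AUX-table invalidation register; engines
 * without one get INVALID_MMIO_REG and are skipped by the callers.
 */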
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
	switch (engine->id) {
	case RCS0:
		return GEN12_CCS_AUX_INV;
	case BCS0:
		return GEN12_BCS0_AUX_INV;
	case VCS0:
		return GEN12_VD0_AUX_INV;
	case VCS2:
		return GEN12_VD2_AUX_INV;
	case VECS0:
		return GEN12_VE0_AUX_INV;
	case CCS0:
		return GEN12_CCS0_AUX_INV;
	default:
		return INVALID_MMIO_REG;
	}
}
188 static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
190 i915_reg_t reg = gen12_get_aux_inv_reg(engine);
192 if (IS_PONTEVECCHIO(engine->i915))
196 * So far platforms supported by i915 having flat ccs do not require
197 * AUX invalidation. Check also whether the engine requires it.
199 return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);

u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
	u32 gsi_offset = engine->gt->uncore->gsi_offset;

	if (!gen12_needs_ccs_aux_inv(engine))
		return cs;
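
	/*
	 * Kick the invalidation by writing the AUX_INV bit with an LRI,
	 * then semaphore-poll the same register until the hardware clears
	 * the bit to signal that the invalidation has completed.
	 */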
	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = AUX_INV;

	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

static int mtl_dummy_pipe_control(struct i915_request *rq)
{
	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
		u32 *cs;

		/* dummy PIPE_CONTROL + depth flush */
		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);
		cs = gen12_emit_pipe_control(cs,
					     0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	struct intel_engine_cs *engine = rq->engine;

	/*
	 * On Aux CCS platforms the invalidation of the Aux
	 * table requires quiescing memory traffic beforehand
	 */
	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
		u32 bit_group_0 = 0;
		u32 bit_group_1 = 0;
		int err;
		u32 *cs;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;

		/*
		 * On MTL and beyond, the CCS_FLUSH bit must also be set
		 * in the pipe control when required.
		 */
		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;

		/*
		 * The L3 fabric flush needed for AUX CCS invalidation
		 * already happens as part of the pipe-control, so
		 * PIPE_CONTROL_FLUSH_L3 can be skipped there. It also
		 * deals with Protected Memory, which is not needed for
		 * AUX CCS invalidation and leads to unwanted side effects.
		 */
		if (mode & EMIT_FLUSH)
			bit_group_1 |= PIPE_CONTROL_FLUSH_L3;

		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* Wa_1409600907:tgl,adl-p */
		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;

		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
		bit_group_1 |= PIPE_CONTROL_QW_WRITE;

		bit_group_1 |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 flags = 0;
		u32 *cs, count;
		int err;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		count = 8;
		if (gen12_needs_ccs_aux_inv(rq->engine))
			count += 8;

		cs = intel_ring_begin(rq, count);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/*
		 * Prevent the pre-parser from skipping past the TLB
		 * invalidate and loading a stale page for the batch
		 * buffer / request payload.
		 */
		*cs++ = preparser_disable(true);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

		cs = gen12_emit_aux_table_inv(engine, cs);

		*cs++ = preparser_disable(false);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd = 4;
	u32 *cs;

	if (mode & EMIT_INVALIDATE) {
		cmd += 2;

		if (gen12_needs_ccs_aux_inv(rq->engine))
			cmd += 8;
	}

	cs = intel_ring_begin(rq, cmd);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
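
	/*
	 * Bracket the flush with a pre-parser disable/enable pair so the
	 * command streamer cannot prefetch past the TLB invalidate below.
	 */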
	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(true);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;

		if (gen12_needs_ccs_aux_inv(rq->engine) &&
		    rq->engine->class == COPY_ENGINE_CLASS)
			cmd |= MI_FLUSH_DW_CCS;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */

	cs = gen12_emit_aux_table_inv(rq->engine, cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);

	return 0;
}
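
/* GGTT address of the preemption semaphore dword in the engine's status page */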
static u32 preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static u32 hwsp_offset(const struct i915_request *rq)
{
	const struct intel_timeline *tl;

	/* Before the request is executed, the timeline is fixed */
	tl = rcu_dereference_protected(rq->timeline,
				       !i915_request_signaled(rq));

	/* See the comment in i915_request_active_seqno(). */
	return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}
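
/*
 * Emit the initial breadcrumb: bump the timeline's HWSP seqno to one
 * before this request's own seqno, the value against which
 * i915_request_started() tests.
 */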
int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
		return 0;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = hwsp_offset(rq);
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 *
	 * i915_request_started() is used during preemption processing
	 * to decide if the request is currently inside the user payload
	 * or spinning on a kernel semaphore (or earlier). For no-preemption
	 * requests, we do allow preemption on the semaphore before the user
	 * payload, but do not allow preemption once the request is started.
	 *
	 * i915_request_started() is similarly used during GPU hangs to
	 * determine if the user's payload was guilty, and if so, the
	 * request is banned. Before the request is started, it is assumed
	 * to be unharmed and an innocent victim of another's hang.
	 */
	*cs++ = MI_NOOP;
	*cs++ = MI_ARB_CHECK;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);

	return 0;
}

static int __xehp_emit_bb_start(struct i915_request *rq,
				u64 offset, u32 len,
				const unsigned int flags,
				u32 arb)
{
	struct intel_context *ce = rq->context;
	u32 wa_offset = lrc_indirect_bb(ce);
	u32 *cs;

	GEM_BUG_ON(!ce->wa_bb_page);

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | arb;
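
	/*
	 * Reload RING_PREDICATE_RESULT from the per-context wa page, where
	 * the context workaround batch is expected to have saved it.
	 */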
	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
	*cs++ = MI_BATCH_BUFFER_START_GEN8;
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
	*cs++ = 0;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	intel_ring_advance(rq, cs);

	return 0;
}

int xehp_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}

int xehp_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}

int gen8_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We wouldn't need to disable arbitration here (in particular on
	 * all the gens that do not need the w/a at all) if we took care
	 * to re-enable it on every switch into this context, both ordinary
	 * and for preemption. However, gen8 has another w/a that requires
	 * us not to preempt inside GPGPU execution, so we keep arbitration
	 * disabled for gen8 batches. Arbitration is re-enabled before we
	 * close the request (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}
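
/*
 * Preemptable variant: arbitration is enabled for the duration of the
 * user batch and disabled again afterwards, so preemption may occur
 * within the payload but not while the closing breadcrumb is emitted.
 */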
int gen8_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	u32 *cs;

	if (unlikely(i915_request_has_nopreempt(rq)))
		return gen8_emit_bb_start_noarb(rq, offset, len, flags);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void assert_request_valid(struct i915_request *rq)
{
	struct intel_ring *ring __maybe_unused = rq->ring;

	/* Can we unwind this request without appearing to go forwards? */
	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	rq->wa_tail = intel_ring_offset(rq, cs);

	/* Check that entire request is less than half the ring */
	assert_request_valid(rq);

	return cs;
}
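
/*
 * Spin on the preemption semaphore in the status page: it normally
 * reads 0 so the wait falls straight through, but the driver can raise
 * it (see ring_set_paused()) to hold the engine at this known point
 * while a preemption is processed.
 */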
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	return cs;
}

static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = emit_preempt_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}

u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}

u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_TILE_CACHE_FLUSH |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

/*
 * Note that the CS instruction pre-parser will not stall on the breadcrumb
 * flush and will continue pre-fetching the instructions after it before the
 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
 * of the next request before the memory has been flushed, we're guaranteed
 * that we won't access the batch itself too early.
 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
 * so, if the current request is modifying an instruction in the next request
 * on the same intel_context, we might pre-fetch and then execute the
 * pre-update instruction. To avoid this, the users of self-modifying code
 * should either disable the parser around the code emitting the memory writes,
 * via a new flag added to MI_ARB_CHECK, or emit the writes from a different
 * intel_context. For the in-kernel use-cases we've opted to use a separate
 * context, see reloc_gpu() as an example.
 * All the above applies only to the instructions themselves. Non-inline data
 * used by the instructions is not pre-fetched.
 */

static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET	0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
	return i915_ggtt_offset(rq->context->state) +
		(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}

/* Wa_14014475959:dg2 */
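/*
 * Raise the CCS semaphore with an atomic write of 1, then spin until it
 * reads zero again; the scheduler is expected to clear it once it is
 * safe for the CCS context to switch out.
 */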
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
	int i;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
		MI_ATOMIC_MOVE;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 1;

	/*
	 * When MI_ATOMIC_INLINE_DATA is set, this command must be 11 DW +
	 * (1 NOP) to align: 4 DWs above + 8 filler DWs here.
	 */
	for (i = 0; i < 8; ++i)
		*cs++ = 0;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;

	return cs;
}

static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = gen12_emit_preempt_busywait(rq, cs);

	/* Wa_14014475959:dg2 */
	if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
		cs = ccs_emit_wa_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	/* XXX Stalling flush before seqno write; post-sync not */
	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *i915 = rq->i915;
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TLB_INVALIDATE |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/* dummy PIPE_CONTROL + depth flush */
		cs = gen12_emit_pipe_control(cs, 0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		/* Wa_1409600907 */
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (!HAS_3D_PIPELINE(rq->i915))
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (rq->engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	cs = gen12_emit_pipe_control(cs, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0);

	/* XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen12_emit_ggtt_write_rcs(cs,
				       rq->fence.seqno,
				       hwsp_offset(rq),
				       0,
				       PIPE_CONTROL_FLUSH_ENABLE |
				       PIPE_CONTROL_CS_STALL);

	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}