1 #ifndef _INTEL_RINGBUFFER_H_
2 #define _INTEL_RINGBUFFER_H_
4 #include <linux/hashtable.h>
5 #include "i915_gem_batch_pool.h"
6 #include "i915_gem_request.h"
7 #include "i915_gem_timeline.h"
8 #include "i915_selftest.h"
10 #define I915_CMD_HASH_ORDER 9
12 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
13 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
14 * to give some indication as to some of the magic values used in the various
17 #define CACHELINE_BYTES 64
18 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
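/* For reference: 64 bytes / sizeof(uint32_t) == 16 dwords per (assumed) cacheline. */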
21 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
22 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
23 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
25 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
26 * cacheline, the Head Pointer must not be greater than the Tail
29 #define I915_RING_FREE_SPACE 64
31 struct intel_hw_status_page {
37 #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
38 #define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
40 #define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
41 #define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
43 #define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
44 #define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
46 #define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
47 #define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
49 #define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
50 #define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
52 #define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
53 #define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
55 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
56 * do the writes, and that must have QW-aligned offsets, simply pretend it's 8 bytes.
58 #define gen8_semaphore_seqno_size sizeof(uint64_t)
59 #define GEN8_SEMAPHORE_OFFSET(__from, __to) \
60 (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
61 #define GEN8_SIGNAL_OFFSET(__ring, to) \
62 (dev_priv->semaphore->node.start + \
63 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
64 #define GEN8_WAIT_OFFSET(__ring, from) \
65 (dev_priv->semaphore->node.start + \
66 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
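/*
 * Worked example (a sketch only; it assumes five engines, i.e.
 * I915_NUM_ENGINES == 5, and the 8-byte slots defined above): the slot in
 * which engine id 1 signals engine id 0 sits at
 * GEN8_SEMAPHORE_OFFSET(1, 0) = (1 * 5 + 0) * 8 = 0x28 bytes into the
 * semaphore object, while the slot it polls when waiting on engine id 0 is
 * GEN8_SEMAPHORE_OFFSET(0, 1) = (0 * 5 + 1) * 8 = 0x08. GEN8_SIGNAL_OFFSET
 * and GEN8_WAIT_OFFSET simply add the semaphore vma's node.start to those
 * relative offsets.
 */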
68 enum intel_engine_hangcheck_action {
73 ENGINE_ACTIVE_SUBUNITS,
78 static inline const char *
79 hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
86 case ENGINE_ACTIVE_SEQNO:
87 return "active seqno";
88 case ENGINE_ACTIVE_HEAD:
90 case ENGINE_ACTIVE_SUBUNITS:
91 return "active subunits";
92 case ENGINE_WAIT_KICK:
101 #define I915_MAX_SLICES 3
102 #define I915_MAX_SUBSLICES 3
104 #define instdone_slice_mask(dev_priv__) \
105 (INTEL_GEN(dev_priv__) == 7 ? \
106 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
108 #define instdone_subslice_mask(dev_priv__) \
109 (INTEL_GEN(dev_priv__) == 7 ? \
110 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
112 #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
113 for ((slice__) = 0, (subslice__) = 0; \
114 (slice__) < I915_MAX_SLICES; \
115 (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
116 (slice__) += ((subslice__) == 0)) \
117 for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
118 (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
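/*
 * Minimal usage sketch for the iterator above (hypothetical caller, not
 * taken from this file): read back the per-slice/subslice INSTDONE values
 * and print the sampler dword for every populated pair.
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG("sampler[%d][%d]: 0x%08x\n",
 *			  slice, subslice, instdone.sampler[slice][subslice]);
 */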
120 struct intel_instdone {
122 /* The following exist only in the RCS engine */
124 u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
125 u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
128 struct intel_engine_hangcheck {
131 enum intel_engine_hangcheck_action action;
132 unsigned long action_timestamp;
134 struct intel_instdone instdone;
139 struct i915_vma *vma;
142 struct intel_engine_cs *engine;
144 struct list_head request_list;
153 /** We track the position of the requests in the ring buffer, and
154 * when each is retired we increment last_retired_head as the GPU
155 * must have finished processing the request and so we know we
156 * can advance the ringbuffer up to that position.
158 * last_retired_head is set to -1 after the value is consumed so
159 * we can detect new retirements.
161 u32 last_retired_head;
164 struct i915_gem_context;
165 struct drm_i915_reg_table;
168 * we use a single page to load ctx workarounds, so all of these
169 * values are expressed in terms of dwords
171 * struct i915_wa_ctx_bb:
172 * offset: specifies the batch starting position; it is also helpful in case
173 * we want to have multiple batches at different offsets based on
174 * some criteria. It is not a requirement at the moment but provides
175 * an option for future use.
176 * size: size of the batch in DWORDS
178 struct i915_ctx_workarounds {
179 struct i915_wa_ctx_bb {
182 } indirect_ctx, per_ctx;
183 struct i915_vma *vma;
186 struct drm_i915_gem_request;
187 struct intel_render_state;
190 * Engine ID definitions.
191 * Keep instances of the same engine type together.
193 enum intel_engine_id {
198 #define _VCS(n) (VCS + (n))
202 struct intel_engine_cs {
203 struct drm_i915_private *i915;
205 enum intel_engine_id id;
206 unsigned int exec_id;
210 unsigned int irq_shift;
211 struct intel_ring *buffer;
212 struct intel_timeline *timeline;
214 struct intel_render_state *render_state;
217 unsigned long irq_posted;
218 #define ENGINE_IRQ_BREADCRUMB 0
219 #define ENGINE_IRQ_EXECLIST 1
221 /* Rather than have every client wait upon all user interrupts,
222 * with the herd waking after every interrupt and each doing the
223 * heavyweight seqno dance, we delegate the task (of being the
224 * bottom-half of the user interrupt) to the first client. After
225 * every interrupt, we wake up one client, who does the heavyweight
226 * coherent seqno read and either goes back to sleep (if incomplete),
227 * or wakes up all the completed clients in parallel, before then
228 * transferring the bottom-half status to the next client in the queue.
230 * Compared to walking the entire list of waiters in a single dedicated
231 * bottom-half, we reduce the latency of the first waiter by avoiding
232 * a context switch, but incur additional coherent seqno reads when
233 * following the chain of request breadcrumbs. Since it is most likely
234 * that we have a single client waiting on each seqno, reducing
235 * the overhead of waking that client is much preferred.
237 struct intel_breadcrumbs {
238 spinlock_t irq_lock; /* protects irq_*; irqsafe */
239 struct intel_wait *irq_wait; /* oldest waiter by retirement */
241 spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
242 struct rb_root waiters; /* sorted by retirement, priority */
243 struct rb_root signals; /* sorted by retirement */
244 struct task_struct *signaler; /* used for fence signalling */
245 struct drm_i915_gem_request __rcu *first_signal;
246 struct timer_list fake_irq; /* used after a missed interrupt */
247 struct timer_list hangcheck; /* detect missed interrupts */
249 unsigned int hangcheck_interrupts;
252 bool irq_enabled : 1;
253 I915_SELFTEST_DECLARE(bool mock : 1);
257 * A pool of objects to use as shadow copies of client batch buffers
258 * when the command parser is enabled. Prevents the client from
259 * modifying the batch contents after software parsing.
261 struct i915_gem_batch_pool batch_pool;
263 struct intel_hw_status_page status_page;
264 struct i915_ctx_workarounds wa_ctx;
265 struct i915_vma *scratch;
267 u32 irq_keep_mask; /* always keep these interrupts */
268 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
269 void (*irq_enable)(struct intel_engine_cs *engine);
270 void (*irq_disable)(struct intel_engine_cs *engine);
272 int (*init_hw)(struct intel_engine_cs *engine);
273 void (*reset_hw)(struct intel_engine_cs *engine,
274 struct drm_i915_gem_request *req);
276 int (*context_pin)(struct intel_engine_cs *engine,
277 struct i915_gem_context *ctx);
278 void (*context_unpin)(struct intel_engine_cs *engine,
279 struct i915_gem_context *ctx);
280 int (*request_alloc)(struct drm_i915_gem_request *req);
281 int (*init_context)(struct drm_i915_gem_request *req);
283 int (*emit_flush)(struct drm_i915_gem_request *request,
285 #define EMIT_INVALIDATE BIT(0)
286 #define EMIT_FLUSH BIT(1)
287 #define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
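	/* Illustrative only (a sketch, not the actual call sites): callers
	 * typically request EMIT_INVALIDATE before a batch is executed and
	 * EMIT_FLUSH once its writes must become visible, e.g.
	 *	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	 */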
288 int (*emit_bb_start)(struct drm_i915_gem_request *req,
289 u64 offset, u32 length,
290 unsigned int dispatch_flags);
291 #define I915_DISPATCH_SECURE BIT(0)
292 #define I915_DISPATCH_PINNED BIT(1)
293 #define I915_DISPATCH_RS BIT(2)
294 void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
296 int emit_breadcrumb_sz;
298 /* Pass the request to the hardware queue (e.g. directly into
299 * the legacy ringbuffer or to the end of an execlist).
301 * This is called from an atomic context with irqs disabled; must
304 void (*submit_request)(struct drm_i915_gem_request *req);
306 /* Call when the priority on a request has changed and it and its
307 * dependencies may need rescheduling. Note the request itself may
308 * not be ready to run!
310 * Called under the struct_mutex.
312 void (*schedule)(struct drm_i915_gem_request *request,
315 /* Some chipsets are not quite as coherent as advertised and need
316 * an expensive kick to force a true read of the up-to-date seqno.
317 * However, the up-to-date seqno is not always required and the last
318 * seen value is good enough. Note that the seqno will always be
319 * monotonic, even if not coherent.
321 void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
322 void (*cleanup)(struct intel_engine_cs *engine);
324 /* GEN8 signal/wait table - never trust comments!
325 * signal to signal to signal to signal to signal to
326 * RCS VCS BCS VECS VCS2
327 * --------------------------------------------------------------------
328 * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
329 * |-------------------------------------------------------------------
330 * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
331 * |-------------------------------------------------------------------
332 * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
333 * |-------------------------------------------------------------------
334 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
335 * |-------------------------------------------------------------------
336 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
337 * |-------------------------------------------------------------------
340 * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
341 * ie. transpose of g(x, y)
343 * sync from sync from sync from sync from sync from
344 * RCS VCS BCS VECS VCS2
345 * --------------------------------------------------------------------
346 * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
347 * |-------------------------------------------------------------------
348 * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
349 * |-------------------------------------------------------------------
350 * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
351 * |-------------------------------------------------------------------
352 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
353 * |-------------------------------------------------------------------
354 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
355 * |-------------------------------------------------------------------
358 * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
359 * ie. transpose of f(x, y)
363 #define GEN6_SEMAPHORE_LAST VECS_HW
364 #define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
365 #define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
367 /* our mbox written by others */
368 u32 wait[GEN6_NUM_SEMAPHORES];
369 /* mboxes this ring signals to */
370 i915_reg_t signal[GEN6_NUM_SEMAPHORES];
372 u64 signal_ggtt[I915_NUM_ENGINES];
376 int (*sync_to)(struct drm_i915_gem_request *req,
377 struct drm_i915_gem_request *signal);
378 u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
382 struct tasklet_struct irq_tasklet;
383 struct execlist_port {
384 struct drm_i915_gem_request *request;
386 GEM_DEBUG_DECL(u32 context_id);
388 struct rb_root execlist_queue;
389 struct rb_node *execlist_first;
390 unsigned int fw_domains;
392 /* Contexts are pinned whilst they are active on the GPU. The last
393 * context executed remains active whilst the GPU is idle - the
394 * switch away and write to the context object only occurs on the
395 * next execution. Contexts are only unpinned on retirement of the
396 * following request ensuring that we can always write to the object
397 * on the context switch even after idling. Across suspend, we switch
398 * to the kernel context and trash it as the save may not happen
399 * before the hardware is powered down.
401 struct i915_gem_context *last_retired_context;
403 /* We track the current MI_SET_CONTEXT in order to eliminate
404 * redundant context switches. This presumes that requests are not
405 * reordered! Or, when they are, the tracking is updated along with
406 * the emission of individual requests into the legacy command
409 struct i915_gem_context *legacy_active_context;
411 struct intel_engine_hangcheck hangcheck;
413 bool needs_cmd_parser;
416 * Table of commands the command parser needs to know about
419 DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
422 * Table of registers allowed in commands that read/write registers.
424 const struct drm_i915_reg_table *reg_tables;
428 * Returns the bitmask for the length field of the specified command,
429 * or 0 for an unrecognized/invalid command.
431 * If the command parser finds an entry for a command in the engine's
432 * cmd_tables, it gets the command's length based on the table entry.
433 * If not, it calls this function to determine the per-engine length
434 * field encoding for the command (i.e. different opcode ranges use
435 * certain bits to encode the command length in the header).
437 u32 (*get_cmd_length_mask)(u32 cmd_header);
440 static inline unsigned
441 intel_engine_flag(const struct intel_engine_cs *engine)
443 return 1 << engine->id;
447 intel_flush_status_page(struct intel_engine_cs *engine, int reg)
450 clflush(&engine->status_page.page_addr[reg]);
455 intel_read_status_page(struct intel_engine_cs *engine, int reg)
457 /* Ensure that the compiler doesn't optimize away the load. */
458 return READ_ONCE(engine->status_page.page_addr[reg]);
462 intel_write_status_page(struct intel_engine_cs *engine,
465 engine->status_page.page_addr[reg] = value;
469 * Reads a dword out of the status page, which is written to from the command
470 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
473 * The following dwords have a reserved meaning:
474 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
475 * 0x04: ring 0 head pointer
476 * 0x05: ring 1 head pointer (915-class)
477 * 0x06: ring 2 head pointer (915-class)
478 * 0x10-0x1b: Context status DWords (GM45)
479 * 0x1f: Last written status offset. (GM45)
480 * 0x20-0x2f: Reserved (Gen6+)
482 * The area from dword 0x30 to 0x3ff is available for driver usage.
484 #define I915_GEM_HWS_INDEX 0x30
485 #define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
486 #define I915_GEM_HWS_SCRATCH_INDEX 0x40
487 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
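/*
 * Worked example (assuming MI_STORE_DWORD_INDEX_SHIFT converts a dword index
 * into a byte offset, i.e. a shift of 2): the breadcrumb seqno at dword
 * I915_GEM_HWS_INDEX (0x30) lives at byte 0x30 << 2 == 0xc0 of the status
 * page, which is the I915_GEM_HWS_INDEX_ADDR that intel_hws_seqno_address()
 * adds to the page's ggtt offset further below.
 */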
490 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
491 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
492 void intel_ring_unpin(struct intel_ring *ring);
493 void intel_ring_free(struct intel_ring *ring);
495 void intel_engine_stop(struct intel_engine_cs *engine);
496 void intel_engine_cleanup(struct intel_engine_cs *engine);
498 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
500 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
502 u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
505 intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
509 * This serves as a placeholder in the code so that the reader
510 * can compare against the preceding intel_ring_begin() and
511 * check that the number of dwords emitted matches the space
512 * reserved for the command packet (i.e. the value passed to
513 * intel_ring_begin()).
515 GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
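/*
 * Illustrative pairing of intel_ring_begin() and intel_ring_advance()
 * (a hypothetical caller; the dword count and payload are made up):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */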
519 intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
521 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
522 u32 offset = addr - req->ring->vaddr;
523 GEM_BUG_ON(offset > req->ring->size);
524 return offset & (req->ring->size - 1);
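/*
 * e.g. with a 4096 byte ring, an addr exactly ring->size bytes past vaddr
 * computes 4096 & 4095 == 0, so the wrapped offset 0 is returned instead of
 * ring->size, per the comment above.
 */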
527 void intel_ring_update_space(struct intel_ring *ring);
529 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
531 void intel_engine_setup_common(struct intel_engine_cs *engine);
532 int intel_engine_init_common(struct intel_engine_cs *engine);
533 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
534 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
536 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
537 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
538 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
539 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
540 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
542 u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
543 u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
545 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
547 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
550 static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
552 /* We are only peeking at the tail of the submit queue (and not the
553 * queue itself) in order to gain a hint as to the current active
554 * state of the engine. Callers are not expected to be taking
555 * engine->timeline->lock, nor are they expected to be concerned
556 * with serialising this hint with anything, so document it as
557 * a hint and nothing more.
559 return READ_ONCE(engine->timeline->seqno);
562 int init_workarounds_ring(struct intel_engine_cs *engine);
563 int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
565 void intel_engine_get_instdone(struct intel_engine_cs *engine,
566 struct intel_instdone *instdone);
569 * Arbitrary size for largest possible 'add request' sequence. The code paths
570 * are complex and variable. Empirical measurement shows that the worst case
571 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
572 * we need to allocate double the largest single packet within that emission
573 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
575 #define MIN_SPACE_FOR_ADD_REQUEST 336
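/* i.e. (6 + 6 + 72) dwords * sizeof(u32) == 336 bytes for the doubled BDW worst case above. */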
577 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
579 return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
582 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
583 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
585 static inline void intel_wait_init(struct intel_wait *wait,
586 struct drm_i915_gem_request *rq)
592 static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
598 static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
604 intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
607 return intel_wait_has_seqno(wait);
611 intel_wait_update_request(struct intel_wait *wait,
612 const struct drm_i915_gem_request *rq)
614 return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
618 intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
620 return wait->seqno == seqno;
624 intel_wait_check_request(const struct intel_wait *wait,
625 const struct drm_i915_gem_request *rq)
627 return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
630 static inline bool intel_wait_complete(const struct intel_wait *wait)
632 return RB_EMPTY_NODE(&wait->node);
635 bool intel_engine_add_wait(struct intel_engine_cs *engine,
636 struct intel_wait *wait);
637 void intel_engine_remove_wait(struct intel_engine_cs *engine,
638 struct intel_wait *wait);
639 void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
640 void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
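/*
 * Rough usage sketch for the wait helpers above (hypothetical and heavily
 * simplified; the real wait loop also handles signals, timeouts and the
 * bottom-half handoff between waiters):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	intel_engine_add_wait(engine, &wait);
 *	while (!i915_gem_request_completed(rq))
 *		io_schedule();
 *	intel_engine_remove_wait(engine, &wait);
 */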
642 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
644 return READ_ONCE(engine->breadcrumbs.irq_wait);
647 unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
648 #define ENGINE_WAKEUP_WAITER BIT(0)
649 #define ENGINE_WAKEUP_ASLEEP BIT(1)
651 void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
652 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
654 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
655 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
656 bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
658 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
660 memset(batch, 0, 6 * sizeof(u32));
662 batch[0] = GFX_OP_PIPE_CONTROL(6);
669 bool intel_engine_is_idle(struct intel_engine_cs *engine);
670 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
672 #endif /* _INTEL_RINGBUFFER_H_ */