2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
39 * One of the main differences with the legacy HW contexts is that logical
40 * ring contexts incorporate many more things to the context's state, like
41 * PDPs or ringbuffer control registers:
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45 * contained there means you don't need to do a ppgtt->switch_mm yourself;
46 * instead, the GPU will do it for you on the context switch.
48 * But, what about the ringbuffer control registers (head, tail, etc.)?
49 * Shouldn't we just need one set of those per engine command streamer? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57 * to a context is via a context execution list, ergo "Execlists".
60 * Regarding the creation of contexts, we have:
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR contexts.
79 * The local context starts its life as a hollow or blank holder that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and so on.
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but is instead kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the context itself.
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107 * globally unique 20-bit submission ID.
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
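 *
 * Worked example (illustrative only, not taken from the hardware spec): with a
 * queue of requests A1, A2, B1 (letters denoting contexts), A1 is discarded
 * because executing up to A2's tail also covers A1 (both live in context A's
 * ringbuffer), and the pair written to the ELSP is {A2, B1}. With a queue of
 * just A1, A2, the submitted pair is {A2, NULL}.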
134 #include <linux/interrupt.h>
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
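/* Sizes of the backing objects allocated for the per-engine logical ring
 * context images; the render engine needs a much larger image than the
 * other command streamers.
 */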
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
145 #define RING_EXECLIST_QFULL (1 << 0x2)
146 #define RING_EXECLIST1_VALID (1 << 0x3)
147 #define RING_EXECLIST0_VALID (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
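/* Only CSB entries carrying at least one of the bits below are treated as
 * completion events by the interrupt handler; all other entries are skipped.
 */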
159 #define GEN8_CTX_STATUS_COMPLETED_MASK \
160 (GEN8_CTX_STATUS_ACTIVE_IDLE | \
161 GEN8_CTX_STATUS_PREEMPTED | \
162 GEN8_CTX_STATUS_ELEMENT_SWITCH)
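/* Dword offsets of the (reg, value) pairs within the logical ring context
 * image; used with ASSIGN_CTX_REG() and friends when populating the default
 * register state below.
 */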
164 #define CTX_LRI_HEADER_0 0x01
165 #define CTX_CONTEXT_CONTROL 0x02
166 #define CTX_RING_HEAD 0x04
167 #define CTX_RING_TAIL 0x06
168 #define CTX_RING_BUFFER_START 0x08
169 #define CTX_RING_BUFFER_CONTROL 0x0a
170 #define CTX_BB_HEAD_U 0x0c
171 #define CTX_BB_HEAD_L 0x0e
172 #define CTX_BB_STATE 0x10
173 #define CTX_SECOND_BB_HEAD_U 0x12
174 #define CTX_SECOND_BB_HEAD_L 0x14
175 #define CTX_SECOND_BB_STATE 0x16
176 #define CTX_BB_PER_CTX_PTR 0x18
177 #define CTX_RCS_INDIRECT_CTX 0x1a
178 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
179 #define CTX_LRI_HEADER_1 0x21
180 #define CTX_CTX_TIMESTAMP 0x22
181 #define CTX_PDP3_UDW 0x24
182 #define CTX_PDP3_LDW 0x26
183 #define CTX_PDP2_UDW 0x28
184 #define CTX_PDP2_LDW 0x2a
185 #define CTX_PDP1_UDW 0x2c
186 #define CTX_PDP1_LDW 0x2e
187 #define CTX_PDP0_UDW 0x30
188 #define CTX_PDP0_LDW 0x32
189 #define CTX_LRI_HEADER_2 0x41
190 #define CTX_R_PWR_CLK_STATE 0x42
191 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
193 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
194 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
195 (reg_state)[(pos)+1] = (val); \
198 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
199 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
200 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
201 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
204 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
205 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
206 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
209 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
210 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
212 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
213 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
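/* Number of MI_NOOP dwords reserved after each breadcrumb as padding for
 * WaIdleLiteRestore (see gen8_emit_wa_tail() and execlists_dequeue()).
 */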
215 #define WA_TAIL_DWORDS 2
217 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
218 struct intel_engine_cs *engine);
219 static void execlists_init_reg_state(u32 *reg_state,
220 struct i915_gem_context *ctx,
221 struct intel_engine_cs *engine,
222 struct intel_ring *ring);
225 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
226 * @dev_priv: i915 device private
227 * @enable_execlists: value of i915.enable_execlists module parameter.
229 * Only certain platforms support Execlists (the prerequisites being
230 * support for Logical Ring Contexts and Aliasing PPGTT or better).
232 * Return: 1 if Execlists is supported and has to be enabled.
234 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
236	/* On platforms with execlists available, vGPU will only
237	 * support execlists mode, no ring buffer mode.
239 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
242 if (INTEL_GEN(dev_priv) >= 9)
245 if (enable_execlists == 0)
248 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
249 USES_PPGTT(dev_priv) &&
250 i915.use_mmio_flip >= 0)
257 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
258 * for a pinned context
259 * @ctx: Context to work on
260 * @engine: Engine the descriptor will be used with
262 * The context descriptor encodes various attributes of a context,
263 * including its GTT address and some flags. Because it's fairly
264 * expensive to calculate, we'll just do it once and cache the result,
265 * which remains valid until the context is unpinned.
267 * This is what a descriptor looks like, from LSB to MSB::
269 * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
270 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
271 * bits 32-52: ctx ID, a globally unique tag
272 * bits 53-54: mbz, reserved for use by hardware
273 * bits 55-63: group ID, currently unused and set to 0
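 *
 * As an illustration (assuming, hypothetically, that desc_template contributes
 * no bits), a context whose PPHWSP sits at GGTT offset 0x00100000 and whose
 * hw_id is 3 would get the descriptor
 * ((u64)3 << GEN8_CTX_ID_SHIFT) | 0x00100000.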
276 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
277 struct intel_engine_cs *engine)
279 struct intel_context *ce = &ctx->engine[engine->id];
282 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
284 desc = ctx->desc_template; /* bits 0-11 */
285 desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
287 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
292 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
293 struct intel_engine_cs *engine)
295 return ctx->engine[engine->id].lrc_desc;
299 execlists_context_status_change(struct drm_i915_gem_request *rq,
300 unsigned long status)
303 * Only used when GVT-g is enabled now. When GVT-g is disabled,
304	 * the compiler should eliminate this function as dead-code.
306 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
309 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
313 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
315 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
316 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
317 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
318 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
321 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
323 struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
324 struct i915_hw_ppgtt *ppgtt =
325 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
326 u32 *reg_state = ce->lrc_reg_state;
328 reg_state[CTX_RING_TAIL+1] = rq->tail;
330 /* True 32b PPGTT with dynamic page allocation: update PDP
331	 * registers and point the unallocated PDPs to the scratch page.
332	 * PML4 is allocated during ppgtt init, so this is not needed in 48-bit mode.
335 if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
336 execlists_update_context_pdps(ppgtt, reg_state);
341 static void execlists_submit_ports(struct intel_engine_cs *engine)
343 struct drm_i915_private *dev_priv = engine->i915;
344 struct execlist_port *port = engine->execlist_port;
346 dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
349 GEM_BUG_ON(port[0].count > 1);
351 execlists_context_status_change(port[0].request,
352 INTEL_CONTEXT_SCHEDULE_IN);
353 desc[0] = execlists_update_context(port[0].request);
354 GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
357 if (port[1].request) {
358 GEM_BUG_ON(port[1].count);
359 execlists_context_status_change(port[1].request,
360 INTEL_CONTEXT_SCHEDULE_IN);
361 desc[1] = execlists_update_context(port[1].request);
362 GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
367 GEM_BUG_ON(desc[0] == desc[1]);
369 /* You must always write both descriptors in the order below. */
370 writel(upper_32_bits(desc[1]), elsp);
371 writel(lower_32_bits(desc[1]), elsp);
373 writel(upper_32_bits(desc[0]), elsp);
374 /* The context is automatically loaded after the following */
375 writel(lower_32_bits(desc[0]), elsp);
378 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
380 return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
381 i915_gem_context_force_single_submission(ctx));
384 static bool can_merge_ctx(const struct i915_gem_context *prev,
385 const struct i915_gem_context *next)
390 if (ctx_single_port_submission(prev))
396 static void execlists_dequeue(struct intel_engine_cs *engine)
398 struct drm_i915_gem_request *last;
399 struct execlist_port *port = engine->execlist_port;
404 last = port->request;
406 /* WaIdleLiteRestore:bdw,skl
407 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
408 * as we resubmit the request. See gen8_emit_breadcrumb()
409		 * for where we prepare the padding after the end of the request.
412 last->tail = last->wa_tail;
414 GEM_BUG_ON(port[1].request);
416 /* Hardware submission is through 2 ports. Conceptually each port
417 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
418 * static for a context, and unique to each, so we only execute
419 * requests belonging to a single context from each ring. RING_HEAD
420 * is maintained by the CS in the context image, it marks the place
421 * where it got up to last time, and through RING_TAIL we tell the CS
422 * where we want to execute up to this time.
424 * In this list the requests are in order of execution. Consecutive
425 * requests from the same context are adjacent in the ringbuffer. We
426 * can combine these requests into a single RING_TAIL update:
428 * RING_HEAD...req1...req2
430 * since to execute req2 the CS must first execute req1.
432 * Our goal then is to point each port to the end of a consecutive
433 * sequence of requests as being the most optimal (fewest wake ups
434 * and context switches) submission.
437 spin_lock_irqsave(&engine->timeline->lock, flags);
438 rb = engine->execlist_first;
440 struct drm_i915_gem_request *cursor =
441 rb_entry(rb, typeof(*cursor), priotree.node);
443 /* Can we combine this request with the current port? It has to
444 * be the same context/ringbuffer and not have any exceptions
445 * (e.g. GVT saying never to combine contexts).
447 * If we can combine the requests, we can execute both by
448 * updating the RING_TAIL to point to the end of the second
449		 * request, and so we never need to tell the hardware about the first.
452 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
453 /* If we are on the second port and cannot combine
454 * this request with the last, then we are done.
456 if (port != engine->execlist_port)
459 /* If GVT overrides us we only ever submit port[0],
460 * leaving port[1] empty. Note that we also have
461 * to be careful that we don't queue the same
462			 * context (even though a different request) to the second port.
465 if (ctx_single_port_submission(last->ctx) ||
466 ctx_single_port_submission(cursor->ctx))
469 GEM_BUG_ON(last->ctx == cursor->ctx);
471 i915_gem_request_assign(&port->request, last);
476 rb_erase(&cursor->priotree.node, &engine->execlist_queue);
477 RB_CLEAR_NODE(&cursor->priotree.node);
478 cursor->priotree.priority = INT_MAX;
480 __i915_gem_request_submit(cursor);
485 i915_gem_request_assign(&port->request, last);
486 engine->execlist_first = rb;
488 spin_unlock_irqrestore(&engine->timeline->lock, flags);
491 execlists_submit_ports(engine);
494 static bool execlists_elsp_idle(struct intel_engine_cs *engine)
496 return !engine->execlist_port[0].request;
500 * intel_execlists_idle() - Determine if all engine submission ports are idle
501 * @dev_priv: i915 device private
503 * Return true if there are no requests pending on any of the submission ports
506 bool intel_execlists_idle(struct drm_i915_private *dev_priv)
508 struct intel_engine_cs *engine;
509 enum intel_engine_id id;
511 if (!i915.enable_execlists)
514 for_each_engine(engine, dev_priv, id) {
515 /* Interrupt/tasklet pending? */
516 if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
519 /* Both ports drained, no more ELSP submission? */
520 if (!execlists_elsp_idle(engine))
527 static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
529 const struct execlist_port *port = engine->execlist_port;
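	/* Each port's count tracks how many times its request has been written
	 * to the ELSP (a lite-restore resubmission bumps port[0].count to 2),
	 * so a new submission is only allowed while fewer than two writes are
	 * outstanding.
	 */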
531 return port[0].count + port[1].count < 2;
535 * Check the unread Context Status Buffers and manage the submission of new
536 * contexts to the ELSP accordingly.
538 static void intel_lrc_irq_handler(unsigned long data)
540 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
541 struct execlist_port *port = engine->execlist_port;
542 struct drm_i915_private *dev_priv = engine->i915;
544 intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
546 while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
547 u32 __iomem *csb_mmio =
548 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
550 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
551 unsigned int csb, head, tail;
553 csb = readl(csb_mmio);
554 head = GEN8_CSB_READ_PTR(csb);
555 tail = GEN8_CSB_WRITE_PTR(csb);
560 tail += GEN8_CSB_ENTRIES;
562 unsigned int idx = ++head % GEN8_CSB_ENTRIES;
563 unsigned int status = readl(buf + 2 * idx);
565 /* We are flying near dragons again.
567 * We hold a reference to the request in execlist_port[]
568 * but no more than that. We are operating in softirq
569 * context and so cannot hold any mutex or sleep. That
570 * prevents us stopping the requests we are processing
571 * in port[] from being retired simultaneously (the
572 * breadcrumb will be complete before we see the
573 * context-switch). As we only hold the reference to the
574 * request, any pointer chasing underneath the request
575 * is subject to a potential use-after-free. Thus we
576 * store all of the bookkeeping within port[] as
577 * required, and avoid using unguarded pointers beneath
578 * request itself. The same applies to the atomic
582 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
585 /* Check the context/desc id for this event matches */
586 GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
589 GEM_BUG_ON(port[0].count == 0);
590 if (--port[0].count == 0) {
591 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
592 execlists_context_status_change(port[0].request,
593 INTEL_CONTEXT_SCHEDULE_OUT);
595 i915_gem_request_put(port[0].request);
597 memset(&port[1], 0, sizeof(port[1]));
600 GEM_BUG_ON(port[0].count == 0 &&
601 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
602 } while (head < tail);
604 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
605 GEN8_CSB_WRITE_PTR(csb) << 8),
609 if (execlists_elsp_ready(engine))
610 execlists_dequeue(engine);
612 intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
615 static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
617 struct rb_node **p, *rb;
620 /* most positive priority is scheduled first, equal priorities fifo */
624 struct i915_priotree *pos;
627 pos = rb_entry(rb, typeof(*pos), node);
628 if (pt->priority > pos->priority) {
635 rb_link_node(&pt->node, rb, p);
636 rb_insert_color(&pt->node, root);
641 static void execlists_submit_request(struct drm_i915_gem_request *request)
643 struct intel_engine_cs *engine = request->engine;
646 /* Will be called from irq-context when using foreign fences. */
647 spin_lock_irqsave(&engine->timeline->lock, flags);
649 if (insert_request(&request->priotree, &engine->execlist_queue)) {
650 engine->execlist_first = &request->priotree.node;
651 if (execlists_elsp_ready(engine))
652 tasklet_hi_schedule(&engine->irq_tasklet);
655 spin_unlock_irqrestore(&engine->timeline->lock, flags);
658 static struct intel_engine_cs *
659 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
661 struct intel_engine_cs *engine;
663 engine = container_of(pt,
664 struct drm_i915_gem_request,
666 if (engine != locked) {
668 spin_unlock_irq(&locked->timeline->lock);
669 spin_lock_irq(&engine->timeline->lock);
675 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
677 struct intel_engine_cs *engine = NULL;
678 struct i915_dependency *dep, *p;
679 struct i915_dependency stack;
682 if (prio <= READ_ONCE(request->priotree.priority))
685 /* Need BKL in order to use the temporary link inside i915_dependency */
686 lockdep_assert_held(&request->i915->drm.struct_mutex);
688 stack.signaler = &request->priotree;
689 list_add(&stack.dfs_link, &dfs);
691 /* Recursively bump all dependent priorities to match the new request.
693 * A naive approach would be to use recursion:
694 * static void update_priorities(struct i915_priotree *pt, prio) {
695 * list_for_each_entry(dep, &pt->signalers_list, signal_link)
696 * update_priorities(dep->signal, prio)
697 * insert_request(pt);
699 * but that may have unlimited recursion depth and so runs a very
700	 * real risk of overrunning the kernel stack. Instead, we build
701 * a flat list of all dependencies starting with the current request.
702 * As we walk the list of dependencies, we add all of its dependencies
703 * to the end of the list (this may include an already visited
704 * request) and continue to walk onwards onto the new dependencies. The
705 * end result is a topological list of requests in reverse order, the
706 * last element in the list is the request we must execute first.
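 *
 * As an illustrative example: if the boosted request R waits on B, and B
 * waits on A, the flat list grows as R, B, A; walking it in reverse below
 * then bumps A first, then B, then R, so every signaler is re-inserted into
 * the execlist queue ahead of the requests that depend on it.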
708 list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
709 struct i915_priotree *pt = dep->signaler;
711 list_for_each_entry(p, &pt->signalers_list, signal_link)
712 if (prio > READ_ONCE(p->signaler->priority))
713 list_move_tail(&p->dfs_link, &dfs);
715 list_safe_reset_next(dep, p, dfs_link);
716 if (!RB_EMPTY_NODE(&pt->node))
719 engine = pt_lock_engine(pt, engine);
721 /* If it is not already in the rbtree, we can update the
722		 * priority in place and skip over it (and its dependencies)
723 * if it is referenced *again* as we descend the dfs.
725 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
727 list_del_init(&dep->dfs_link);
731 /* Fifo and depth-first replacement ensure our deps execute before us */
732 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
733 struct i915_priotree *pt = dep->signaler;
735 INIT_LIST_HEAD(&dep->dfs_link);
737 engine = pt_lock_engine(pt, engine);
739 if (prio <= pt->priority)
742 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
745 rb_erase(&pt->node, &engine->execlist_queue);
746 if (insert_request(pt, &engine->execlist_queue))
747 engine->execlist_first = &pt->node;
751 spin_unlock_irq(&engine->timeline->lock);
753 /* XXX Do we need to preempt to make room for us and our deps? */
756 static int execlists_context_pin(struct intel_engine_cs *engine,
757 struct i915_gem_context *ctx)
759 struct intel_context *ce = &ctx->engine[engine->id];
764 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
770 ret = execlists_context_deferred_alloc(ctx, engine);
774 GEM_BUG_ON(!ce->state);
776 flags = PIN_GLOBAL | PIN_HIGH;
777 if (ctx->ggtt_offset_bias)
778 flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
780 ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
784 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
786 ret = PTR_ERR(vaddr);
790 ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
794 intel_lr_context_descriptor_update(ctx, engine);
796 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
797 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
798 i915_ggtt_offset(ce->ring->vma);
800 ce->state->obj->mm.dirty = true;
802 i915_gem_context_get(ctx);
806 i915_gem_object_unpin_map(ce->state->obj);
808 __i915_vma_unpin(ce->state);
814 static void execlists_context_unpin(struct intel_engine_cs *engine,
815 struct i915_gem_context *ctx)
817 struct intel_context *ce = &ctx->engine[engine->id];
819 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
820 GEM_BUG_ON(ce->pin_count == 0);
825 intel_ring_unpin(ce->ring);
827 i915_gem_object_unpin_map(ce->state->obj);
828 i915_vma_unpin(ce->state);
830 i915_gem_context_put(ctx);
833 static int execlists_request_alloc(struct drm_i915_gem_request *request)
835 struct intel_engine_cs *engine = request->engine;
836 struct intel_context *ce = &request->ctx->engine[engine->id];
839 GEM_BUG_ON(!ce->pin_count);
841 /* Flush enough space to reduce the likelihood of waiting after
842 * we start building the request - in which case we will just
843 * have to repeat work.
845 request->reserved_space += EXECLISTS_REQUEST_SIZE;
847 GEM_BUG_ON(!ce->ring);
848 request->ring = ce->ring;
850 if (i915.enable_guc_submission) {
852 * Check that the GuC has space for the request before
853 * going any further, as the i915_add_request() call
854 * later on mustn't fail ...
856 ret = i915_guc_wq_reserve(request);
861 ret = intel_ring_begin(request, 0);
865 if (!ce->initialised) {
866 ret = engine->init_context(request);
870 ce->initialised = true;
873 /* Note that after this point, we have committed to using
874 * this request as it is being used to both track the
875 * state of engine initialisation and liveness of the
876 * golden renderstate above. Think twice before you try
877 * to cancel/unwind this request now.
880 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
884 if (i915.enable_guc_submission)
885 i915_guc_wq_unreserve(request);
890 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
893 struct intel_ring *ring = req->ring;
894 struct i915_workarounds *w = &req->i915->workarounds;
899 ret = req->engine->emit_flush(req, EMIT_BARRIER);
903 ret = intel_ring_begin(req, w->count * 2 + 2);
907 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
908 for (i = 0; i < w->count; i++) {
909 intel_ring_emit_reg(ring, w->reg[i].addr);
910 intel_ring_emit(ring, w->reg[i].value);
912 intel_ring_emit(ring, MI_NOOP);
914 intel_ring_advance(ring);
916 ret = req->engine->emit_flush(req, EMIT_BARRIER);
923 #define wa_ctx_emit(batch, index, cmd) \
925 int __index = (index)++; \
926 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
929 batch[__index] = (cmd); \
932 #define wa_ctx_emit_reg(batch, index, reg) \
933 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
936 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
937 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
938 * but there is a slight complication as this is applied in a WA batch where the
939 * values are only initialized once so we cannot take register value at the
940 * beginning and reuse it further; hence we save its value to memory, upload a
941 * constant value with bit21 set and then we restore it back with the saved value.
942 * To simplify the WA, a constant value is formed by using the default value
943 * of this register. This shouldn't be a problem because we are only modifying
944 * it for a short period and this batch is non-preemptible. We can of course
945 * use additional instructions that read the actual value of the register
946 * at that time and set our bit of interest but it makes the WA complicated.
948 * This WA is also required for Gen9 so extracting as a function avoids code duplication.
951 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
955 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
957 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
958 MI_SRM_LRM_GLOBAL_GTT));
959 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
960 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
961 wa_ctx_emit(batch, index, 0);
963 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
964 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
965 wa_ctx_emit(batch, index, l3sqc4_flush);
967 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
968 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
969 PIPE_CONTROL_DC_FLUSH_ENABLE));
970 wa_ctx_emit(batch, index, 0);
971 wa_ctx_emit(batch, index, 0);
972 wa_ctx_emit(batch, index, 0);
973 wa_ctx_emit(batch, index, 0);
975 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
976 MI_SRM_LRM_GLOBAL_GTT));
977 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
978 wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
979 wa_ctx_emit(batch, index, 0);
984 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
986 uint32_t start_alignment)
988 return wa_ctx->offset = ALIGN(offset, start_alignment);
991 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
993 uint32_t size_alignment)
995 wa_ctx->size = offset - wa_ctx->offset;
997 WARN(wa_ctx->size % size_alignment,
998 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
999 wa_ctx->size, size_alignment);
1004 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1005 * initialized at the beginning and shared across all contexts but this field
1006 * helps us to have multiple batches at different offsets and select them based
1007 * on some criterion. At the moment this batch always starts at the beginning of the page
1008 * and at this point we don't have multiple wa_ctx batch buffers.
1010 * The number of WA applied are not known at the beginning; we use this field
1011 * to return the number of DWORDS written.
1013 * Note that this batch does not contain MI_BATCH_BUFFER_END,
1014 * so it adds NOOPs as padding to make it cacheline aligned.
1015 * MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them together
1016 * make a complete batch buffer.
1018 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1019 struct i915_wa_ctx_bb *wa_ctx,
1023 uint32_t scratch_addr;
1024 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1026 /* WaDisableCtxRestoreArbitration:bdw,chv */
1027 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1029 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1030 if (IS_BROADWELL(engine->i915)) {
1031 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1037 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1038 /* Actual scratch location is at 128 bytes offset */
1039 scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1041 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1042 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1043 PIPE_CONTROL_GLOBAL_GTT_IVB |
1044 PIPE_CONTROL_CS_STALL |
1045 PIPE_CONTROL_QW_WRITE));
1046 wa_ctx_emit(batch, index, scratch_addr);
1047 wa_ctx_emit(batch, index, 0);
1048 wa_ctx_emit(batch, index, 0);
1049 wa_ctx_emit(batch, index, 0);
1051 /* Pad to end of cacheline */
1052 while (index % CACHELINE_DWORDS)
1053 wa_ctx_emit(batch, index, MI_NOOP);
1056 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1057 * execution depends on the length specified in terms of cache lines
1058 * in the register CTX_RCS_INDIRECT_CTX
1061 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1065 * This batch is started immediately after the indirect_ctx batch. Since we ensure
1066 * that indirect_ctx ends on a cacheline, this batch is aligned automatically.
1068 * The number of DWORDS written is returned using this field.
1070 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1071 * to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
1073 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1074 struct i915_wa_ctx_bb *wa_ctx,
1078 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1080 /* WaDisableCtxRestoreArbitration:bdw,chv */
1081 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1083 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1085 return wa_ctx_end(wa_ctx, *offset = index, 1);
1088 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1089 struct i915_wa_ctx_bb *wa_ctx,
1094 struct drm_i915_private *dev_priv = engine->i915;
1095 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1097 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1098 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1103 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1104 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1105 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1106 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1107 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1108 wa_ctx_emit(batch, index, MI_NOOP);
1110 /* WaClearSlmSpaceAtContextSwitch:kbl */
1111 /* Actual scratch location is at 128 bytes offset */
1112 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
1114 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1116 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1117 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1118 PIPE_CONTROL_GLOBAL_GTT_IVB |
1119 PIPE_CONTROL_CS_STALL |
1120 PIPE_CONTROL_QW_WRITE));
1121 wa_ctx_emit(batch, index, scratch_addr);
1122 wa_ctx_emit(batch, index, 0);
1123 wa_ctx_emit(batch, index, 0);
1124 wa_ctx_emit(batch, index, 0);
1127 /* WaMediaPoolStateCmdInWABB:bxt,glk */
1128 if (HAS_POOLED_EU(engine->i915)) {
1130		 * EU pool configuration is set up along with the golden context
1131		 * during context initialization. This value depends on the
1132		 * device type (2x6 or 3x6) and needs to be updated based
1133		 * on which subslice is disabled, especially for 2x6
1134		 * devices. However, it is safe to load the default
1135		 * configuration of a 3x6 device instead of masking off
1136		 * the corresponding bits, because HW ignores bits of a disabled
1137		 * subslice and drops down to the appropriate config. Please
1138 * see render_state_setup() in i915_gem_render_state.c for
1139 * possible configurations, to avoid duplication they are
1140 * not shown here again.
1142 u32 eu_pool_config = 0x00777000;
1143 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1144 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1145 wa_ctx_emit(batch, index, eu_pool_config);
1146 wa_ctx_emit(batch, index, 0);
1147 wa_ctx_emit(batch, index, 0);
1148 wa_ctx_emit(batch, index, 0);
1151 /* Pad to end of cacheline */
1152 while (index % CACHELINE_DWORDS)
1153 wa_ctx_emit(batch, index, MI_NOOP);
1155 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1158 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1159 struct i915_wa_ctx_bb *wa_ctx,
1163 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1165 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1167 return wa_ctx_end(wa_ctx, *offset = index, 1);
1170 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1172 struct drm_i915_gem_object *obj;
1173 struct i915_vma *vma;
1176 obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
1178 return PTR_ERR(obj);
1180 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1186 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1190 engine->wa_ctx.vma = vma;
1194 i915_gem_object_put(obj);
1198 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1200 i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1203 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1205 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1211 WARN_ON(engine->id != RCS);
1213	/* update this when WAs for higher Gens are added */
1214 if (INTEL_GEN(engine->i915) > 9) {
1215 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1216 INTEL_GEN(engine->i915));
1220	/* some WAs perform writes to the scratch page, ensure it is valid */
1221 if (!engine->scratch) {
1222 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1226 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1228 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1232 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1233 batch = kmap_atomic(page);
1236 if (IS_GEN8(engine->i915)) {
1237 ret = gen8_init_indirectctx_bb(engine,
1238 &wa_ctx->indirect_ctx,
1244 ret = gen8_init_perctx_bb(engine,
1250 } else if (IS_GEN9(engine->i915)) {
1251 ret = gen9_init_indirectctx_bb(engine,
1252 &wa_ctx->indirect_ctx,
1258 ret = gen9_init_perctx_bb(engine,
1267 kunmap_atomic(batch);
1269 lrc_destroy_wa_ctx_obj(engine);
1274 static u32 port_seqno(struct execlist_port *port)
1276 return port->request ? port->request->global_seqno : 0;
1279 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1281 struct drm_i915_private *dev_priv = engine->i915;
1284 ret = intel_mocs_init_engine(engine);
1288 intel_engine_reset_breadcrumbs(engine);
1289 intel_engine_init_hangcheck(engine);
1291 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1292 I915_WRITE(RING_MODE_GEN7(engine),
1293 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1294 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1295 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1296 engine->status_page.ggtt_offset);
1297 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1299 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1301 /* After a GPU reset, we may have requests to replay */
1302 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1303 if (!execlists_elsp_idle(engine)) {
1304 DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
1306 port_seqno(&engine->execlist_port[0]),
1307 port_seqno(&engine->execlist_port[1]));
1308 engine->execlist_port[0].count = 0;
1309 engine->execlist_port[1].count = 0;
1310 execlists_submit_ports(engine);
1316 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1318 struct drm_i915_private *dev_priv = engine->i915;
1321 ret = gen8_init_common_ring(engine);
1325 /* We need to disable the AsyncFlip performance optimisations in order
1326 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1327 * programmed to '1' on all products.
1329 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1331 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1333 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1335 return init_workarounds_ring(engine);
1338 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1342 ret = gen8_init_common_ring(engine);
1346 return init_workarounds_ring(engine);
1349 static void reset_common_ring(struct intel_engine_cs *engine,
1350 struct drm_i915_gem_request *request)
1352 struct execlist_port *port = engine->execlist_port;
1353 struct intel_context *ce;
1355 /* If the request was innocent, we leave the request in the ELSP
1356 * and will try to replay it on restarting. The context image may
1357 * have been corrupted by the reset, in which case we may have
1358	 * to service a new GPU hang, but more likely we can continue on without impact.
1361 * If the request was guilty, we presume the context is corrupt
1362 * and have to at least restore the RING register in the context
1363 * image back to the expected values to skip over the guilty request.
1365 if (!request || request->fence.error != -EIO)
1368 /* We want a simple context + ring to execute the breadcrumb update.
1369 * We cannot rely on the context being intact across the GPU hang,
1370 * so clear it and rebuild just what we need for the breadcrumb.
1371 * All pending requests for this context will be zapped, and any
1372 * future request will be after userspace has had the opportunity
1373 * to recreate its own state.
1375 ce = &request->ctx->engine[engine->id];
1376 execlists_init_reg_state(ce->lrc_reg_state,
1377 request->ctx, engine, ce->ring);
1379 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1380 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1381 i915_ggtt_offset(ce->ring->vma);
1382 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1384 request->ring->head = request->postfix;
1385 request->ring->last_retired_head = -1;
1386 intel_ring_update_space(request->ring);
1388 if (i915.enable_guc_submission)
1391 /* Catch up with any missed context-switch interrupts */
1392 if (request->ctx != port[0].request->ctx) {
1393 i915_gem_request_put(port[0].request);
1395 memset(&port[1], 0, sizeof(port[1]));
1398 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1400 /* Reset WaIdleLiteRestore:bdw,skl as well */
1401 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
1404 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1406 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1407 struct intel_ring *ring = req->ring;
1408 struct intel_engine_cs *engine = req->engine;
1409 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1412 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1416 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1417 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1418 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1420 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1421 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1422 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1423 intel_ring_emit(ring, lower_32_bits(pd_daddr));
1426 intel_ring_emit(ring, MI_NOOP);
1427 intel_ring_advance(ring);
1432 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1433 u64 offset, u32 len,
1434 unsigned int dispatch_flags)
1436 struct intel_ring *ring = req->ring;
1437 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1440	/* Don't rely on hw updating PDPs, especially in lite-restore.
1441 * Ideally, we should set Force PD Restore in ctx descriptor,
1442 * but we can't. Force Restore would be a second option, but
1443 * it is unsafe in case of lite-restore (because the ctx is
1444 * not idle). PML4 is allocated during ppgtt init so this is
1445 * not needed in 48-bit.*/
1446 if (req->ctx->ppgtt &&
1447 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1448 if (!i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
1449 !intel_vgpu_active(req->i915)) {
1450 ret = intel_logical_ring_emit_pdps(req);
1455 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1458 ret = intel_ring_begin(req, 4);
1462 /* FIXME(BDW): Address space and security selectors. */
1463 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1465 (dispatch_flags & I915_DISPATCH_RS ?
1466 MI_BATCH_RESOURCE_STREAMER : 0));
1467 intel_ring_emit(ring, lower_32_bits(offset));
1468 intel_ring_emit(ring, upper_32_bits(offset));
1469 intel_ring_emit(ring, MI_NOOP);
1470 intel_ring_advance(ring);
1475 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1477 struct drm_i915_private *dev_priv = engine->i915;
1478 I915_WRITE_IMR(engine,
1479 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1480 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1483 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1485 struct drm_i915_private *dev_priv = engine->i915;
1486 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1489 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
1491 struct intel_ring *ring = request->ring;
1495 ret = intel_ring_begin(request, 4);
1499 cmd = MI_FLUSH_DW + 1;
1501 /* We always require a command barrier so that subsequent
1502 * commands, such as breadcrumb interrupts, are strictly ordered
1503 * wrt the contents of the write cache being flushed to memory
1504 * (and thus being coherent from the CPU).
1506 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1508 if (mode & EMIT_INVALIDATE) {
1509 cmd |= MI_INVALIDATE_TLB;
1510 if (request->engine->id == VCS)
1511 cmd |= MI_INVALIDATE_BSD;
1514 intel_ring_emit(ring, cmd);
1515 intel_ring_emit(ring,
1516 I915_GEM_HWS_SCRATCH_ADDR |
1517 MI_FLUSH_DW_USE_GTT);
1518 intel_ring_emit(ring, 0); /* upper addr */
1519 intel_ring_emit(ring, 0); /* value */
1520 intel_ring_advance(ring);
1525 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1528 struct intel_ring *ring = request->ring;
1529 struct intel_engine_cs *engine = request->engine;
1531 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1532 bool vf_flush_wa = false, dc_flush_wa = false;
1537 flags |= PIPE_CONTROL_CS_STALL;
1539 if (mode & EMIT_FLUSH) {
1540 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1541 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1542 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1543 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1546 if (mode & EMIT_INVALIDATE) {
1547 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1548 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1549 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1550 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1551 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1552 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1553 flags |= PIPE_CONTROL_QW_WRITE;
1554 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1557	 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL PIPE_CONTROL.
1560 if (IS_GEN9(request->i915))
1563 /* WaForGAMHang:kbl */
1564 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1576 ret = intel_ring_begin(request, len);
1581 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1582 intel_ring_emit(ring, 0);
1583 intel_ring_emit(ring, 0);
1584 intel_ring_emit(ring, 0);
1585 intel_ring_emit(ring, 0);
1586 intel_ring_emit(ring, 0);
1590 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1591 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1592 intel_ring_emit(ring, 0);
1593 intel_ring_emit(ring, 0);
1594 intel_ring_emit(ring, 0);
1595 intel_ring_emit(ring, 0);
1598 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1599 intel_ring_emit(ring, flags);
1600 intel_ring_emit(ring, scratch_addr);
1601 intel_ring_emit(ring, 0);
1602 intel_ring_emit(ring, 0);
1603 intel_ring_emit(ring, 0);
1606 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1607 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1608 intel_ring_emit(ring, 0);
1609 intel_ring_emit(ring, 0);
1610 intel_ring_emit(ring, 0);
1611 intel_ring_emit(ring, 0);
1614 intel_ring_advance(ring);
1620 * Reserve space for 2 NOOPs at the end of each request to be
1621 * used as a workaround for not being allowed to do lite
1622 * restore with HEAD==TAIL (WaIdleLiteRestore).
1624 static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
1628 request->wa_tail = intel_ring_offset(request->ring, out);
1631 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
1634 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1635 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1637 *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1638 *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
1640 *out++ = request->global_seqno;
1641 *out++ = MI_USER_INTERRUPT;
1643 request->tail = intel_ring_offset(request->ring, out);
1645 gen8_emit_wa_tail(request, out);
1648 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
1650 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
1653 /* We're using qword write, seqno should be aligned to 8 bytes. */
1654 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1656	/* w/a for post sync ops following a GPGPU operation: we
1657	 * need a prior CS_STALL, which is emitted by the flush
1658 * following the batch.
1660 *out++ = GFX_OP_PIPE_CONTROL(6);
1661 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1662 PIPE_CONTROL_CS_STALL |
1663 PIPE_CONTROL_QW_WRITE);
1664 *out++ = intel_hws_seqno_address(request->engine);
1666 *out++ = request->global_seqno;
1667 /* We're thrashing one dword of HWS. */
1669 *out++ = MI_USER_INTERRUPT;
1671 request->tail = intel_ring_offset(request->ring, out);
1673 gen8_emit_wa_tail(request, out);
1676 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
1678 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1682 ret = intel_logical_ring_workarounds_emit(req);
1686 ret = intel_rcs_context_init_mocs(req);
1688	 * Failing to program the MOCS is non-fatal. The system will not
1689 * run at peak performance. So generate an error and carry on.
1692 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1694 return i915_gem_render_state_emit(req);
1698 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1699 * @engine: Engine Command Streamer.
1701 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1703 struct drm_i915_private *dev_priv;
1706	 * Tasklet cannot be active at this point due to intel_mark_active/idle
1707 * so this is just for documentation.
1709 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1710 tasklet_kill(&engine->irq_tasklet);
1712 dev_priv = engine->i915;
1714 if (engine->buffer) {
1715 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1718 if (engine->cleanup)
1719 engine->cleanup(engine);
1721 if (engine->status_page.vma) {
1722 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1723 engine->status_page.vma = NULL;
1726 intel_engine_cleanup_common(engine);
1728 lrc_destroy_wa_ctx_obj(engine);
1729 engine->i915 = NULL;
1730 dev_priv->engine[engine->id] = NULL;
1734 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
1736 struct intel_engine_cs *engine;
1737 enum intel_engine_id id;
1739 for_each_engine(engine, dev_priv, id) {
1740 engine->submit_request = execlists_submit_request;
1741 engine->schedule = execlists_schedule;
1746 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1748	/* Default vfuncs which can be overridden by each engine. */
1749 engine->init_hw = gen8_init_common_ring;
1750 engine->reset_hw = reset_common_ring;
1752 engine->context_pin = execlists_context_pin;
1753 engine->context_unpin = execlists_context_unpin;
1755 engine->request_alloc = execlists_request_alloc;
1757 engine->emit_flush = gen8_emit_flush;
1758 engine->emit_breadcrumb = gen8_emit_breadcrumb;
1759 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
1760 engine->submit_request = execlists_submit_request;
1761 engine->schedule = execlists_schedule;
1763 engine->irq_enable = gen8_logical_ring_enable_irq;
1764 engine->irq_disable = gen8_logical_ring_disable_irq;
1765 engine->emit_bb_start = gen8_emit_bb_start;
1769 logical_ring_default_irqs(struct intel_engine_cs *engine)
1771 unsigned shift = engine->irq_shift;
1772 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1773 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1777 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
1779 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
1782 /* The HWSP is part of the default context object in LRC mode. */
1783 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1785 return PTR_ERR(hws);
1787 engine->status_page.page_addr = hws + hws_offset;
1788 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
1789 engine->status_page.vma = vma;
1795 logical_ring_setup(struct intel_engine_cs *engine)
1797 struct drm_i915_private *dev_priv = engine->i915;
1798 enum forcewake_domains fw_domains;
1800 intel_engine_setup_common(engine);
1802 /* Intentionally left blank. */
1803 engine->buffer = NULL;
1805 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1809 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1810 RING_CONTEXT_STATUS_PTR(engine),
1811 FW_REG_READ | FW_REG_WRITE);
1813 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1814 RING_CONTEXT_STATUS_BUF_BASE(engine),
1817 engine->fw_domains = fw_domains;
1819 tasklet_init(&engine->irq_tasklet,
1820 intel_lrc_irq_handler, (unsigned long)engine);
1822 logical_ring_default_vfuncs(engine);
1823 logical_ring_default_irqs(engine);
1827 logical_ring_init(struct intel_engine_cs *engine)
1829 struct i915_gem_context *dctx = engine->i915->kernel_context;
1832 ret = intel_engine_init_common(engine);
1836	/* And set up the hardware status page. */
1837 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1839 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1846 intel_logical_ring_cleanup(engine);
1850 int logical_render_ring_init(struct intel_engine_cs *engine)
1852 struct drm_i915_private *dev_priv = engine->i915;
1855 logical_ring_setup(engine);
1857 if (HAS_L3_DPF(dev_priv))
1858 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1860 /* Override some for render ring. */
1861 if (INTEL_GEN(dev_priv) >= 9)
1862 engine->init_hw = gen9_init_render_ring;
1864 engine->init_hw = gen8_init_render_ring;
1865 engine->init_context = gen8_init_rcs_context;
1866 engine->emit_flush = gen8_emit_flush_render;
1867 engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
1868 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
1870 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
1874 ret = intel_init_workaround_bb(engine);
1877 * We continue even if we fail to initialize WA batch
1878 * because we only expect rare glitches but nothing
1879		 * critical enough to prevent us from using the GPU.
1881 DRM_ERROR("WA batch buffer initialization failed: %d\n",
1885 return logical_ring_init(engine);
1888 int logical_xcs_ring_init(struct intel_engine_cs *engine)
1890 logical_ring_setup(engine);
1892 return logical_ring_init(engine);
1896 make_rpcs(struct drm_i915_private *dev_priv)
1901 * No explicit RPCS request is needed to ensure full
1902 * slice/subslice/EU enablement prior to Gen9.
1904 if (INTEL_GEN(dev_priv) < 9)
1908 * Starting in Gen9, render power gating can leave
1909 * slice/subslice/EU in a partially enabled state. We
1910	 * must make an explicit request through RPCS for full enablement.
1913 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
1914 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1915 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
1916 GEN8_RPCS_S_CNT_SHIFT;
1917 rpcs |= GEN8_RPCS_ENABLE;
1920 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
1921 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1922 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
1923 GEN8_RPCS_SS_CNT_SHIFT;
1924 rpcs |= GEN8_RPCS_ENABLE;
1927 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
1928 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1929 GEN8_RPCS_EU_MIN_SHIFT;
1930 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1931 GEN8_RPCS_EU_MAX_SHIFT;
1932 rpcs |= GEN8_RPCS_ENABLE;
1938 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
1940 u32 indirect_ctx_offset;
1942 switch (INTEL_GEN(engine->i915)) {
1944 MISSING_CASE(INTEL_GEN(engine->i915));
1947 indirect_ctx_offset =
1948 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1951 indirect_ctx_offset =
1952 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1956 return indirect_ctx_offset;
1959 static void execlists_init_reg_state(u32 *reg_state,
1960 struct i915_gem_context *ctx,
1961 struct intel_engine_cs *engine,
1962 struct intel_ring *ring)
1964 struct drm_i915_private *dev_priv = engine->i915;
1965 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
1967 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1968 * commands followed by (reg, value) pairs. The values we are setting here are
1969 * only for the first context restore: on a subsequent save, the GPU will
1970  * recreate this batch buffer with new values (including all the missing
1971 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
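/*
 * Rough sketch of what the code below writes into the state page (register
 * addresses are derived from the engine's mmio base):
 *
 *   CTX_LRI_HEADER_0: MI_LOAD_REGISTER_IMM(14 or 11)
 *     CONTEXT_CONTROL, RING_HEAD, RING_TAIL, RING_BUFFER_START,
 *     RING_BUFFER_CONTROL, BB_HEAD_U/L, BB_STATE, SECOND_BB_*,
 *     and, for RCS only, BB_PER_CTX_PTR and the INDIRECT_CTX pair
 *   CTX_LRI_HEADER_1: MI_LOAD_REGISTER_IMM(9)
 *     CTX_TIMESTAMP and the PDP3..PDP0 UDW/LDW pairs
 *   CTX_LRI_HEADER_2: MI_LOAD_REGISTER_IMM(1), RCS only
 *     R_PWR_CLK_STATE (see make_rpcs() above)
 */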
1972 reg_state[CTX_LRI_HEADER_0] =
1973 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
1974 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
1975 RING_CONTEXT_CONTROL(engine),
1976 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1977 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
1978 (HAS_RESOURCE_STREAMER(dev_priv) ?
1979 CTX_CTRL_RS_CTX_ENABLE : 0)));
1980 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
1982 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
1984 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
1985 RING_START(engine->mmio_base), 0);
1986 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
1987 RING_CTL(engine->mmio_base),
1988 RING_CTL_SIZE(ring->size) | RING_VALID);
1989 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
1990 RING_BBADDR_UDW(engine->mmio_base), 0);
1991 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
1992 RING_BBADDR(engine->mmio_base), 0);
1993 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
1994 RING_BBSTATE(engine->mmio_base),
1996 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
1997 RING_SBBADDR_UDW(engine->mmio_base), 0);
1998 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
1999 RING_SBBADDR(engine->mmio_base), 0);
2000 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2001 RING_SBBSTATE(engine->mmio_base), 0);
2002 if (engine->id == RCS) {
2003 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2004 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2005 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2006 RING_INDIRECT_CTX(engine->mmio_base), 0);
2007 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2008 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2009 if (engine->wa_ctx.vma) {
2010 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2011 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2013 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2014 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2015 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2017 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2018 intel_lr_indirect_ctx_offset(engine) << 6;
2020 reg_state[CTX_BB_PER_CTX_PTR+1] =
2021 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2025 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2026 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2027 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2028 	/* PDP values will be assigned later if needed */
2029 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2031 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2033 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2035 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2037 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2039 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2041 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2043 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2046 if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
2047 /* 64b PPGTT (48bit canonical)
2048 		 * PDP0_DESCRIPTOR contains the base address of the PML4 and
2049 * other PDP Descriptors are ignored.
2051 ASSIGN_CTX_PML4(ppgtt, reg_state);
2054 if (engine->id == RCS) {
2055 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2056 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2057 make_rpcs(dev_priv));
2062 populate_lr_context(struct i915_gem_context *ctx,
2063 struct drm_i915_gem_object *ctx_obj,
2064 struct intel_engine_cs *engine,
2065 struct intel_ring *ring)
2070 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2072 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2076 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2077 if (IS_ERR(vaddr)) {
2078 ret = PTR_ERR(vaddr);
2079 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2082 ctx_obj->mm.dirty = true;
2084 	/* The register state page of the context object (page LRC_STATE_PN)
2085 	 * contains fields which must be set up prior to the first execution. */
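	/*
	 * Note: LRC_STATE_PN is the page immediately after the per-process
	 * HWSP (PPHWSP) page, so the offset used below skips the PPHWSP and
	 * the MI_LRI state built by execlists_init_reg_state() lands in the
	 * register state page.
	 */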
2087 execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
2090 i915_gem_object_unpin_map(ctx_obj);
2096 * intel_lr_context_size() - return the size of the context for an engine
2097 * @engine: which engine to find the context size for
2099 * Each engine may require a different amount of space for a context image,
2100 * so when allocating (or copying) an image, this function can be used to
2101 * find the right size for the specific engine.
2103 * Return: size (in bytes) of an engine-specific context image
2105 * Note: this size includes the HWSP, which is part of the context image
2106 * in LRC mode, but does not include the "shared data page" used with
2107 * GuC submission. The caller should account for this if using the GuC.
2109 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2113 WARN_ON(INTEL_GEN(engine->i915) < 8);
2115 switch (engine->id) {
2117 if (INTEL_GEN(engine->i915) >= 9)
2118 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2120 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2126 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2133 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2134 struct intel_engine_cs *engine)
2136 struct drm_i915_gem_object *ctx_obj;
2137 struct intel_context *ce = &ctx->engine[engine->id];
2138 struct i915_vma *vma;
2139 uint32_t context_size;
2140 struct intel_ring *ring;
2145 context_size = round_up(intel_lr_context_size(engine),
2146 I915_GTT_PAGE_SIZE);
2148 	/* One extra page as the shared data between the driver and the GuC */
2149 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2151 ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2152 if (IS_ERR(ctx_obj)) {
2153 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2154 return PTR_ERR(ctx_obj);
2157 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
2160 goto error_deref_obj;
2163 ring = intel_engine_create_ring(engine, ctx->ring_size);
2165 ret = PTR_ERR(ring);
2166 goto error_deref_obj;
2169 ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2171 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2172 goto error_ring_free;
2177 ce->initialised = engine->init_context == NULL;
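	/*
	 * The assignment above marks the context as already initialised when
	 * the engine has no init_context hook: with nothing extra to emit
	 * before first use, there is no deferred initialisation left to do.
	 * Otherwise, initialisation is deferred until the context is first
	 * used on this engine.
	 */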
2182 intel_ring_free(ring);
2184 i915_gem_object_put(ctx_obj);
2188 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2190 struct intel_engine_cs *engine;
2191 struct i915_gem_context *ctx;
2192 enum intel_engine_id id;
2194 /* Because we emit WA_TAIL_DWORDS there may be a disparity
2195 * between our bookkeeping in ce->ring->head and ce->ring->tail and
2196 	 * that stored in the context image. As we only write new commands from
2197 	 * ce->ring->tail onwards, everything before that is junk. If the GPU
2198 	 * starts reading from the RING_HEAD stored in the context, it may try to
2199 * execute that junk and die.
2201 * So to avoid that we reset the context images upon resume. For
2202 * simplicity, we just zero everything out.
2204 list_for_each_entry(ctx, &dev_priv->context_list, link) {
2205 for_each_engine(engine, dev_priv, id) {
2206 struct intel_context *ce = &ctx->engine[engine->id];
2212 reg = i915_gem_object_pin_map(ce->state->obj,
2214 if (WARN_ON(IS_ERR(reg)))
2217 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2218 reg[CTX_RING_HEAD+1] = 0;
2219 reg[CTX_RING_TAIL+1] = 0;
2221 ce->state->obj->mm.dirty = true;
2222 i915_gem_object_unpin_map(ce->state->obj);
2224 ce->ring->head = ce->ring->tail = 0;
2225 ce->ring->last_retired_head = -1;
2226 intel_ring_update_space(ce->ring);