drm/i915: Rename conditional GEM execution macros
[platform/kernel/linux-starfive.git] / drivers/gpu/drm/i915/intel_lrc.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *    Michel Thierry <michel.thierry@intel.com>
26  *    Thomas Daniel <thomas.daniel@intel.com>
27  *    Oscar Mateo <oscar.mateo@intel.com>
28  *
29  */
30
31 /**
32  * DOC: Logical Rings, Logical Ring Contexts and Execlists
33  *
34  * Motivation:
35  * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36  * These expanded contexts enable a number of new abilities, especially
37  * "Execlists" (also implemented in this file).
38  *
39  * One of the main differences with the legacy HW contexts is that logical
40  * ring contexts incorporate many more things into the context's state, like
41  * PDPs or ringbuffer control registers:
42  *
43  * The reason why PDPs are included in the context is straightforward: as
44  * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45  * contained there means you don't need to do a ppgtt->switch_mm yourself;
46  * instead, the GPU will do it for you on the context switch.
47  *
48  * But what about the ringbuffer control registers (head, tail, etc.)?
49  * Shouldn't we just need a set of those per engine command streamer? This is
50  * where the name "Logical Rings" starts to make sense: by virtualizing the
51  * rings, the engine cs shifts to a new "ring buffer" with every context
52  * switch. When you want to submit a workload to the GPU you: A) choose your
53  * context, B) find its appropriate virtualized ring, C) write commands to it
54  * and then, finally, D) tell the GPU to switch to that context.
55  *
56  * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57  * to a context is via a context execution list, ergo "Execlists".
58  *
59  * LRC implementation:
60  * Regarding the creation of contexts, we have:
61  *
62  * - One global default context.
63  * - One local default context for each opened fd.
64  * - One local extra context for each context create ioctl call.
65  *
66  * Now that ringbuffers belong to the context (and not to the engine, like
67  * before) and that contexts are uniquely tied to a given engine (and not
68  * reusable, like before), we need:
69  *
70  * - One ringbuffer per-engine inside each context.
71  * - One backing object per-engine inside each context.
72  *
73  * The global default context starts its life with these new objects fully
74  * allocated and populated. The local default context for each opened fd is
75  * more complex, because we don't know at creation time which engine is going
76  * to use them. To handle this, we have implemented a deferred creation of LR
77  * contexts:
78  *
79  * The local context starts its life as a hollow or blank holder, that only
80  * gets populated for a given engine once we receive an execbuffer. If later
81  * on we receive another execbuffer ioctl for the same context but a different
82  * engine, we allocate/populate a new ringbuffer and context backing object and
83  * so on.
84  *
85  * Finally, regarding local contexts created using the ioctl call: as they are
86  * only allowed with the render ring, we can allocate & populate them right
87  * away (no need to defer anything, at least for now).
88  *
89  * Execlists implementation:
90  * Execlists are the new method by which, on gen8+ hardware, workloads are
91  * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92  * This method works as follows:
93  *
94  * When a request is committed, its commands (the BB start and any leading or
95  * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96  * for the appropriate context. The tail pointer in the hardware context is not
97  * updated at this time, but instead, kept by the driver in the ringbuffer
98  * structure. A structure representing this request is added to a request queue
99  * for the appropriate engine: this structure contains a copy of the context's
100  * tail after the request was written to the ring buffer and a pointer to the
101  * context itself.
102  *
103  * If the engine's request queue was empty before the request was added, the
104  * queue is processed immediately. Otherwise the queue will be processed during
105  * a context switch interrupt. In any case, elements on the queue will get sent
106  * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107  * globally unique 20-bit submission ID.
108  *
109  * When execution of a request completes, the GPU updates the context status
110  * buffer with a context complete event and generates a context switch interrupt.
111  * During the interrupt handling, the driver examines the events in the buffer:
112  * for each context complete event, if the announced ID matches that on the head
113  * of the request queue, then that request is retired and removed from the queue.
114  *
115  * After processing, if any requests were retired and the queue is not empty
116  * then a new execution list can be submitted. The two requests at the front of
117  * the queue are next to be submitted but since a context may not occur twice in
118  * an execution list, if subsequent requests have the same ID as the first then
119  * the two requests must be combined. This is done simply by discarding requests
120  * at the head of the queue until either only one request is left (in which case
121  * we use a NULL second context) or the first two requests have unique IDs.
122  *
123  * By always executing the first two requests in the queue the driver ensures
124  * that the GPU is kept as busy as possible. In the case where a single context
125  * completes but a second context is still executing, the request for this second
126  * context will be at the head of the queue when we remove the first one. This
127  * request will then be resubmitted along with a new request for a different context,
128  * which will cause the hardware to continue executing the second request and queue
129  * the new request (the GPU detects the condition of a context getting preempted
130  * with the same context and optimizes the context switch flow by not doing
131  * preemption, but just sampling the new tail pointer).
132  *
133  */
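/*
 * Illustrative sketch of the port-pairing rule described above. This is a
 * self-contained toy model, not driver code: the names toy_request and
 * toy_fill_ports are invented for illustration, and a request is reduced to
 * nothing but an opaque context pointer. Consecutive queued requests from the
 * same context collapse into a single ELSP slot; the second slot is only used
 * once a different context is seen, and submission stops when a third
 * distinct context would be needed.
 */
struct toy_request { const void *ctx; };

/* Returns how many of the two ports were filled (0, 1 or 2). */
static int toy_fill_ports(const struct toy_request *queue, int count,
			  const struct toy_request *port[2])
{
	int used = 0, i;

	for (i = 0; i < count; i++) {
		if (used && queue[i].ctx == port[used - 1]->ctx) {
			/* Same context as the slot being filled: merge by
			 * keeping only the later request (the later tail).
			 */
			port[used - 1] = &queue[i];
		} else if (used < 2) {
			/* A new context: open the next slot. */
			port[used++] = &queue[i];
		} else {
			/* Both slots hold distinct contexts: stop here. */
			break;
		}
	}

	return used;
}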
134 #include <linux/interrupt.h>
135
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
140
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
144
145 #define RING_EXECLIST_QFULL             (1 << 0x2)
146 #define RING_EXECLIST1_VALID            (1 << 0x3)
147 #define RING_EXECLIST0_VALID            (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS     (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE           (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE           (1 << 0x12)
151
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE     (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED       (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH  (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE     (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE        (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE    (1 << 15)
158
159 #define GEN8_CTX_STATUS_COMPLETED_MASK \
160          (GEN8_CTX_STATUS_ACTIVE_IDLE | \
161           GEN8_CTX_STATUS_PREEMPTED | \
162           GEN8_CTX_STATUS_ELEMENT_SWITCH)
163
164 #define CTX_LRI_HEADER_0                0x01
165 #define CTX_CONTEXT_CONTROL             0x02
166 #define CTX_RING_HEAD                   0x04
167 #define CTX_RING_TAIL                   0x06
168 #define CTX_RING_BUFFER_START           0x08
169 #define CTX_RING_BUFFER_CONTROL         0x0a
170 #define CTX_BB_HEAD_U                   0x0c
171 #define CTX_BB_HEAD_L                   0x0e
172 #define CTX_BB_STATE                    0x10
173 #define CTX_SECOND_BB_HEAD_U            0x12
174 #define CTX_SECOND_BB_HEAD_L            0x14
175 #define CTX_SECOND_BB_STATE             0x16
176 #define CTX_BB_PER_CTX_PTR              0x18
177 #define CTX_RCS_INDIRECT_CTX            0x1a
178 #define CTX_RCS_INDIRECT_CTX_OFFSET     0x1c
179 #define CTX_LRI_HEADER_1                0x21
180 #define CTX_CTX_TIMESTAMP               0x22
181 #define CTX_PDP3_UDW                    0x24
182 #define CTX_PDP3_LDW                    0x26
183 #define CTX_PDP2_UDW                    0x28
184 #define CTX_PDP2_LDW                    0x2a
185 #define CTX_PDP1_UDW                    0x2c
186 #define CTX_PDP1_LDW                    0x2e
187 #define CTX_PDP0_UDW                    0x30
188 #define CTX_PDP0_LDW                    0x32
189 #define CTX_LRI_HEADER_2                0x41
190 #define CTX_R_PWR_CLK_STATE             0x42
191 #define CTX_GPGPU_CSR_BASE_ADDRESS      0x44
192
193 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
194         (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
195         (reg_state)[(pos)+1] = (val); \
196 } while (0)
197
198 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {                \
199         const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
200         reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
201         reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
202 } while (0)
203
204 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
205         reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
206         reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
207 } while (0)
208
209 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x17
210 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x26
211
212 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
213 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
214
215 #define WA_TAIL_DWORDS 2
216
217 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
218                                             struct intel_engine_cs *engine);
219 static void execlists_init_reg_state(u32 *reg_state,
220                                      struct i915_gem_context *ctx,
221                                      struct intel_engine_cs *engine,
222                                      struct intel_ring *ring);
223
224 /**
225  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
226  * @dev_priv: i915 device private
227  * @enable_execlists: value of i915.enable_execlists module parameter.
228  *
229  * Only certain platforms support Execlists (the prerequisites being
230  * support for Logical Ring Contexts and Aliasing PPGTT or better).
231  *
232  * Return: 1 if Execlists is supported and has to be enabled.
233  */
234 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
235 {
236         /* On platforms with execlists available, vGPU will only
237          * support execlist mode, not ring buffer mode.
238          */
239         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
240                 return 1;
241
242         if (INTEL_GEN(dev_priv) >= 9)
243                 return 1;
244
245         if (enable_execlists == 0)
246                 return 0;
247
248         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
249             USES_PPGTT(dev_priv) &&
250             i915.use_mmio_flip >= 0)
251                 return 1;
252
253         return 0;
254 }
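/*
 * For example (reading the checks above): on a Gen9+ part, or on a vGPU with
 * logical ring contexts, the module parameter is effectively ignored and
 * execlists are always used; on a Gen8 part, i915.enable_execlists=0 selects
 * legacy ring-buffer submission, while PPGTT support together with a
 * non-negative i915.use_mmio_flip allows execlists to be enabled.
 */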
255
256 /**
257  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
258  *                                        for a pinned context
259  * @ctx: Context to work on
260  * @engine: Engine the descriptor will be used with
261  *
262  * The context descriptor encodes various attributes of a context,
263  * including its GTT address and some flags. Because it's fairly
264  * expensive to calculate, we'll just do it once and cache the result,
265  * which remains valid until the context is unpinned.
266  *
267  * This is what a descriptor looks like, from LSB to MSB::
268  *
269  *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
270  *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
271  *      bits 32-52:    ctx ID, a globally unique tag
272  *      bits 53-54:    mbz, reserved for use by hardware
273  *      bits 55-63:    group ID, currently unused and set to 0
274  */
275 static void
276 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
277                                    struct intel_engine_cs *engine)
278 {
279         struct intel_context *ce = &ctx->engine[engine->id];
280         u64 desc;
281
282         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
283
284         desc = ctx->desc_template;                              /* bits  0-11 */
285         desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
286                                                                 /* bits 12-31 */
287         desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
288
289         ce->lrc_desc = desc;
290 }
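/*
 * Worked example for the layout documented above, using purely hypothetical
 * values: with a flag template of 0x021, a GGTT offset of 0x00012000 for the
 * context's PPHWSP and hw_id 5, the update would compute
 *
 *	desc = 0x021			(bits  0-11, flags)
 *	     | 0x00012000		(bits 12-31, LRCA)
 *	     | (u64)5 << 32		(bits 32-52, ctx ID)
 *	     = 0x0000000500012021
 */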
291
292 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
293                                      struct intel_engine_cs *engine)
294 {
295         return ctx->engine[engine->id].lrc_desc;
296 }
297
298 static inline void
299 execlists_context_status_change(struct drm_i915_gem_request *rq,
300                                 unsigned long status)
301 {
302         /*
303          * Only used when GVT-g is enabled now. When GVT-g is disabled,
304          * the compiler should eliminate this function as dead code.
305          */
306         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
307                 return;
308
309         atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
310 }
311
312 static void
313 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
314 {
315         ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
316         ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
317         ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
318         ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
319 }
320
321 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
322 {
323         struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
324         struct i915_hw_ppgtt *ppgtt =
325                 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
326         u32 *reg_state = ce->lrc_reg_state;
327
328         reg_state[CTX_RING_TAIL+1] = rq->tail;
329
330         /* True 32b PPGTT with dynamic page allocation: update PDP
331          * registers and point the unallocated PDPs to the scratch page.
332          * PML4 is allocated during ppgtt init, so this is not needed
333          * in 48-bit mode.
334          */
335         if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
336                 execlists_update_context_pdps(ppgtt, reg_state);
337
338         return ce->lrc_desc;
339 }
340
341 static void execlists_submit_ports(struct intel_engine_cs *engine)
342 {
343         struct drm_i915_private *dev_priv = engine->i915;
344         struct execlist_port *port = engine->execlist_port;
345         u32 __iomem *elsp =
346                 dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
347         u64 desc[2];
348
349         GEM_BUG_ON(port[0].count > 1);
350         if (!port[0].count)
351                 execlists_context_status_change(port[0].request,
352                                                 INTEL_CONTEXT_SCHEDULE_IN);
353         desc[0] = execlists_update_context(port[0].request);
354         GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
355         port[0].count++;
356
357         if (port[1].request) {
358                 GEM_BUG_ON(port[1].count);
359                 execlists_context_status_change(port[1].request,
360                                                 INTEL_CONTEXT_SCHEDULE_IN);
361                 desc[1] = execlists_update_context(port[1].request);
362                 GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
363                 port[1].count = 1;
364         } else {
365                 desc[1] = 0;
366         }
367         GEM_BUG_ON(desc[0] == desc[1]);
368
369         /* You must always write both descriptors in the order below. */
370         writel(upper_32_bits(desc[1]), elsp);
371         writel(lower_32_bits(desc[1]), elsp);
372
373         writel(upper_32_bits(desc[0]), elsp);
374         /* The context is automatically loaded after the following */
375         writel(lower_32_bits(desc[0]), elsp);
376 }
377
378 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
379 {
380         return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
381                 i915_gem_context_force_single_submission(ctx));
382 }
383
384 static bool can_merge_ctx(const struct i915_gem_context *prev,
385                           const struct i915_gem_context *next)
386 {
387         if (prev != next)
388                 return false;
389
390         if (ctx_single_port_submission(prev))
391                 return false;
392
393         return true;
394 }
395
396 static void execlists_dequeue(struct intel_engine_cs *engine)
397 {
398         struct drm_i915_gem_request *last;
399         struct execlist_port *port = engine->execlist_port;
400         unsigned long flags;
401         struct rb_node *rb;
402         bool submit = false;
403
404         last = port->request;
405         if (last)
406                 /* WaIdleLiteRestore:bdw,skl
407                  * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
408                  * as we resubmit the request. See gen8_emit_breadcrumb()
409                  * for where we prepare the padding after the end of the
410                  * request.
411                  */
412                 last->tail = last->wa_tail;
413
414         GEM_BUG_ON(port[1].request);
415
416         /* Hardware submission is through 2 ports. Conceptually each port
417          * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
418          * static for a context, and unique to each, so we only execute
419          * requests belonging to a single context from each ring. RING_HEAD
420          * is maintained by the CS in the context image; it marks the place
421          * where it got up to last time, and through RING_TAIL we tell the CS
422          * where we want to execute up to this time.
423          *
424          * In this list the requests are in order of execution. Consecutive
425          * requests from the same context are adjacent in the ringbuffer. We
426          * can combine these requests into a single RING_TAIL update:
427          *
428          *              RING_HEAD...req1...req2
429          *                                    ^- RING_TAIL
430          * since to execute req2 the CS must first execute req1.
431          *
432          * Our goal then is to point each port to the end of a consecutive
433          * sequence of requests as being the most optimal (fewest wake ups
434          * and context switches) submission.
435          */
436
437         spin_lock_irqsave(&engine->timeline->lock, flags);
438         rb = engine->execlist_first;
439         while (rb) {
440                 struct drm_i915_gem_request *cursor =
441                         rb_entry(rb, typeof(*cursor), priotree.node);
442
443                 /* Can we combine this request with the current port? It has to
444                  * be the same context/ringbuffer and not have any exceptions
445                  * (e.g. GVT saying never to combine contexts).
446                  *
447                  * If we can combine the requests, we can execute both by
448                  * updating the RING_TAIL to point to the end of the second
449                  * request, and so we never need to tell the hardware about
450                  * the first.
451                  */
452                 if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
453                         /* If we are on the second port and cannot combine
454                          * this request with the last, then we are done.
455                          */
456                         if (port != engine->execlist_port)
457                                 break;
458
459                         /* If GVT overrides us we only ever submit port[0],
460                          * leaving port[1] empty. Note that we also have
461                          * to be careful that we don't queue the same
462                          * context (even though a different request) to
463                          * the second port.
464                          */
465                         if (ctx_single_port_submission(last->ctx) ||
466                             ctx_single_port_submission(cursor->ctx))
467                                 break;
468
469                         GEM_BUG_ON(last->ctx == cursor->ctx);
470
471                         i915_gem_request_assign(&port->request, last);
472                         port++;
473                 }
474
475                 rb = rb_next(rb);
476                 rb_erase(&cursor->priotree.node, &engine->execlist_queue);
477                 RB_CLEAR_NODE(&cursor->priotree.node);
478                 cursor->priotree.priority = INT_MAX;
479
480                 __i915_gem_request_submit(cursor);
481                 last = cursor;
482                 submit = true;
483         }
484         if (submit) {
485                 i915_gem_request_assign(&port->request, last);
486                 engine->execlist_first = rb;
487         }
488         spin_unlock_irqrestore(&engine->timeline->lock, flags);
489
490         if (submit)
491                 execlists_submit_ports(engine);
492 }
493
494 static bool execlists_elsp_idle(struct intel_engine_cs *engine)
495 {
496         return !engine->execlist_port[0].request;
497 }
498
499 /**
500  * intel_execlists_idle() - Determine if all engine submission ports are idle
501  * @dev_priv: i915 device private
502  *
503  * Return true if there are no requests pending on any of the submission ports
504  * of any engines.
505  */
506 bool intel_execlists_idle(struct drm_i915_private *dev_priv)
507 {
508         struct intel_engine_cs *engine;
509         enum intel_engine_id id;
510
511         if (!i915.enable_execlists)
512                 return true;
513
514         for_each_engine(engine, dev_priv, id) {
515                 /* Interrupt/tasklet pending? */
516                 if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
517                         return false;
518
519                 /* Both ports drained, no more ELSP submission? */
520                 if (!execlists_elsp_idle(engine))
521                         return false;
522         }
523
524         return true;
525 }
526
527 static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
528 {
529         const struct execlist_port *port = engine->execlist_port;
530
531         return port[0].count + port[1].count < 2;
532 }
533
534 /*
535  * Check the unread Context Status Buffers and manage the submission of new
536  * contexts to the ELSP accordingly.
537  */
538 static void intel_lrc_irq_handler(unsigned long data)
539 {
540         struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
541         struct execlist_port *port = engine->execlist_port;
542         struct drm_i915_private *dev_priv = engine->i915;
543
544         intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
545
546         while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
547                 u32 __iomem *csb_mmio =
548                         dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
549                 u32 __iomem *buf =
550                         dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
551                 unsigned int csb, head, tail;
552
553                 csb = readl(csb_mmio);
554                 head = GEN8_CSB_READ_PTR(csb);
555                 tail = GEN8_CSB_WRITE_PTR(csb);
556                 if (head == tail)
557                         break;
558
559                 if (tail < head)
560                         tail += GEN8_CSB_ENTRIES;
561                 do {
562                         unsigned int idx = ++head % GEN8_CSB_ENTRIES;
563                         unsigned int status = readl(buf + 2 * idx);
564
565                         /* We are flying near dragons again.
566                          *
567                          * We hold a reference to the request in execlist_port[]
568                          * but no more than that. We are operating in softirq
569                          * context and so cannot hold any mutex or sleep. That
570                          * prevents us stopping the requests we are processing
571                          * in port[] from being retired simultaneously (the
572                          * breadcrumb will be complete before we see the
573                          * context-switch). As we only hold the reference to the
574                          * request, any pointer chasing underneath the request
575                          * is subject to a potential use-after-free. Thus we
576                          * store all of the bookkeeping within port[] as
577                          * required, and avoid using unguarded pointers beneath
578                          * request itself. The same applies to the atomic
579                          * status notifier.
580                          */
581
582                         if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
583                                 continue;
584
585                         /* Check the context/desc id for this event matches */
586                         GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
587                                          port[0].context_id);
588
589                         GEM_BUG_ON(port[0].count == 0);
590                         if (--port[0].count == 0) {
591                                 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
592                                 execlists_context_status_change(port[0].request,
593                                                                 INTEL_CONTEXT_SCHEDULE_OUT);
594
595                                 i915_gem_request_put(port[0].request);
596                                 port[0] = port[1];
597                                 memset(&port[1], 0, sizeof(port[1]));
598                         }
599
600                         GEM_BUG_ON(port[0].count == 0 &&
601                                    !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
602                 } while (head < tail);
603
604                 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
605                                      GEN8_CSB_WRITE_PTR(csb) << 8),
606                        csb_mmio);
607         }
608
609         if (execlists_elsp_ready(engine))
610                 execlists_dequeue(engine);
611
612         intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
613 }
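/*
 * Worked example of the CSB pointer handling above, with hypothetical pointer
 * values and assuming GEN8_CSB_ENTRIES is 6: if the cached read pointer is
 * head == 5 and the hardware write pointer is tail == 1, the write pointer
 * has wrapped, so tail becomes 1 + 6 = 7 and the loop consumes
 *
 *	idx = ++head % 6  ->  6 % 6 = 0, then 7 % 6 = 1
 *
 * i.e. exactly the two status entries written since the last visit, after
 * which the read pointer register is updated to match the write pointer.
 */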
614
615 static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
616 {
617         struct rb_node **p, *rb;
618         bool first = true;
619
620         /* most positive priority is scheduled first, equal priorities fifo */
621         rb = NULL;
622         p = &root->rb_node;
623         while (*p) {
624                 struct i915_priotree *pos;
625
626                 rb = *p;
627                 pos = rb_entry(rb, typeof(*pos), node);
628                 if (pt->priority > pos->priority) {
629                         p = &rb->rb_left;
630                 } else {
631                         p = &rb->rb_right;
632                         first = false;
633                 }
634         }
635         rb_link_node(&pt->node, rb, p);
636         rb_insert_color(&pt->node, root);
637
638         return first;
639 }
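/*
 * Worked example of the insertion rule above, with made-up priorities:
 * starting from an empty queue, inserting a request of priority 0 returns
 * true (it is the leftmost node, so the caller makes it execlist_first);
 * inserting a second priority-0 request walks right past its equal and
 * returns false, preserving FIFO order among equal priorities; inserting a
 * priority-5 request walks left of everything and returns true again, so the
 * caller updates execlist_first to point at it.
 */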
640
641 static void execlists_submit_request(struct drm_i915_gem_request *request)
642 {
643         struct intel_engine_cs *engine = request->engine;
644         unsigned long flags;
645
646         /* Will be called from irq-context when using foreign fences. */
647         spin_lock_irqsave(&engine->timeline->lock, flags);
648
649         if (insert_request(&request->priotree, &engine->execlist_queue)) {
650                 engine->execlist_first = &request->priotree.node;
651                 if (execlists_elsp_ready(engine))
652                         tasklet_hi_schedule(&engine->irq_tasklet);
653         }
654
655         spin_unlock_irqrestore(&engine->timeline->lock, flags);
656 }
657
658 static struct intel_engine_cs *
659 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
660 {
661         struct intel_engine_cs *engine;
662
663         engine = container_of(pt,
664                               struct drm_i915_gem_request,
665                               priotree)->engine;
666         if (engine != locked) {
667                 if (locked)
668                         spin_unlock_irq(&locked->timeline->lock);
669                 spin_lock_irq(&engine->timeline->lock);
670         }
671
672         return engine;
673 }
674
675 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
676 {
677         struct intel_engine_cs *engine = NULL;
678         struct i915_dependency *dep, *p;
679         struct i915_dependency stack;
680         LIST_HEAD(dfs);
681
682         if (prio <= READ_ONCE(request->priotree.priority))
683                 return;
684
685         /* Need BKL in order to use the temporary link inside i915_dependency */
686         lockdep_assert_held(&request->i915->drm.struct_mutex);
687
688         stack.signaler = &request->priotree;
689         list_add(&stack.dfs_link, &dfs);
690
691         /* Recursively bump all dependent priorities to match the new request.
692          *
693          * A naive approach would be to use recursion:
694          * static void update_priorities(struct i915_priotree *pt, prio) {
695          *      list_for_each_entry(dep, &pt->signalers_list, signal_link)
696          *              update_priorities(dep->signal, prio)
697          *      insert_request(pt);
698          * }
699          * but that may have unlimited recursion depth and so runs a very
700          * real risk of overrunning the kernel stack. Instead, we build
701          * a flat list of all dependencies starting with the current request.
702          * As we walk the list of dependencies, we add all of its dependencies
703          * to the end of the list (this may include an already visited
704          * request) and continue to walk onwards onto the new dependencies. The
705          * end result is a topological list of requests in reverse order, the
706          * last element in the list is the request we must execute first.
707          */
708         list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
709                 struct i915_priotree *pt = dep->signaler;
710
711                 list_for_each_entry(p, &pt->signalers_list, signal_link)
712                         if (prio > READ_ONCE(p->signaler->priority))
713                                 list_move_tail(&p->dfs_link, &dfs);
714
715                 list_safe_reset_next(dep, p, dfs_link);
716                 if (!RB_EMPTY_NODE(&pt->node))
717                         continue;
718
719                 engine = pt_lock_engine(pt, engine);
720
721                 /* If it is not already in the rbtree, we can update the
722                  * priority inplace and skip over it (and its dependencies)
723                  * if it is referenced *again* as we descend the dfs.
724                  */
725                 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
726                         pt->priority = prio;
727                         list_del_init(&dep->dfs_link);
728                 }
729         }
730
731         /* Fifo and depth-first replacement ensure our deps execute before us */
732         list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
733                 struct i915_priotree *pt = dep->signaler;
734
735                 INIT_LIST_HEAD(&dep->dfs_link);
736
737                 engine = pt_lock_engine(pt, engine);
738
739                 if (prio <= pt->priority)
740                         continue;
741
742                 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
743
744                 pt->priority = prio;
745                 rb_erase(&pt->node, &engine->execlist_queue);
746                 if (insert_request(pt, &engine->execlist_queue))
747                         engine->execlist_first = &pt->node;
748         }
749
750         if (engine)
751                 spin_unlock_irq(&engine->timeline->lock);
752
753         /* XXX Do we need to preempt to make room for us and our deps? */
754 }
755
756 static int execlists_context_pin(struct intel_engine_cs *engine,
757                                  struct i915_gem_context *ctx)
758 {
759         struct intel_context *ce = &ctx->engine[engine->id];
760         unsigned int flags;
761         void *vaddr;
762         int ret;
763
764         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
765
766         if (ce->pin_count++)
767                 return 0;
768
769         if (!ce->state) {
770                 ret = execlists_context_deferred_alloc(ctx, engine);
771                 if (ret)
772                         goto err;
773         }
774         GEM_BUG_ON(!ce->state);
775
776         flags = PIN_GLOBAL | PIN_HIGH;
777         if (ctx->ggtt_offset_bias)
778                 flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
779
780         ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
781         if (ret)
782                 goto err;
783
784         vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
785         if (IS_ERR(vaddr)) {
786                 ret = PTR_ERR(vaddr);
787                 goto unpin_vma;
788         }
789
790         ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
791         if (ret)
792                 goto unpin_map;
793
794         intel_lr_context_descriptor_update(ctx, engine);
795
796         ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
797         ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
798                 i915_ggtt_offset(ce->ring->vma);
799
800         ce->state->obj->mm.dirty = true;
801
802         i915_gem_context_get(ctx);
803         return 0;
804
805 unpin_map:
806         i915_gem_object_unpin_map(ce->state->obj);
807 unpin_vma:
808         __i915_vma_unpin(ce->state);
809 err:
810         ce->pin_count = 0;
811         return ret;
812 }
813
814 static void execlists_context_unpin(struct intel_engine_cs *engine,
815                                     struct i915_gem_context *ctx)
816 {
817         struct intel_context *ce = &ctx->engine[engine->id];
818
819         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
820         GEM_BUG_ON(ce->pin_count == 0);
821
822         if (--ce->pin_count)
823                 return;
824
825         intel_ring_unpin(ce->ring);
826
827         i915_gem_object_unpin_map(ce->state->obj);
828         i915_vma_unpin(ce->state);
829
830         i915_gem_context_put(ctx);
831 }
832
833 static int execlists_request_alloc(struct drm_i915_gem_request *request)
834 {
835         struct intel_engine_cs *engine = request->engine;
836         struct intel_context *ce = &request->ctx->engine[engine->id];
837         int ret;
838
839         GEM_BUG_ON(!ce->pin_count);
840
841         /* Flush enough space to reduce the likelihood of waiting after
842          * we start building the request - in which case we will just
843          * have to repeat work.
844          */
845         request->reserved_space += EXECLISTS_REQUEST_SIZE;
846
847         GEM_BUG_ON(!ce->ring);
848         request->ring = ce->ring;
849
850         if (i915.enable_guc_submission) {
851                 /*
852                  * Check that the GuC has space for the request before
853                  * going any further, as the i915_add_request() call
854                  * later on mustn't fail ...
855                  */
856                 ret = i915_guc_wq_reserve(request);
857                 if (ret)
858                         goto err;
859         }
860
861         ret = intel_ring_begin(request, 0);
862         if (ret)
863                 goto err_unreserve;
864
865         if (!ce->initialised) {
866                 ret = engine->init_context(request);
867                 if (ret)
868                         goto err_unreserve;
869
870                 ce->initialised = true;
871         }
872
873         /* Note that after this point, we have committed to using
874          * this request as it is being used to both track the
875          * state of engine initialisation and liveness of the
876          * golden renderstate above. Think twice before you try
877          * to cancel/unwind this request now.
878          */
879
880         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
881         return 0;
882
883 err_unreserve:
884         if (i915.enable_guc_submission)
885                 i915_guc_wq_unreserve(request);
886 err:
887         return ret;
888 }
889
890 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
891 {
892         int ret, i;
893         struct intel_ring *ring = req->ring;
894         struct i915_workarounds *w = &req->i915->workarounds;
895
896         if (w->count == 0)
897                 return 0;
898
899         ret = req->engine->emit_flush(req, EMIT_BARRIER);
900         if (ret)
901                 return ret;
902
903         ret = intel_ring_begin(req, w->count * 2 + 2);
904         if (ret)
905                 return ret;
906
907         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
908         for (i = 0; i < w->count; i++) {
909                 intel_ring_emit_reg(ring, w->reg[i].addr);
910                 intel_ring_emit(ring, w->reg[i].value);
911         }
912         intel_ring_emit(ring, MI_NOOP);
913
914         intel_ring_advance(ring);
915
916         ret = req->engine->emit_flush(req, EMIT_BARRIER);
917         if (ret)
918                 return ret;
919
920         return 0;
921 }
922
923 #define wa_ctx_emit(batch, index, cmd)                                  \
924         do {                                                            \
925                 int __index = (index)++;                                \
926                 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
927                         return -ENOSPC;                                 \
928                 }                                                       \
929                 batch[__index] = (cmd);                                 \
930         } while (0)
931
932 #define wa_ctx_emit_reg(batch, index, reg) \
933         wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
934
935 /*
936  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
937  * PIPE_CONTROL instruction. This is required for the flush to happen correctly
938  * but there is a slight complication as this is applied in WA batch where the
939  * values are only initialized once so we cannot take register value at the
940  * beginning and reuse it further; hence we save its value to memory, upload a
941  * constant value with bit21 set and then we restore it back with the saved value.
942  * To simplify the WA, a constant value is formed by using the default value
943  * of this register. This shouldn't be a problem because we are only modifying
944  * it for a short period and this batch is non-preemptible. We can of course
945  * use additional instructions that read the actual value of the register
946  * at that time and set our bit of interest, but it makes the WA complicated.
947  *
948  * This WA is also required for Gen9 so extracting as a function avoids
949  * code duplication.
950  */
951 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
952                                                 uint32_t *batch,
953                                                 uint32_t index)
954 {
955         uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
956
957         wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
958                                    MI_SRM_LRM_GLOBAL_GTT));
959         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
960         wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
961         wa_ctx_emit(batch, index, 0);
962
963         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
964         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
965         wa_ctx_emit(batch, index, l3sqc4_flush);
966
967         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
968         wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
969                                    PIPE_CONTROL_DC_FLUSH_ENABLE));
970         wa_ctx_emit(batch, index, 0);
971         wa_ctx_emit(batch, index, 0);
972         wa_ctx_emit(batch, index, 0);
973         wa_ctx_emit(batch, index, 0);
974
975         wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
976                                    MI_SRM_LRM_GLOBAL_GTT));
977         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
978         wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
979         wa_ctx_emit(batch, index, 0);
980
981         return index;
982 }
983
984 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
985                                     uint32_t offset,
986                                     uint32_t start_alignment)
987 {
988         return wa_ctx->offset = ALIGN(offset, start_alignment);
989 }
990
991 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
992                              uint32_t offset,
993                              uint32_t size_alignment)
994 {
995         wa_ctx->size = offset - wa_ctx->offset;
996
997         WARN(wa_ctx->size % size_alignment,
998              "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
999              wa_ctx->size, size_alignment);
1000         return 0;
1001 }
1002
1003 /*
1004  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1005  * initialized at the beginning and shared across all contexts but this field
1006  * helps us to have multiple batches at different offsets and select them based
1007  * on some criteria. At the moment this batch always starts at the beginning of
1008  * the page and at this point we don't have multiple wa_ctx batch buffers.
1009  *
1010  * The number of WAs applied is not known at the beginning; we use this field
1011  * to return the number of DWORDs written.
1012  *
1013  * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1014  * so it adds NOOPs as padding to make it cacheline aligned.
1015  * MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them
1016  * together make a complete batch buffer.
1017  */
1018 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1019                                     struct i915_wa_ctx_bb *wa_ctx,
1020                                     uint32_t *batch,
1021                                     uint32_t *offset)
1022 {
1023         uint32_t scratch_addr;
1024         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1025
1026         /* WaDisableCtxRestoreArbitration:bdw,chv */
1027         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1028
1029         /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1030         if (IS_BROADWELL(engine->i915)) {
1031                 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1032                 if (rc < 0)
1033                         return rc;
1034                 index = rc;
1035         }
1036
1037         /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1038         /* Actual scratch location is at 128 bytes offset */
1039         scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1040
1041         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1042         wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1043                                    PIPE_CONTROL_GLOBAL_GTT_IVB |
1044                                    PIPE_CONTROL_CS_STALL |
1045                                    PIPE_CONTROL_QW_WRITE));
1046         wa_ctx_emit(batch, index, scratch_addr);
1047         wa_ctx_emit(batch, index, 0);
1048         wa_ctx_emit(batch, index, 0);
1049         wa_ctx_emit(batch, index, 0);
1050
1051         /* Pad to end of cacheline */
1052         while (index % CACHELINE_DWORDS)
1053                 wa_ctx_emit(batch, index, MI_NOOP);
1054
1055         /*
1056          * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1057          * execution depends on the length specified in terms of cache lines
1058          * in the register CTX_RCS_INDIRECT_CTX
1059          */
1060
1061         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1062 }
1063
1064 /*
1065  *  This batch is started immediately after the indirect_ctx batch. Since we
1066  *  ensure that indirect_ctx ends on a cacheline, this batch is aligned automatically.
1067  *
1068  *  The number of DWORDs written is returned using this field.
1069  *
1070  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1071  *  to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
1072  */
1073 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1074                                struct i915_wa_ctx_bb *wa_ctx,
1075                                uint32_t *batch,
1076                                uint32_t *offset)
1077 {
1078         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1079
1080         /* WaDisableCtxRestoreArbitration:bdw,chv */
1081         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1082
1083         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1084
1085         return wa_ctx_end(wa_ctx, *offset = index, 1);
1086 }
1087
1088 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1089                                     struct i915_wa_ctx_bb *wa_ctx,
1090                                     uint32_t *batch,
1091                                     uint32_t *offset)
1092 {
1093         int ret;
1094         struct drm_i915_private *dev_priv = engine->i915;
1095         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1096
1097         /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1098         ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1099         if (ret < 0)
1100                 return ret;
1101         index = ret;
1102
1103         /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1104         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1105         wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1106         wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1107                             GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1108         wa_ctx_emit(batch, index, MI_NOOP);
1109
1110         /* WaClearSlmSpaceAtContextSwitch:kbl */
1111         /* Actual scratch location is at 128 bytes offset */
1112         if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
1113                 u32 scratch_addr =
1114                         i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1115
1116                 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1117                 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1118                                            PIPE_CONTROL_GLOBAL_GTT_IVB |
1119                                            PIPE_CONTROL_CS_STALL |
1120                                            PIPE_CONTROL_QW_WRITE));
1121                 wa_ctx_emit(batch, index, scratch_addr);
1122                 wa_ctx_emit(batch, index, 0);
1123                 wa_ctx_emit(batch, index, 0);
1124                 wa_ctx_emit(batch, index, 0);
1125         }
1126
1127         /* WaMediaPoolStateCmdInWABB:bxt,glk */
1128         if (HAS_POOLED_EU(engine->i915)) {
1129                 /*
1130                  * EU pool configuration is set up along with the golden
1131                  * context during context initialization. This value depends
1132                  * on device type (2x6 or 3x6) and needs to be updated based
1133                  * on which subslice is disabled, especially for 2x6
1134                  * devices; however, it is safe to load the default
1135                  * configuration of a 3x6 device instead of masking off the
1136                  * corresponding bits because HW ignores bits of a disabled
1137                  * subslice and drops down to the appropriate config. Please
1138                  * see render_state_setup() in i915_gem_render_state.c for
1139                  * possible configurations, to avoid duplication they are
1140                  * not shown here again.
1141                  */
1142                 u32 eu_pool_config = 0x00777000;
1143                 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1144                 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1145                 wa_ctx_emit(batch, index, eu_pool_config);
1146                 wa_ctx_emit(batch, index, 0);
1147                 wa_ctx_emit(batch, index, 0);
1148                 wa_ctx_emit(batch, index, 0);
1149         }
1150
1151         /* Pad to end of cacheline */
1152         while (index % CACHELINE_DWORDS)
1153                 wa_ctx_emit(batch, index, MI_NOOP);
1154
1155         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1156 }
1157
1158 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1159                                struct i915_wa_ctx_bb *wa_ctx,
1160                                uint32_t *batch,
1161                                uint32_t *offset)
1162 {
1163         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1164
1165         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1166
1167         return wa_ctx_end(wa_ctx, *offset = index, 1);
1168 }
1169
1170 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1171 {
1172         struct drm_i915_gem_object *obj;
1173         struct i915_vma *vma;
1174         int err;
1175
1176         obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
1177         if (IS_ERR(obj))
1178                 return PTR_ERR(obj);
1179
1180         vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1181         if (IS_ERR(vma)) {
1182                 err = PTR_ERR(vma);
1183                 goto err;
1184         }
1185
1186         err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1187         if (err)
1188                 goto err;
1189
1190         engine->wa_ctx.vma = vma;
1191         return 0;
1192
1193 err:
1194         i915_gem_object_put(obj);
1195         return err;
1196 }
1197
1198 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1199 {
1200         i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1201 }
1202
1203 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1204 {
1205         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1206         uint32_t *batch;
1207         uint32_t offset;
1208         struct page *page;
1209         int ret;
1210
1211         WARN_ON(engine->id != RCS);
1212
1213         /* update this when WA for higher Gen are added */
1214         if (INTEL_GEN(engine->i915) > 9) {
1215                 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1216                           INTEL_GEN(engine->i915));
1217                 return 0;
1218         }
1219
1220         /* some WAs perform writes to the scratch page, ensure it is valid */
1221         if (!engine->scratch) {
1222                 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1223                 return -EINVAL;
1224         }
1225
1226         ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1227         if (ret) {
1228                 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1229                 return ret;
1230         }
1231
1232         page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1233         batch = kmap_atomic(page);
1234         offset = 0;
1235
1236         if (IS_GEN8(engine->i915)) {
1237                 ret = gen8_init_indirectctx_bb(engine,
1238                                                &wa_ctx->indirect_ctx,
1239                                                batch,
1240                                                &offset);
1241                 if (ret)
1242                         goto out;
1243
1244                 ret = gen8_init_perctx_bb(engine,
1245                                           &wa_ctx->per_ctx,
1246                                           batch,
1247                                           &offset);
1248                 if (ret)
1249                         goto out;
1250         } else if (IS_GEN9(engine->i915)) {
1251                 ret = gen9_init_indirectctx_bb(engine,
1252                                                &wa_ctx->indirect_ctx,
1253                                                batch,
1254                                                &offset);
1255                 if (ret)
1256                         goto out;
1257
1258                 ret = gen9_init_perctx_bb(engine,
1259                                           &wa_ctx->per_ctx,
1260                                           batch,
1261                                           &offset);
1262                 if (ret)
1263                         goto out;
1264         }
1265
1266 out:
1267         kunmap_atomic(batch);
1268         if (ret)
1269                 lrc_destroy_wa_ctx_obj(engine);
1270
1271         return ret;
1272 }
1273
1274 static u32 port_seqno(struct execlist_port *port)
1275 {
1276         return port->request ? port->request->global_seqno : 0;
1277 }
1278
1279 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1280 {
1281         struct drm_i915_private *dev_priv = engine->i915;
1282         int ret;
1283
1284         ret = intel_mocs_init_engine(engine);
1285         if (ret)
1286                 return ret;
1287
1288         intel_engine_reset_breadcrumbs(engine);
1289         intel_engine_init_hangcheck(engine);
1290
1291         I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1292         I915_WRITE(RING_MODE_GEN7(engine),
1293                    _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1294                    _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1295         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1296                    engine->status_page.ggtt_offset);
1297         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1298
1299         DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1300
1301         /* After a GPU reset, we may have requests to replay */
1302         clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1303         if (!execlists_elsp_idle(engine)) {
1304                 DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
1305                                  engine->name,
1306                                  port_seqno(&engine->execlist_port[0]),
1307                                  port_seqno(&engine->execlist_port[1]));
1308                 engine->execlist_port[0].count = 0;
1309                 engine->execlist_port[1].count = 0;
1310                 execlists_submit_ports(engine);
1311         }
1312
1313         return 0;
1314 }
1315
1316 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1317 {
1318         struct drm_i915_private *dev_priv = engine->i915;
1319         int ret;
1320
1321         ret = gen8_init_common_ring(engine);
1322         if (ret)
1323                 return ret;
1324
1325         /* We need to disable the AsyncFlip performance optimisations in order
1326          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1327          * programmed to '1' on all products.
1328          *
1329          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1330          */
1331         I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1332
1333         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1334
1335         return init_workarounds_ring(engine);
1336 }
1337
1338 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1339 {
1340         int ret;
1341
1342         ret = gen8_init_common_ring(engine);
1343         if (ret)
1344                 return ret;
1345
1346         return init_workarounds_ring(engine);
1347 }
1348
1349 static void reset_common_ring(struct intel_engine_cs *engine,
1350                               struct drm_i915_gem_request *request)
1351 {
1352         struct execlist_port *port = engine->execlist_port;
1353         struct intel_context *ce;
1354
1355         /* If the request was innocent, we leave the request in the ELSP
1356          * and will try to replay it on restarting. The context image may
1357          * have been corrupted by the reset, in which case we may have
1358          * to service a new GPU hang, but more likely we can continue on
1359          * without impact.
1360          *
1361          * If the request was guilty, we presume the context is corrupt
1362          * and have to at least restore the RING register in the context
1363          * image back to the expected values to skip over the guilty request.
1364          */
1365         if (!request || request->fence.error != -EIO)
1366                 return;
1367
1368         /* We want a simple context + ring to execute the breadcrumb update.
1369          * We cannot rely on the context being intact across the GPU hang,
1370          * so clear it and rebuild just what we need for the breadcrumb.
1371          * All pending requests for this context will be zapped, and any
1372          * future request will be after userspace has had the opportunity
1373          * to recreate its own state.
1374          */
1375         ce = &request->ctx->engine[engine->id];
1376         execlists_init_reg_state(ce->lrc_reg_state,
1377                                  request->ctx, engine, ce->ring);
1378
1379         /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1380         ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1381                 i915_ggtt_offset(ce->ring->vma);
1382         ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1383
1384         request->ring->head = request->postfix;
1385         request->ring->last_retired_head = -1;
1386         intel_ring_update_space(request->ring);
1387
1388         if (i915.enable_guc_submission)
1389                 return;
1390
1391         /* Catch up with any missed context-switch interrupts */
1392         if (request->ctx != port[0].request->ctx) {
1393                 i915_gem_request_put(port[0].request);
1394                 port[0] = port[1];
1395                 memset(&port[1], 0, sizeof(port[1]));
1396         }
1397
1398         GEM_BUG_ON(request->ctx != port[0].request->ctx);
1399
1400         /* Reset WaIdleLiteRestore:bdw,skl as well */
1401         request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
1402 }
1403
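/*
 * Reload the engine's page directory pointer registers from the request's
 * ppgtt via MI_LOAD_REGISTER_IMM. Only needed for 32-bit address spaces;
 * with a 48-bit ppgtt the PDP0 descriptor points at the PML4 and is set up
 * once in the context image (see execlists_init_reg_state()).
 */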
1404 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1405 {
1406         struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1407         struct intel_ring *ring = req->ring;
1408         struct intel_engine_cs *engine = req->engine;
1409         const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1410         int i, ret;
1411
1412         ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1413         if (ret)
1414                 return ret;
1415
1416         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1417         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1418                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1419
1420                 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1421                 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1422                 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1423                 intel_ring_emit(ring, lower_32_bits(pd_daddr));
1424         }
1425
1426         intel_ring_emit(ring, MI_NOOP);
1427         intel_ring_advance(ring);
1428
1429         return 0;
1430 }
1431
1432 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1433                               u64 offset, u32 len,
1434                               unsigned int dispatch_flags)
1435 {
1436         struct intel_ring *ring = req->ring;
1437         bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1438         int ret;
1439
1440         /* Don't rely on the hw updating the PDPs, especially in lite-restore.
1441          * Ideally, we should set Force PD Restore in the ctx descriptor,
1442          * but we can't. Force Restore would be a second option, but
1443          * it is unsafe in case of lite-restore (because the ctx is
1444          * not idle). PML4 is allocated during ppgtt init so this is
1445          * not needed in 48-bit. */
1446         if (req->ctx->ppgtt &&
1447             (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1448                 if (!i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
1449                     !intel_vgpu_active(req->i915)) {
1450                         ret = intel_logical_ring_emit_pdps(req);
1451                         if (ret)
1452                                 return ret;
1453                 }
1454
1455                 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1456         }
1457
1458         ret = intel_ring_begin(req, 4);
1459         if (ret)
1460                 return ret;
1461
1462         /* FIXME(BDW): Address space and security selectors. */
1463         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1464                         (ppgtt<<8) |
1465                         (dispatch_flags & I915_DISPATCH_RS ?
1466                          MI_BATCH_RESOURCE_STREAMER : 0));
1467         intel_ring_emit(ring, lower_32_bits(offset));
1468         intel_ring_emit(ring, upper_32_bits(offset));
1469         intel_ring_emit(ring, MI_NOOP);
1470         intel_ring_advance(ring);
1471
1472         return 0;
1473 }
1474
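/*
 * Unmask both the user interrupt and the always-on bits in the engine's
 * IMR. The disable path below re-masks everything except irq_keep_mask,
 * so the context-switch interrupt keeps firing for the execlists tasklet
 * even while user interrupts are turned off.
 */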
1475 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1476 {
1477         struct drm_i915_private *dev_priv = engine->i915;
1478         I915_WRITE_IMR(engine,
1479                        ~(engine->irq_enable_mask | engine->irq_keep_mask));
1480         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1481 }
1482
1483 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1484 {
1485         struct drm_i915_private *dev_priv = engine->i915;
1486         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1487 }
1488
1489 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
1490 {
1491         struct intel_ring *ring = request->ring;
1492         u32 cmd;
1493         int ret;
1494
1495         ret = intel_ring_begin(request, 4);
1496         if (ret)
1497                 return ret;
1498
1499         cmd = MI_FLUSH_DW + 1;
1500
1501         /* We always require a command barrier so that subsequent
1502          * commands, such as breadcrumb interrupts, are strictly ordered
1503          * wrt the contents of the write cache being flushed to memory
1504          * (and thus being coherent from the CPU).
1505          */
1506         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1507
1508         if (mode & EMIT_INVALIDATE) {
1509                 cmd |= MI_INVALIDATE_TLB;
1510                 if (request->engine->id == VCS)
1511                         cmd |= MI_INVALIDATE_BSD;
1512         }
1513
1514         intel_ring_emit(ring, cmd);
1515         intel_ring_emit(ring,
1516                         I915_GEM_HWS_SCRATCH_ADDR |
1517                         MI_FLUSH_DW_USE_GTT);
1518         intel_ring_emit(ring, 0); /* upper addr */
1519         intel_ring_emit(ring, 0); /* value */
1520         intel_ring_advance(ring);
1521
1522         return 0;
1523 }
1524
1525 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1526                                   u32 mode)
1527 {
1528         struct intel_ring *ring = request->ring;
1529         struct intel_engine_cs *engine = request->engine;
1530         u32 scratch_addr =
1531                 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1532         bool vf_flush_wa = false, dc_flush_wa = false;
1533         u32 flags = 0;
1534         int ret;
1535         int len;
1536
1537         flags |= PIPE_CONTROL_CS_STALL;
1538
1539         if (mode & EMIT_FLUSH) {
1540                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1541                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1542                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1543                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1544         }
1545
1546         if (mode & EMIT_INVALIDATE) {
1547                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1548                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1549                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1550                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1551                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1552                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1553                 flags |= PIPE_CONTROL_QW_WRITE;
1554                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1555
1556                 /*
1557                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1558                  * pipe control.
1559                  */
1560                 if (IS_GEN9(request->i915))
1561                         vf_flush_wa = true;
1562
1563                 /* WaForGAMHang:kbl */
1564                 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1565                         dc_flush_wa = true;
1566         }
1567
1568         len = 6;
1569
1570         if (vf_flush_wa)
1571                 len += 6;
1572
1573         if (dc_flush_wa)
1574                 len += 12;
1575
1576         ret = intel_ring_begin(request, len);
1577         if (ret)
1578                 return ret;
1579
1580         if (vf_flush_wa) {
1581                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1582                 intel_ring_emit(ring, 0);
1583                 intel_ring_emit(ring, 0);
1584                 intel_ring_emit(ring, 0);
1585                 intel_ring_emit(ring, 0);
1586                 intel_ring_emit(ring, 0);
1587         }
1588
1589         if (dc_flush_wa) {
1590                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1591                 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1592                 intel_ring_emit(ring, 0);
1593                 intel_ring_emit(ring, 0);
1594                 intel_ring_emit(ring, 0);
1595                 intel_ring_emit(ring, 0);
1596         }
1597
1598         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1599         intel_ring_emit(ring, flags);
1600         intel_ring_emit(ring, scratch_addr);
1601         intel_ring_emit(ring, 0);
1602         intel_ring_emit(ring, 0);
1603         intel_ring_emit(ring, 0);
1604
1605         if (dc_flush_wa) {
1606                 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1607                 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1608                 intel_ring_emit(ring, 0);
1609                 intel_ring_emit(ring, 0);
1610                 intel_ring_emit(ring, 0);
1611                 intel_ring_emit(ring, 0);
1612         }
1613
1614         intel_ring_advance(ring);
1615
1616         return 0;
1617 }
1618
1619 /*
1620  * Reserve space for 2 NOOPs at the end of each request to be
1621  * used as a workaround for not being allowed to do lite
1622  * restore with HEAD==TAIL (WaIdleLiteRestore).
1623  */
1624 static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
1625 {
1626         *out++ = MI_NOOP;
1627         *out++ = MI_NOOP;
1628         request->wa_tail = intel_ring_offset(request->ring, out);
1629 }
1630
1631 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
1632                                  u32 *out)
1633 {
1634         /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1635         BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1636
1637         *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1638         *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
1639         *out++ = 0;
1640         *out++ = request->global_seqno;
1641         *out++ = MI_USER_INTERRUPT;
1642         *out++ = MI_NOOP;
1643         request->tail = intel_ring_offset(request->ring, out);
1644
1645         gen8_emit_wa_tail(request, out);
1646 }
1647
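/*
 * gen8_emit_breadcrumb() writes 6 dwords (an MI_FLUSH_DW storing the seqno
 * into the HWSP, MI_USER_INTERRUPT, MI_NOOP), followed by the
 * WA_TAIL_DWORDS of padding reserved by gen8_emit_wa_tail().
 */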
1648 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
1649
1650 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
1651                                         u32 *out)
1652 {
1653         /* We're using a qword write; the seqno should be aligned to 8 bytes. */
1654         BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1655
1656         /* w/a: post-sync ops following a GPGPU operation need a
1657          * prior CS_STALL, which is emitted by the flush
1658          * following the batch.
1659          */
1660         *out++ = GFX_OP_PIPE_CONTROL(6);
1661         *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1662                   PIPE_CONTROL_CS_STALL |
1663                   PIPE_CONTROL_QW_WRITE);
1664         *out++ = intel_hws_seqno_address(request->engine);
1665         *out++ = 0;
1666         *out++ = request->global_seqno;
1667         /* We're thrashing one dword of HWS. */
1668         *out++ = 0;
1669         *out++ = MI_USER_INTERRUPT;
1670         *out++ = MI_NOOP;
1671         request->tail = intel_ring_offset(request->ring, out);
1672
1673         gen8_emit_wa_tail(request, out);
1674 }
1675
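/*
 * gen8_emit_breadcrumb_render() writes 8 dwords (a 6-dword PIPE_CONTROL
 * storing the seqno, MI_USER_INTERRUPT, MI_NOOP), followed by the
 * WA_TAIL_DWORDS of padding.
 */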
1676 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
1677
1678 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1679 {
1680         int ret;
1681
1682         ret = intel_logical_ring_workarounds_emit(req);
1683         if (ret)
1684                 return ret;
1685
1686         ret = intel_rcs_context_init_mocs(req);
1687         /*
1688          * Failing to program the MOCS is non-fatal. The system will not
1689          * run at peak performance, so log an error and carry on.
1690          */
1691         if (ret)
1692                 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1693
1694         return i915_gem_render_state_emit(req);
1695 }
1696
1697 /**
1698  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1699  * @engine: Engine Command Streamer.
1700  */
1701 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1702 {
1703         struct drm_i915_private *dev_priv;
1704
1705         /*
1706          * The tasklet cannot be active at this point due to
1707          * intel_mark_active/idle, so this is just for documentation.
1708          */
1709         if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1710                 tasklet_kill(&engine->irq_tasklet);
1711
1712         dev_priv = engine->i915;
1713
1714         if (engine->buffer)
1715                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1717
1718         if (engine->cleanup)
1719                 engine->cleanup(engine);
1720
1721         if (engine->status_page.vma) {
1722                 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1723                 engine->status_page.vma = NULL;
1724         }
1725
1726         intel_engine_cleanup_common(engine);
1727
1728         lrc_destroy_wa_ctx_obj(engine);
1729         engine->i915 = NULL;
1730         dev_priv->engine[engine->id] = NULL;
1731         kfree(engine);
1732 }
1733
1734 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
1735 {
1736         struct intel_engine_cs *engine;
1737         enum intel_engine_id id;
1738
1739         for_each_engine(engine, dev_priv, id) {
1740                 engine->submit_request = execlists_submit_request;
1741                 engine->schedule = execlists_schedule;
1742         }
1743 }
1744
1745 static void
1746 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1747 {
1748         /* Default vfuncs which can be overridden by each engine. */
1749         engine->init_hw = gen8_init_common_ring;
1750         engine->reset_hw = reset_common_ring;
1751
1752         engine->context_pin = execlists_context_pin;
1753         engine->context_unpin = execlists_context_unpin;
1754
1755         engine->request_alloc = execlists_request_alloc;
1756
1757         engine->emit_flush = gen8_emit_flush;
1758         engine->emit_breadcrumb = gen8_emit_breadcrumb;
1759         engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
1760         engine->submit_request = execlists_submit_request;
1761         engine->schedule = execlists_schedule;
1762
1763         engine->irq_enable = gen8_logical_ring_enable_irq;
1764         engine->irq_disable = gen8_logical_ring_disable_irq;
1765         engine->emit_bb_start = gen8_emit_bb_start;
1766 }
1767
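/*
 * Each engine's interrupt bits sit at a per-engine offset (irq_shift)
 * within the GT interrupt registers. The user interrupt is what the
 * enable/disable hooks above toggle for waiters; the context-switch
 * interrupt stays in irq_keep_mask so it is never masked out from under
 * the execlists tasklet.
 */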
1768 static inline void
1769 logical_ring_default_irqs(struct intel_engine_cs *engine)
1770 {
1771         unsigned shift = engine->irq_shift;
1772         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1773         engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1774 }
1775
1776 static int
1777 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
1778 {
1779         const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
1780         void *hws;
1781
1782         /* The HWSP is part of the default context object in LRC mode. */
1783         hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1784         if (IS_ERR(hws))
1785                 return PTR_ERR(hws);
1786
1787         engine->status_page.page_addr = hws + hws_offset;
1788         engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
1789         engine->status_page.vma = vma;
1790
1791         return 0;
1792 }
1793
1794 static void
1795 logical_ring_setup(struct intel_engine_cs *engine)
1796 {
1797         struct drm_i915_private *dev_priv = engine->i915;
1798         enum forcewake_domains fw_domains;
1799
1800         intel_engine_setup_common(engine);
1801
1802         /* Intentionally left blank. */
1803         engine->buffer = NULL;
1804
1805         fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1806                                                     RING_ELSP(engine),
1807                                                     FW_REG_WRITE);
1808
1809         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1810                                                      RING_CONTEXT_STATUS_PTR(engine),
1811                                                      FW_REG_READ | FW_REG_WRITE);
1812
1813         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1814                                                      RING_CONTEXT_STATUS_BUF_BASE(engine),
1815                                                      FW_REG_READ);
1816
1817         engine->fw_domains = fw_domains;
1818
1819         tasklet_init(&engine->irq_tasklet,
1820                      intel_lrc_irq_handler, (unsigned long)engine);
1821
1822         logical_ring_default_vfuncs(engine);
1823         logical_ring_default_irqs(engine);
1824 }
1825
1826 static int
1827 logical_ring_init(struct intel_engine_cs *engine)
1828 {
1829         struct i915_gem_context *dctx = engine->i915->kernel_context;
1830         int ret;
1831
1832         ret = intel_engine_init_common(engine);
1833         if (ret)
1834                 goto error;
1835
1836         /* And set up the hardware status page. */
1837         ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1838         if (ret) {
1839                 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1840                 goto error;
1841         }
1842
1843         return 0;
1844
1845 error:
1846         intel_logical_ring_cleanup(engine);
1847         return ret;
1848 }
1849
1850 int logical_render_ring_init(struct intel_engine_cs *engine)
1851 {
1852         struct drm_i915_private *dev_priv = engine->i915;
1853         int ret;
1854
1855         logical_ring_setup(engine);
1856
1857         if (HAS_L3_DPF(dev_priv))
1858                 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1859
1860         /* Override some vfuncs for the render ring. */
1861         if (INTEL_GEN(dev_priv) >= 9)
1862                 engine->init_hw = gen9_init_render_ring;
1863         else
1864                 engine->init_hw = gen8_init_render_ring;
1865         engine->init_context = gen8_init_rcs_context;
1866         engine->emit_flush = gen8_emit_flush_render;
1867         engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
1868         engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
1869
1870         ret = intel_engine_create_scratch(engine, PAGE_SIZE);
1871         if (ret)
1872                 return ret;
1873
1874         ret = intel_init_workaround_bb(engine);
1875         if (ret) {
1876                 /*
1877                  * We continue even if we fail to initialize the WA batch
1878                  * because we only expect rare glitches, nothing critical
1879                  * enough to prevent us from using the GPU.
1880                  */
1881                 DRM_ERROR("WA batch buffer initialization failed: %d\n",
1882                           ret);
1883         }
1884
1885         return logical_ring_init(engine);
1886 }
1887
1888 int logical_xcs_ring_init(struct intel_engine_cs *engine)
1889 {
1890         logical_ring_setup(engine);
1891
1892         return logical_ring_init(engine);
1893 }
1894
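/*
 * Build the value programmed into R_PWR_CLK_STATE for the render context:
 * on gen9+ this explicitly requests the full slice/subslice/EU counts from
 * the device's sseu info, since power gating may otherwise leave them only
 * partially enabled (see the comments below).
 */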
1895 static u32
1896 make_rpcs(struct drm_i915_private *dev_priv)
1897 {
1898         u32 rpcs = 0;
1899
1900         /*
1901          * No explicit RPCS request is needed to ensure full
1902          * slice/subslice/EU enablement prior to Gen9.
1903          */
1904         if (INTEL_GEN(dev_priv) < 9)
1905                 return 0;
1906
1907         /*
1908          * Starting in Gen9, render power gating can leave
1909          * slice/subslice/EU in a partially enabled state. We
1910          * must make an explicit request through RPCS for full
1911          * enablement.
1912          */
1913         if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
1914                 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1915                 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
1916                         GEN8_RPCS_S_CNT_SHIFT;
1917                 rpcs |= GEN8_RPCS_ENABLE;
1918         }
1919
1920         if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
1921                 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1922                 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
1923                         GEN8_RPCS_SS_CNT_SHIFT;
1924                 rpcs |= GEN8_RPCS_ENABLE;
1925         }
1926
1927         if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
1928                 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1929                         GEN8_RPCS_EU_MIN_SHIFT;
1930                 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
1931                         GEN8_RPCS_EU_MAX_SHIFT;
1932                 rpcs |= GEN8_RPCS_ENABLE;
1933         }
1934
1935         return rpcs;
1936 }
1937
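/*
 * Per-gen default for the RING_INDIRECT_CTX_OFFSET field of the context
 * image; execlists_init_reg_state() shifts this into place when an
 * INDIRECT_CTX workaround batch is present.
 */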
1938 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
1939 {
1940         u32 indirect_ctx_offset;
1941
1942         switch (INTEL_GEN(engine->i915)) {
1943         default:
1944                 MISSING_CASE(INTEL_GEN(engine->i915));
1945                 /* fall through */
1946         case 9:
1947                 indirect_ctx_offset =
1948                         GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1949                 break;
1950         case 8:
1951                 indirect_ctx_offset =
1952                         GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1953                 break;
1954         }
1955
1956         return indirect_ctx_offset;
1957 }
1958
1959 static void execlists_init_reg_state(u32 *reg_state,
1960                                      struct i915_gem_context *ctx,
1961                                      struct intel_engine_cs *engine,
1962                                      struct intel_ring *ring)
1963 {
1964         struct drm_i915_private *dev_priv = engine->i915;
1965         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
1966
1967         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1968          * commands followed by (reg, value) pairs. The values we are setting here are
1969          * only for the first context restore: on a subsequent save, the GPU will
1970          * recreate this batchbuffer with new values (including all the missing
1971          * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
1972         reg_state[CTX_LRI_HEADER_0] =
1973                 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
1974         ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
1975                        RING_CONTEXT_CONTROL(engine),
1976                        _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1977                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
1978                                           (HAS_RESOURCE_STREAMER(dev_priv) ?
1979                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
1980         ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
1981                        0);
1982         ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
1983                        0);
1984         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
1985                        RING_START(engine->mmio_base), 0);
1986         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
1987                        RING_CTL(engine->mmio_base),
1988                        RING_CTL_SIZE(ring->size) | RING_VALID);
1989         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
1990                        RING_BBADDR_UDW(engine->mmio_base), 0);
1991         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
1992                        RING_BBADDR(engine->mmio_base), 0);
1993         ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
1994                        RING_BBSTATE(engine->mmio_base),
1995                        RING_BB_PPGTT);
1996         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
1997                        RING_SBBADDR_UDW(engine->mmio_base), 0);
1998         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
1999                        RING_SBBADDR(engine->mmio_base), 0);
2000         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2001                        RING_SBBSTATE(engine->mmio_base), 0);
2002         if (engine->id == RCS) {
2003                 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2004                                RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2005                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2006                                RING_INDIRECT_CTX(engine->mmio_base), 0);
2007                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2008                                RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2009                 if (engine->wa_ctx.vma) {
2010                         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2011                         u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2012
2013                         reg_state[CTX_RCS_INDIRECT_CTX+1] =
2014                                 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2015                                 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2016
2017                         reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2018                                 intel_lr_indirect_ctx_offset(engine) << 6;
2019
2020                         reg_state[CTX_BB_PER_CTX_PTR+1] =
2021                                 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2022                                 0x01;
2023                 }
2024         }
2025         reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2026         ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2027                        RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2028         /* PDP values will be assigned later if needed */
2029         ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2030                        0);
2031         ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2032                        0);
2033         ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2034                        0);
2035         ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2036                        0);
2037         ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2038                        0);
2039         ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2040                        0);
2041         ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2042                        0);
2043         ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2044                        0);
2045
2046         if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
2047                 /* 64b PPGTT (48bit canonical)
2048                  * PDP0_DESCRIPTOR contains the base address of the PML4 and
2049                  * other PDP Descriptors are ignored.
2050                  */
2051                 ASSIGN_CTX_PML4(ppgtt, reg_state);
2052         }
2053
2054         if (engine->id == RCS) {
2055                 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2056                 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2057                                make_rpcs(dev_priv));
2058         }
2059 }
2060
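/*
 * Map a freshly allocated context object and write the initial register
 * state into its state page (the second page, LRC_STATE_PN).
 */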
2061 static int
2062 populate_lr_context(struct i915_gem_context *ctx,
2063                     struct drm_i915_gem_object *ctx_obj,
2064                     struct intel_engine_cs *engine,
2065                     struct intel_ring *ring)
2066 {
2067         void *vaddr;
2068         int ret;
2069
2070         ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2071         if (ret) {
2072                 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2073                 return ret;
2074         }
2075
2076         vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2077         if (IS_ERR(vaddr)) {
2078                 ret = PTR_ERR(vaddr);
2079                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2080                 return ret;
2081         }
2082         ctx_obj->mm.dirty = true;
2083
2084         /* The second page of the context object contains some fields which must
2085          * be set up prior to the first execution. */
2086
2087         execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
2088                                  ctx, engine, ring);
2089
2090         i915_gem_object_unpin_map(ctx_obj);
2091
2092         return 0;
2093 }
2094
2095 /**
2096  * intel_lr_context_size() - return the size of the context for an engine
2097  * @engine: which engine to find the context size for
2098  *
2099  * Each engine may require a different amount of space for a context image,
2100  * so when allocating (or copying) an image, this function can be used to
2101  * find the right size for the specific engine.
2102  *
2103  * Return: size (in bytes) of an engine-specific context image
2104  *
2105  * Note: this size includes the HWSP, which is part of the context image
2106  * in LRC mode, but does not include the "shared data page" used with
2107  * GuC submission. The caller should account for this if using the GuC.
2108  */
2109 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2110 {
2111         int ret = 0;
2112
2113         WARN_ON(INTEL_GEN(engine->i915) < 8);
2114
2115         switch (engine->id) {
2116         case RCS:
2117                 if (INTEL_GEN(engine->i915) >= 9)
2118                         ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2119                 else
2120                         ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2121                 break;
2122         case VCS:
2123         case BCS:
2124         case VECS:
2125         case VCS2:
2126                 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2127                 break;
2128         }
2129
2130         return ret;
2131 }
2132
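/*
 * Allocate and initialise the logical ring context for a context on an
 * engine the first time it is needed: the backing state object, its GGTT
 * vma and the ring buffer, with the register state populated from the
 * defaults above.
 */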
2133 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2134                                             struct intel_engine_cs *engine)
2135 {
2136         struct drm_i915_gem_object *ctx_obj;
2137         struct intel_context *ce = &ctx->engine[engine->id];
2138         struct i915_vma *vma;
2139         uint32_t context_size;
2140         struct intel_ring *ring;
2141         int ret;
2142
2143         WARN_ON(ce->state);
2144
2145         context_size = round_up(intel_lr_context_size(engine),
2146                                 I915_GTT_PAGE_SIZE);
2147
2148         /* One extra page as the shared data between the driver and GuC */
2149         context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2150
2151         ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2152         if (IS_ERR(ctx_obj)) {
2153                 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2154                 return PTR_ERR(ctx_obj);
2155         }
2156
2157         vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
2158         if (IS_ERR(vma)) {
2159                 ret = PTR_ERR(vma);
2160                 goto error_deref_obj;
2161         }
2162
2163         ring = intel_engine_create_ring(engine, ctx->ring_size);
2164         if (IS_ERR(ring)) {
2165                 ret = PTR_ERR(ring);
2166                 goto error_deref_obj;
2167         }
2168
2169         ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2170         if (ret) {
2171                 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2172                 goto error_ring_free;
2173         }
2174
2175         ce->ring = ring;
2176         ce->state = vma;
2177         ce->initialised = engine->init_context == NULL;
2178
2179         return 0;
2180
2181 error_ring_free:
2182         intel_ring_free(ring);
2183 error_deref_obj:
2184         i915_gem_object_put(ctx_obj);
2185         return ret;
2186 }
2187
2188 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2189 {
2190         struct intel_engine_cs *engine;
2191         struct i915_gem_context *ctx;
2192         enum intel_engine_id id;
2193
2194         /* Because we emit WA_TAIL_DWORDS there may be a disparity
2195          * between our bookkeeping in ce->ring->head and ce->ring->tail and
2196          * that stored in context. As we only write new commands from
2197          * ce->ring->tail onwards, everything before that is junk. If the GPU
2198          * starts reading from its RING_HEAD from the context, it may try to
2199          * execute that junk and die.
2200          *
2201          * So to avoid that we reset the context images upon resume. For
2202          * simplicity, we just zero everything out.
2203          */
2204         list_for_each_entry(ctx, &dev_priv->context_list, link) {
2205                 for_each_engine(engine, dev_priv, id) {
2206                         struct intel_context *ce = &ctx->engine[engine->id];
2207                         u32 *reg;
2208
2209                         if (!ce->state)
2210                                 continue;
2211
2212                         reg = i915_gem_object_pin_map(ce->state->obj,
2213                                                       I915_MAP_WB);
2214                         if (WARN_ON(IS_ERR(reg)))
2215                                 continue;
2216
2217                         reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2218                         reg[CTX_RING_HEAD+1] = 0;
2219                         reg[CTX_RING_TAIL+1] = 0;
2220
2221                         ce->state->obj->mm.dirty = true;
2222                         i915_gem_object_unpin_map(ce->state->obj);
2223
2224                         ce->ring->head = ce->ring->tail = 0;
2225                         ce->ring->last_retired_head = -1;
2226                         intel_ring_update_space(ce->ring);
2227                 }
2228         }
2229 }