drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some context to load as the
21  * current one in order to invoke a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with a different context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  * possible to destroy a context while it is still active.
64  *
65  */
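/*
 * For orientation, a rough sketch of the corresponding uAPI flow from
 * userspace (illustrative only, error handling omitted), using the context
 * ioctls from include/uapi/drm/i915_drm.h and libdrm's drmIoctl():
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = { ... };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   // S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);     // S1->S2
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // S2->S4
 */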
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/i915_drm.h>
71
72 #include "gt/intel_engine_heartbeat.h"
73 #include "gt/intel_engine_user.h"
74 #include "gt/intel_lrc_reg.h"
75 #include "gt/intel_ring.h"
76
77 #include "i915_gem_context.h"
78 #include "i915_globals.h"
79 #include "i915_trace.h"
80 #include "i915_user_extensions.h"
81
82 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
83
84 static struct i915_global_gem_context {
85         struct i915_global base;
86         struct kmem_cache *slab_luts;
87 } global;
88
89 struct i915_lut_handle *i915_lut_handle_alloc(void)
90 {
91         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
92 }
93
94 void i915_lut_handle_free(struct i915_lut_handle *lut)
95 {
96         kmem_cache_free(global.slab_luts, lut);
97 }
98
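/*
 * lut_close() tears down the context's handle->vma lookup table: each radix
 * tree entry is removed along with its i915_lut_handle, the vma open-count
 * is dropped (closing non-GGTT vma on the final close) and the object
 * reference held by the LUT is released. The RCU read lock is dropped and
 * re-acquired around each per-object lock.
 */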
99 static void lut_close(struct i915_gem_context *ctx)
100 {
101         struct radix_tree_iter iter;
102         void __rcu **slot;
103
104         lockdep_assert_held(&ctx->mutex);
105
106         rcu_read_lock();
107         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
108                 struct i915_vma *vma = rcu_dereference_raw(*slot);
109                 struct drm_i915_gem_object *obj = vma->obj;
110                 struct i915_lut_handle *lut;
111
112                 if (!kref_get_unless_zero(&obj->base.refcount))
113                         continue;
114
115                 rcu_read_unlock();
116                 i915_gem_object_lock(obj);
117                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
118                         if (lut->ctx != ctx)
119                                 continue;
120
121                         if (lut->handle != iter.index)
122                                 continue;
123
124                         list_del(&lut->obj_link);
125                         break;
126                 }
127                 i915_gem_object_unlock(obj);
128                 rcu_read_lock();
129
130                 if (&lut->obj_link != &obj->lut_list) {
131                         i915_lut_handle_free(lut);
132                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
133                         if (atomic_dec_and_test(&vma->open_count) &&
134                             !i915_vma_is_ggtt(vma))
135                                 i915_vma_close(vma);
136                         i915_gem_object_put(obj);
137                 }
138
139                 i915_gem_object_put(obj);
140         }
141         rcu_read_unlock();
142 }
143
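/*
 * Resolve a userspace engine description to an intel_context: either an
 * index into the user-supplied engine map (LOOKUP_USER_INDEX) or a
 * class/instance pair resolved via the legacy map. The lookup mode must
 * match how the context's engines were configured.
 */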
144 static struct intel_context *
145 lookup_user_engine(struct i915_gem_context *ctx,
146                    unsigned long flags,
147                    const struct i915_engine_class_instance *ci)
148 #define LOOKUP_USER_INDEX BIT(0)
149 {
150         int idx;
151
152         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
153                 return ERR_PTR(-EINVAL);
154
155         if (!i915_gem_context_user_engines(ctx)) {
156                 struct intel_engine_cs *engine;
157
158                 engine = intel_engine_lookup_user(ctx->i915,
159                                                   ci->engine_class,
160                                                   ci->engine_instance);
161                 if (!engine)
162                         return ERR_PTR(-EINVAL);
163
164                 idx = engine->legacy_idx;
165         } else {
166                 idx = ci->engine_instance;
167         }
168
169         return i915_gem_context_get_engine(ctx, idx);
170 }
171
172 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
173 {
174         while (count--) {
175                 if (!e->engines[count])
176                         continue;
177
178                 intel_context_put(e->engines[count]);
179         }
180         kfree(e);
181 }
182
183 static void free_engines(struct i915_gem_engines *e)
184 {
185         __free_engines(e, e->num_engines);
186 }
187
188 static void free_engines_rcu(struct rcu_head *rcu)
189 {
190         free_engines(container_of(rcu, struct i915_gem_engines, rcu));
191 }
192
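/*
 * Build the default (legacy) engine map for a new context: one intel_context
 * per physical engine that has a legacy ring id, indexed by that id. This is
 * the map used when userspace has not provided its own
 * I915_CONTEXT_PARAM_ENGINES array.
 */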
193 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
194 {
195         const struct intel_gt *gt = &ctx->i915->gt;
196         struct intel_engine_cs *engine;
197         struct i915_gem_engines *e;
198         enum intel_engine_id id;
199
200         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
201         if (!e)
202                 return ERR_PTR(-ENOMEM);
203
204         init_rcu_head(&e->rcu);
205         for_each_engine(engine, gt, id) {
206                 struct intel_context *ce;
207
208                 if (engine->legacy_idx == INVALID_ENGINE)
209                         continue;
210
211                 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
212                 GEM_BUG_ON(e->engines[engine->legacy_idx]);
213
214                 ce = intel_context_create(ctx, engine);
215                 if (IS_ERR(ce)) {
216                         __free_engines(e, e->num_engines + 1);
217                         return ERR_CAST(ce);
218                 }
219
220                 e->engines[engine->legacy_idx] = ce;
221                 e->num_engines = max(e->num_engines, engine->legacy_idx);
222         }
223         e->num_engines++;
224
225         return e;
226 }
227
228 static void i915_gem_context_free(struct i915_gem_context *ctx)
229 {
230         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
231
232         spin_lock(&ctx->i915->gem.contexts.lock);
233         list_del(&ctx->link);
234         spin_unlock(&ctx->i915->gem.contexts.lock);
235
236         free_engines(rcu_access_pointer(ctx->engines));
237         mutex_destroy(&ctx->engines_mutex);
238
239         kfree(ctx->jump_whitelist);
240
241         if (ctx->timeline)
242                 intel_timeline_put(ctx->timeline);
243
244         kfree(ctx->name);
245         put_pid(ctx->pid);
246
247         mutex_destroy(&ctx->mutex);
248
249         kfree_rcu(ctx, rcu);
250 }
251
252 static void contexts_free_all(struct llist_node *list)
253 {
254         struct i915_gem_context *ctx, *cn;
255
256         llist_for_each_entry_safe(ctx, cn, list, free_link)
257                 i915_gem_context_free(ctx);
258 }
259
260 static void contexts_flush_free(struct i915_gem_contexts *gc)
261 {
262         contexts_free_all(llist_del_all(&gc->free_list));
263 }
264
265 static void contexts_free_worker(struct work_struct *work)
266 {
267         struct i915_gem_contexts *gc =
268                 container_of(work, typeof(*gc), free_work);
269
270         contexts_flush_free(gc);
271 }
272
273 void i915_gem_context_release(struct kref *ref)
274 {
275         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
276         struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
277
278         trace_i915_context_free(ctx);
279         if (llist_add(&ctx->free_link, &gc->free_list))
280                 schedule_work(&gc->free_work);
281 }
282
283 static inline struct i915_gem_engines *
284 __context_engines_static(const struct i915_gem_context *ctx)
285 {
286         return rcu_dereference_protected(ctx->engines, true);
287 }
288
289 static bool __reset_engine(struct intel_engine_cs *engine)
290 {
291         struct intel_gt *gt = engine->gt;
292         bool success = false;
293
294         if (!intel_has_reset_engine(gt))
295                 return false;
296
297         if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
298                               &gt->reset.flags)) {
299                 success = intel_engine_reset(engine, NULL) == 0;
300                 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
301                                       &gt->reset.flags);
302         }
303
304         return success;
305 }
306
307 static void __reset_context(struct i915_gem_context *ctx,
308                             struct intel_engine_cs *engine)
309 {
310         intel_gt_handle_error(engine->gt, engine->mask, 0,
311                               "context closure in %s", ctx->name);
312 }
313
314 static bool __cancel_engine(struct intel_engine_cs *engine)
315 {
316         /*
317          * Send a "high priority pulse" down the engine to cause the
318          * current request to be momentarily preempted. (If it fails to
319          * be preempted, it will be reset). As we have marked our context
320          * as banned, any incomplete request, including any running, will
321          * as banned, any incomplete request, including any currently running, will
322          *
323          * If there is no hangchecking (one of the reasons why we try to
324          * cancel the context) and no forced preemption, there may be no
325          * means by which we reset the GPU and evict the persistent hog.
326          * Ergo if we are unable to inject a preemptive pulse that can
327          * kill the banned context, we fall back to doing a local reset
328          * instead.
329          */
330         if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
331             !intel_engine_pulse(engine))
332                 return true;
333
334         /* If we are unable to send a pulse, try resetting this engine. */
335         return __reset_engine(engine);
336 }
337
338 static struct intel_engine_cs *__active_engine(struct i915_request *rq)
339 {
340         struct intel_engine_cs *engine, *locked;
341
342         /*
343          * Serialise with __i915_request_submit() so that it either sees
344          * that we have been banned, or we know the request is already inflight.
345          */
346         locked = READ_ONCE(rq->engine);
347         spin_lock_irq(&locked->active.lock);
348         while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
349                 spin_unlock(&locked->active.lock);
350                 spin_lock(&engine->active.lock);
351                 locked = engine;
352         }
353
354         engine = NULL;
355         if (i915_request_is_active(rq) && !rq->fence.error)
356                 engine = rq->engine;
357
358         spin_unlock_irq(&locked->active.lock);
359
360         return engine;
361 }
362
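/*
 * Find the engine, if any, on which this context is still executing by
 * walking its timeline backwards and asking the backend whether the most
 * recent incomplete request is inflight.
 */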
363 static struct intel_engine_cs *active_engine(struct intel_context *ce)
364 {
365         struct intel_engine_cs *engine = NULL;
366         struct i915_request *rq;
367
368         if (!ce->timeline)
369                 return NULL;
370
371         mutex_lock(&ce->timeline->mutex);
372         list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
373                 if (i915_request_completed(rq))
374                         break;
375
376                 /* Check with the backend if the request is inflight */
377                 engine = __active_engine(rq);
378                 if (engine)
379                         break;
380         }
381         mutex_unlock(&ce->timeline->mutex);
382
383         return engine;
384 }
385
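/*
 * kill_context() marks the context as banned and then chases down any of its
 * requests still on the GPU: each engine found executing the context is asked
 * to preempt it with a high priority pulse, falling back to an engine reset
 * and, as a last resort, intel_gt_handle_error().
 */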
386 static void kill_context(struct i915_gem_context *ctx)
387 {
388         struct i915_gem_engines_iter it;
389         struct intel_context *ce;
390
391         /*
392          * If we are already banned, it was due to a guilty request causing
393          * a reset and the entire context being evicted from the GPU.
394          */
395         if (i915_gem_context_is_banned(ctx))
396                 return;
397
398         i915_gem_context_set_banned(ctx);
399
400         /*
401          * Map the user's engine back to the actual engines; one virtual
402          * engine will be mapped to multiple engines, and using ctx->engine[]
403          * the same engine may have multiple instances in the user's map.
404          * However, we only care about pending requests, so only include
405          * engines on which there are incomplete requests.
406          */
407         for_each_gem_engine(ce, __context_engines_static(ctx), it) {
408                 struct intel_engine_cs *engine;
409
410                 /*
411                  * Check the current active state of this context; if we
412                  * are currently executing on the GPU we need to evict
413                  * ourselves. On the other hand, if we haven't yet been
414                  * submitted to the GPU or if everything is complete,
415                  * we have nothing to do.
416                  */
417                 engine = active_engine(ce);
418
419                 /* First attempt to gracefully cancel the context */
420                 if (engine && !__cancel_engine(engine))
421                         /*
422                          * If we are unable to send a preemptive pulse to bump
423                          * the context from the GPU, we have to resort to a full
424                          * reset. We hope the collateral damage is worth it.
425                          */
426                         __reset_context(ctx, engine);
427         }
428 }
429
430 static void context_close(struct i915_gem_context *ctx)
431 {
432         struct i915_address_space *vm;
433
434         i915_gem_context_set_closed(ctx);
435
436         mutex_lock(&ctx->mutex);
437
438         vm = i915_gem_context_vm(ctx);
439         if (vm)
440                 i915_vm_close(vm);
441
442         ctx->file_priv = ERR_PTR(-EBADF);
443
444         /*
445          * The LUT uses the VMA as a backpointer to unref the object,
446          * so we need to clear the LUT before we close all the VMA (inside
447          * the ppgtt).
448          */
449         lut_close(ctx);
450
451         mutex_unlock(&ctx->mutex);
452
453         /*
454          * If the user has disabled hangchecking, we cannot be sure that
455          * the batches will ever complete after the context is closed,
456          * keeping the context and all resources pinned forever. So in this
457          * case we opt to forcibly kill off all remaining requests on
458          * context close.
459          */
460         if (!i915_gem_context_is_persistent(ctx) ||
461             !i915_modparams.enable_hangcheck)
462                 kill_context(ctx);
463
464         i915_gem_context_put(ctx);
465 }
466
467 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
468 {
469         if (i915_gem_context_is_persistent(ctx) == state)
470                 return 0;
471
472         if (state) {
473                 /*
474                  * Only contexts whose remaining requests are short-lived [they
475                  * will expire or be reset] are allowed to survive past termination.
476                  * We require hangcheck to ensure the persistent requests are healthy.
477                  */
478                 if (!i915_modparams.enable_hangcheck)
479                         return -EINVAL;
480
481                 i915_gem_context_set_persistence(ctx);
482         } else {
483                 /* To cancel a context we use "preempt-to-idle" */
484                 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
485                         return -ENODEV;
486
487                 i915_gem_context_clear_persistence(ctx);
488         }
489
490         return 0;
491 }
492
493 static struct i915_gem_context *
494 __create_context(struct drm_i915_private *i915)
495 {
496         struct i915_gem_context *ctx;
497         struct i915_gem_engines *e;
498         int err;
499         int i;
500
501         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
502         if (!ctx)
503                 return ERR_PTR(-ENOMEM);
504
505         kref_init(&ctx->ref);
506         ctx->i915 = i915;
507         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
508         mutex_init(&ctx->mutex);
509
510         mutex_init(&ctx->engines_mutex);
511         e = default_engines(ctx);
512         if (IS_ERR(e)) {
513                 err = PTR_ERR(e);
514                 goto err_free;
515         }
516         RCU_INIT_POINTER(ctx->engines, e);
517
518         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
519
520         /* NB: Mark all slices as needing a remap so that when the context first
521          * loads it will restore whatever remap state already exists. If there
522          * is no remap info, it will be a NOP. */
523         ctx->remap_slice = ALL_L3_SLICES(i915);
524
525         i915_gem_context_set_bannable(ctx);
526         i915_gem_context_set_recoverable(ctx);
527         __context_set_persistence(ctx, true /* cgroup hook? */);
528
529         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
530                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
531
532         ctx->jump_whitelist = NULL;
533         ctx->jump_whitelist_cmds = 0;
534
535         spin_lock(&i915->gem.contexts.lock);
536         list_add_tail(&ctx->link, &i915->gem.contexts.list);
537         spin_unlock(&i915->gem.contexts.lock);
538
539         return ctx;
540
541 err_free:
542         kfree(ctx);
543         return ERR_PTR(err);
544 }
545
546 static void
547 context_apply_all(struct i915_gem_context *ctx,
548                   void (*fn)(struct intel_context *ce, void *data),
549                   void *data)
550 {
551         struct i915_gem_engines_iter it;
552         struct intel_context *ce;
553
554         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
555                 fn(ce, data);
556         i915_gem_context_unlock_engines(ctx);
557 }
558
559 static void __apply_ppgtt(struct intel_context *ce, void *vm)
560 {
561         i915_vm_put(ce->vm);
562         ce->vm = i915_vm_get(vm);
563 }
564
565 static struct i915_address_space *
566 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
567 {
568         struct i915_address_space *old = i915_gem_context_vm(ctx);
569
570         GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
571
572         rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
573         context_apply_all(ctx, __apply_ppgtt, vm);
574
575         return old;
576 }
577
578 static void __assign_ppgtt(struct i915_gem_context *ctx,
579                            struct i915_address_space *vm)
580 {
581         if (vm == rcu_access_pointer(ctx->vm))
582                 return;
583
584         vm = __set_ppgtt(ctx, vm);
585         if (vm)
586                 i915_vm_close(vm);
587 }
588
589 static void __set_timeline(struct intel_timeline **dst,
590                            struct intel_timeline *src)
591 {
592         struct intel_timeline *old = *dst;
593
594         *dst = src ? intel_timeline_get(src) : NULL;
595
596         if (old)
597                 intel_timeline_put(old);
598 }
599
600 static void __apply_timeline(struct intel_context *ce, void *timeline)
601 {
602         __set_timeline(&ce->timeline, timeline);
603 }
604
605 static void __assign_timeline(struct i915_gem_context *ctx,
606                               struct intel_timeline *timeline)
607 {
608         __set_timeline(&ctx->timeline, timeline);
609         context_apply_all(ctx, __apply_timeline, timeline);
610 }
611
612 static struct i915_gem_context *
613 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
614 {
615         struct i915_gem_context *ctx;
616
617         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
618             !HAS_EXECLISTS(i915))
619                 return ERR_PTR(-EINVAL);
620
621         /* Reap the stale contexts */
622         contexts_flush_free(&i915->gem.contexts);
623
624         ctx = __create_context(i915);
625         if (IS_ERR(ctx))
626                 return ctx;
627
628         if (HAS_FULL_PPGTT(i915)) {
629                 struct i915_ppgtt *ppgtt;
630
631                 ppgtt = i915_ppgtt_create(i915);
632                 if (IS_ERR(ppgtt)) {
633                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
634                                          PTR_ERR(ppgtt));
635                         context_close(ctx);
636                         return ERR_CAST(ppgtt);
637                 }
638
639                 mutex_lock(&ctx->mutex);
640                 __assign_ppgtt(ctx, &ppgtt->vm);
641                 mutex_unlock(&ctx->mutex);
642
643                 i915_vm_put(&ppgtt->vm);
644         }
645
646         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
647                 struct intel_timeline *timeline;
648
649                 timeline = intel_timeline_create(&i915->gt, NULL);
650                 if (IS_ERR(timeline)) {
651                         context_close(ctx);
652                         return ERR_CAST(timeline);
653                 }
654
655                 __assign_timeline(ctx, timeline);
656                 intel_timeline_put(timeline);
657         }
658
659         trace_i915_context_create(ctx);
660
661         return ctx;
662 }
663
664 static void
665 destroy_kernel_context(struct i915_gem_context **ctxp)
666 {
667         struct i915_gem_context *ctx;
668
669         /* Keep the context ref so that we can free it immediately ourselves */
670         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
671         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
672
673         context_close(ctx);
674         i915_gem_context_free(ctx);
675 }
676
677 struct i915_gem_context *
678 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
679 {
680         struct i915_gem_context *ctx;
681
682         ctx = i915_gem_create_context(i915, 0);
683         if (IS_ERR(ctx))
684                 return ctx;
685
686         i915_gem_context_clear_bannable(ctx);
687         i915_gem_context_set_persistence(ctx);
688         ctx->sched.priority = I915_USER_PRIORITY(prio);
689
690         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
691
692         return ctx;
693 }
694
695 static void init_contexts(struct i915_gem_contexts *gc)
696 {
697         spin_lock_init(&gc->lock);
698         INIT_LIST_HEAD(&gc->list);
699
700         INIT_WORK(&gc->free_work, contexts_free_worker);
701         init_llist_head(&gc->free_list);
702 }
703
704 int i915_gem_init_contexts(struct drm_i915_private *i915)
705 {
706         struct i915_gem_context *ctx;
707
708         /* Reassure ourselves we are only called once */
709         GEM_BUG_ON(i915->kernel_context);
710
711         init_contexts(&i915->gem.contexts);
712
713         /* lowest priority; idle task */
714         ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
715         if (IS_ERR(ctx)) {
716                 DRM_ERROR("Failed to create default global context\n");
717                 return PTR_ERR(ctx);
718         }
719         i915->kernel_context = ctx;
720
721         DRM_DEBUG_DRIVER("%s context support initialized\n",
722                          DRIVER_CAPS(i915)->has_logical_contexts ?
723                          "logical" : "fake");
724         return 0;
725 }
726
727 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
728 {
729         destroy_kernel_context(&i915->kernel_context);
730         flush_work(&i915->gem.contexts.free_work);
731 }
732
733 static int context_idr_cleanup(int id, void *p, void *data)
734 {
735         context_close(p);
736         return 0;
737 }
738
739 static int vm_idr_cleanup(int id, void *p, void *data)
740 {
741         i915_vm_put(p);
742         return 0;
743 }
744
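/*
 * Publish a freshly created context to userspace: record the owning file,
 * pid and a human readable name for debug, then allocate the context id in
 * the per-file idr through which execbuf and the context ioctls find it.
 */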
745 static int gem_context_register(struct i915_gem_context *ctx,
746                                 struct drm_i915_file_private *fpriv)
747 {
748         struct i915_address_space *vm;
749         int ret;
750
751         ctx->file_priv = fpriv;
752
753         mutex_lock(&ctx->mutex);
754         vm = i915_gem_context_vm(ctx);
755         if (vm)
756                 WRITE_ONCE(vm->file, fpriv); /* XXX */
757         mutex_unlock(&ctx->mutex);
758
759         ctx->pid = get_task_pid(current, PIDTYPE_PID);
760         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
761                               current->comm, pid_nr(ctx->pid));
762         if (!ctx->name) {
763                 ret = -ENOMEM;
764                 goto err_pid;
765         }
766
767         /* And finally expose ourselves to userspace via the idr */
768         mutex_lock(&fpriv->context_idr_lock);
769         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
770         mutex_unlock(&fpriv->context_idr_lock);
771         if (ret >= 0)
772                 goto out;
773
774         kfree(fetch_and_zero(&ctx->name));
775 err_pid:
776         put_pid(fetch_and_zero(&ctx->pid));
777 out:
778         return ret;
779 }
780
781 int i915_gem_context_open(struct drm_i915_private *i915,
782                           struct drm_file *file)
783 {
784         struct drm_i915_file_private *file_priv = file->driver_priv;
785         struct i915_gem_context *ctx;
786         int err;
787
788         mutex_init(&file_priv->context_idr_lock);
789         mutex_init(&file_priv->vm_idr_lock);
790
791         idr_init(&file_priv->context_idr);
792         idr_init_base(&file_priv->vm_idr, 1);
793
794         ctx = i915_gem_create_context(i915, 0);
795         if (IS_ERR(ctx)) {
796                 err = PTR_ERR(ctx);
797                 goto err;
798         }
799
800         err = gem_context_register(ctx, file_priv);
801         if (err < 0)
802                 goto err_ctx;
803
804         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
805         GEM_BUG_ON(err > 0);
806
807         return 0;
808
809 err_ctx:
810         context_close(ctx);
811 err:
812         idr_destroy(&file_priv->vm_idr);
813         idr_destroy(&file_priv->context_idr);
814         mutex_destroy(&file_priv->vm_idr_lock);
815         mutex_destroy(&file_priv->context_idr_lock);
816         return err;
817 }
818
819 void i915_gem_context_close(struct drm_file *file)
820 {
821         struct drm_i915_file_private *file_priv = file->driver_priv;
822         struct drm_i915_private *i915 = file_priv->dev_priv;
823
824         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
825         idr_destroy(&file_priv->context_idr);
826         mutex_destroy(&file_priv->context_idr_lock);
827
828         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
829         idr_destroy(&file_priv->vm_idr);
830         mutex_destroy(&file_priv->vm_idr_lock);
831
832         contexts_flush_free(&i915->gem.contexts);
833 }
834
835 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
836                              struct drm_file *file)
837 {
838         struct drm_i915_private *i915 = to_i915(dev);
839         struct drm_i915_gem_vm_control *args = data;
840         struct drm_i915_file_private *file_priv = file->driver_priv;
841         struct i915_ppgtt *ppgtt;
842         int err;
843
844         if (!HAS_FULL_PPGTT(i915))
845                 return -ENODEV;
846
847         if (args->flags)
848                 return -EINVAL;
849
850         ppgtt = i915_ppgtt_create(i915);
851         if (IS_ERR(ppgtt))
852                 return PTR_ERR(ppgtt);
853
854         ppgtt->vm.file = file_priv;
855
856         if (args->extensions) {
857                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
858                                            NULL, 0,
859                                            ppgtt);
860                 if (err)
861                         goto err_put;
862         }
863
864         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
865         if (err)
866                 goto err_put;
867
868         err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
869         if (err < 0)
870                 goto err_unlock;
871
872         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
873
874         mutex_unlock(&file_priv->vm_idr_lock);
875
876         args->vm_id = err;
877         return 0;
878
879 err_unlock:
880         mutex_unlock(&file_priv->vm_idr_lock);
881 err_put:
882         i915_vm_put(&ppgtt->vm);
883         return err;
884 }
885
886 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
887                               struct drm_file *file)
888 {
889         struct drm_i915_file_private *file_priv = file->driver_priv;
890         struct drm_i915_gem_vm_control *args = data;
891         struct i915_address_space *vm;
892         int err;
893         u32 id;
894
895         if (args->flags)
896                 return -EINVAL;
897
898         if (args->extensions)
899                 return -EINVAL;
900
901         id = args->vm_id;
902         if (!id)
903                 return -ENOENT;
904
905         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
906         if (err)
907                 return err;
908
909         vm = idr_remove(&file_priv->vm_idr, id);
910
911         mutex_unlock(&file_priv->vm_idr_lock);
912         if (!vm)
913                 return -ENOENT;
914
915         i915_vm_put(vm);
916         return 0;
917 }
918
919 struct context_barrier_task {
920         struct i915_active base;
921         void (*task)(void *data);
922         void *data;
923 };
924
925 __i915_active_call
926 static void cb_retire(struct i915_active *base)
927 {
928         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
929
930         if (cb->task)
931                 cb->task(cb->data);
932
933         i915_active_fini(&cb->base);
934         kfree(cb);
935 }
936
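/*
 * context_barrier_task() emits a request on each of the context's engines
 * (subject to the engine mask and the optional skip() callback), lets the
 * caller add commands via emit(), and invokes task() once all of those
 * requests have retired. It is used to defer teardown work, such as
 * releasing the old ppgtt in set_ppgtt(), until the GPU has drained any
 * work still using the previous state.
 */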
937 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
938 static int context_barrier_task(struct i915_gem_context *ctx,
939                                 intel_engine_mask_t engines,
940                                 bool (*skip)(struct intel_context *ce, void *data),
941                                 int (*emit)(struct i915_request *rq, void *data),
942                                 void (*task)(void *data),
943                                 void *data)
944 {
945         struct context_barrier_task *cb;
946         struct i915_gem_engines_iter it;
947         struct intel_context *ce;
948         int err = 0;
949
950         GEM_BUG_ON(!task);
951
952         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
953         if (!cb)
954                 return -ENOMEM;
955
956         i915_active_init(&cb->base, NULL, cb_retire);
957         err = i915_active_acquire(&cb->base);
958         if (err) {
959                 kfree(cb);
960                 return err;
961         }
962
963         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
964                 struct i915_request *rq;
965
966                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
967                                        ce->engine->mask)) {
968                         err = -ENXIO;
969                         break;
970                 }
971
972                 if (!(ce->engine->mask & engines))
973                         continue;
974
975                 if (skip && skip(ce, data))
976                         continue;
977
978                 rq = intel_context_create_request(ce);
979                 if (IS_ERR(rq)) {
980                         err = PTR_ERR(rq);
981                         break;
982                 }
983
984                 err = 0;
985                 if (emit)
986                         err = emit(rq, data);
987                 if (err == 0)
988                         err = i915_active_add_request(&cb->base, rq);
989
990                 i915_request_add(rq);
991                 if (err)
992                         break;
993         }
994         i915_gem_context_unlock_engines(ctx);
995
996         cb->task = err ? NULL : task; /* caller needs to unwind instead */
997         cb->data = data;
998
999         i915_active_release(&cb->base);
1000
1001         return err;
1002 }
1003
1004 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1005                      struct i915_gem_context *ctx,
1006                      struct drm_i915_gem_context_param *args)
1007 {
1008         struct i915_address_space *vm;
1009         int ret;
1010
1011         if (!rcu_access_pointer(ctx->vm))
1012                 return -ENODEV;
1013
1014         rcu_read_lock();
1015         vm = i915_vm_get(ctx->vm);
1016         rcu_read_unlock();
1017
1018         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1019         if (ret)
1020                 goto err_put;
1021
1022         ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
1023         GEM_BUG_ON(!ret);
1024         if (ret < 0)
1025                 goto err_unlock;
1026
1027         i915_vm_open(vm);
1028
1029         args->size = 0;
1030         args->value = ret;
1031
1032         ret = 0;
1033 err_unlock:
1034         mutex_unlock(&file_priv->vm_idr_lock);
1035 err_put:
1036         i915_vm_put(vm);
1037         return ret;
1038 }
1039
1040 static void set_ppgtt_barrier(void *data)
1041 {
1042         struct i915_address_space *old = data;
1043
1044         if (INTEL_GEN(old->i915) < 8)
1045                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1046
1047         i915_vm_close(old);
1048 }
1049
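/*
 * Emit the MI_LOAD_REGISTER_IMM commands that point the ring's PDP registers
 * at the new page tables: a single pointer to the PML4 for 4-level ppgtt, or
 * all GEN8_3LVL_PDPES entries for 3-level ppgtt on execlists platforms. On
 * gen6/7 the ppgtt is not part of the context image, so it is simply pinned
 * instead.
 */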
1050 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1051 {
1052         struct i915_address_space *vm = rq->hw_context->vm;
1053         struct intel_engine_cs *engine = rq->engine;
1054         u32 base = engine->mmio_base;
1055         u32 *cs;
1056         int i;
1057
1058         if (i915_vm_is_4lvl(vm)) {
1059                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1060                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1061
1062                 cs = intel_ring_begin(rq, 6);
1063                 if (IS_ERR(cs))
1064                         return PTR_ERR(cs);
1065
1066                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1067
1068                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1069                 *cs++ = upper_32_bits(pd_daddr);
1070                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1071                 *cs++ = lower_32_bits(pd_daddr);
1072
1073                 *cs++ = MI_NOOP;
1074                 intel_ring_advance(rq, cs);
1075         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1076                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1077                 int err;
1078
1079                 /* Magic required to prevent forcewake errors! */
1080                 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1081                 if (err)
1082                         return err;
1083
1084                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1085                 if (IS_ERR(cs))
1086                         return PTR_ERR(cs);
1087
1088                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1089                 for (i = GEN8_3LVL_PDPES; i--; ) {
1090                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1091
1092                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1093                         *cs++ = upper_32_bits(pd_daddr);
1094                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1095                         *cs++ = lower_32_bits(pd_daddr);
1096                 }
1097                 *cs++ = MI_NOOP;
1098                 intel_ring_advance(rq, cs);
1099         } else {
1100                 /* ppGTT is not part of the legacy context image */
1101                 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1102         }
1103
1104         return 0;
1105 }
1106
1107 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1108 {
1109         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1110                 return !ce->state;
1111         else
1112                 return !atomic_read(&ce->pin_count);
1113 }
1114
1115 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1116                      struct i915_gem_context *ctx,
1117                      struct drm_i915_gem_context_param *args)
1118 {
1119         struct i915_address_space *vm, *old;
1120         int err;
1121
1122         if (args->size)
1123                 return -EINVAL;
1124
1125         if (!rcu_access_pointer(ctx->vm))
1126                 return -ENODEV;
1127
1128         if (upper_32_bits(args->value))
1129                 return -ENOENT;
1130
1131         rcu_read_lock();
1132         vm = idr_find(&file_priv->vm_idr, args->value);
1133         if (vm && !kref_get_unless_zero(&vm->ref))
1134                 vm = NULL;
1135         rcu_read_unlock();
1136         if (!vm)
1137                 return -ENOENT;
1138
1139         err = mutex_lock_interruptible(&ctx->mutex);
1140         if (err)
1141                 goto out;
1142
1143         if (i915_gem_context_is_closed(ctx)) {
1144                 err = -ENOENT;
1145                 goto unlock;
1146         }
1147
1148         if (vm == rcu_access_pointer(ctx->vm))
1149                 goto unlock;
1150
1151         /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1152         lut_close(ctx);
1153
1154         old = __set_ppgtt(ctx, vm);
1155
1156         /*
1157          * We need to flush any requests using the current ppgtt before
1158          * we release it as the requests do not hold a reference themselves,
1159          * only indirectly through the context.
1160          */
1161         err = context_barrier_task(ctx, ALL_ENGINES,
1162                                    skip_ppgtt_update,
1163                                    emit_ppgtt_update,
1164                                    set_ppgtt_barrier,
1165                                    old);
1166         if (err) {
1167                 i915_vm_close(__set_ppgtt(ctx, old));
1168                 i915_vm_close(old);
1169         }
1170
1171 unlock:
1172         mutex_unlock(&ctx->mutex);
1173 out:
1174         i915_vm_put(vm);
1175         return err;
1176 }
1177
1178 static int gen8_emit_rpcs_config(struct i915_request *rq,
1179                                  struct intel_context *ce,
1180                                  struct intel_sseu sseu)
1181 {
1182         u64 offset;
1183         u32 *cs;
1184
1185         cs = intel_ring_begin(rq, 4);
1186         if (IS_ERR(cs))
1187                 return PTR_ERR(cs);
1188
1189         offset = i915_ggtt_offset(ce->state) +
1190                  LRC_STATE_PN * PAGE_SIZE +
1191                  CTX_R_PWR_CLK_STATE * 4;
1192
1193         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1194         *cs++ = lower_32_bits(offset);
1195         *cs++ = upper_32_bits(offset);
1196         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1197
1198         intel_ring_advance(rq, cs);
1199
1200         return 0;
1201 }
1202
1203 static int
1204 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1205 {
1206         struct i915_request *rq;
1207         int ret;
1208
1209         lockdep_assert_held(&ce->pin_mutex);
1210
1211         /*
1212          * If the context is not idle, we have to submit an ordered request to
1213          * modify its context image via the kernel context (writing to our own
1214          * image, or into the registers directly, does not stick). Pristine
1215          * and idle contexts will be configured on pinning.
1216          */
1217         if (!intel_context_is_pinned(ce))
1218                 return 0;
1219
1220         rq = i915_request_create(ce->engine->kernel_context);
1221         if (IS_ERR(rq))
1222                 return PTR_ERR(rq);
1223
1224         /* Serialise with the remote context */
1225         ret = intel_context_prepare_remote_request(ce, rq);
1226         if (ret == 0)
1227                 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1228
1229         i915_request_add(rq);
1230         return ret;
1231 }
1232
1233 static int
1234 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1235 {
1236         int ret;
1237
1238         GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1239
1240         ret = intel_context_lock_pinned(ce);
1241         if (ret)
1242                 return ret;
1243
1244         /* Nothing to do if unmodified. */
1245         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1246                 goto unlock;
1247
1248         ret = gen8_modify_rpcs(ce, sseu);
1249         if (!ret)
1250                 ce->sseu = sseu;
1251
1252 unlock:
1253         intel_context_unlock_pinned(ce);
1254         return ret;
1255 }
1256
1257 static int
1258 user_to_context_sseu(struct drm_i915_private *i915,
1259                      const struct drm_i915_gem_context_param_sseu *user,
1260                      struct intel_sseu *context)
1261 {
1262         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1263
1264         /* No zeros in any field. */
1265         if (!user->slice_mask || !user->subslice_mask ||
1266             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1267                 return -EINVAL;
1268
1269         /* Max >= min. */
1270         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1271                 return -EINVAL;
1272
1273         /*
1274          * Some future proofing on the types since the uAPI is wider than the
1275          * current internal implementation.
1276          */
1277         if (overflows_type(user->slice_mask, context->slice_mask) ||
1278             overflows_type(user->subslice_mask, context->subslice_mask) ||
1279             overflows_type(user->min_eus_per_subslice,
1280                            context->min_eus_per_subslice) ||
1281             overflows_type(user->max_eus_per_subslice,
1282                            context->max_eus_per_subslice))
1283                 return -EINVAL;
1284
1285         /* Check validity against hardware. */
1286         if (user->slice_mask & ~device->slice_mask)
1287                 return -EINVAL;
1288
1289         if (user->subslice_mask & ~device->subslice_mask[0])
1290                 return -EINVAL;
1291
1292         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1293                 return -EINVAL;
1294
1295         context->slice_mask = user->slice_mask;
1296         context->subslice_mask = user->subslice_mask;
1297         context->min_eus_per_subslice = user->min_eus_per_subslice;
1298         context->max_eus_per_subslice = user->max_eus_per_subslice;
1299
1300         /* Part specific restrictions. */
1301         if (IS_GEN(i915, 11)) {
1302                 unsigned int hw_s = hweight8(device->slice_mask);
1303                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1304                 unsigned int req_s = hweight8(context->slice_mask);
1305                 unsigned int req_ss = hweight8(context->subslice_mask);
1306
1307                 /*
1308                  * Only full subslice enablement is possible if more than one
1309                  * slice is turned on.
1310                  */
1311                 if (req_s > 1 && req_ss != hw_ss_per_s)
1312                         return -EINVAL;
1313
1314                 /*
1315                  * If more than four (SScount bitfield limit) subslices are
1316                  * requested then the number has to be even.
1317                  */
1318                 if (req_ss > 4 && (req_ss & 1))
1319                         return -EINVAL;
1320
1321                 /*
1322                  * If only one slice is enabled and the subslice count is below the
1323                  * device's full enablement, it must be at most half of all the
1324                  * available subslices.
1325                  */
1326                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1327                     req_ss > (hw_ss_per_s / 2))
1328                         return -EINVAL;
1329
1330                 /* ABI restriction - VME use case only. */
1331
1332                 /* All slices or one slice only. */
1333                 if (req_s != 1 && req_s != hw_s)
1334                         return -EINVAL;
1335
1336                 /*
1337                  * Half subslices or full enablement only when one slice is
1338                  * enabled.
1339                  */
1340                 if (req_s == 1 &&
1341                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1342                         return -EINVAL;
1343
1344                 /* No EU configuration changes. */
1345                 if ((user->min_eus_per_subslice !=
1346                      device->max_eus_per_subslice) ||
1347                     (user->max_eus_per_subslice !=
1348                      device->max_eus_per_subslice))
1349                         return -EINVAL;
1350         }
1351
1352         return 0;
1353 }
1354
1355 static int set_sseu(struct i915_gem_context *ctx,
1356                     struct drm_i915_gem_context_param *args)
1357 {
1358         struct drm_i915_private *i915 = ctx->i915;
1359         struct drm_i915_gem_context_param_sseu user_sseu;
1360         struct intel_context *ce;
1361         struct intel_sseu sseu;
1362         unsigned long lookup;
1363         int ret;
1364
1365         if (args->size < sizeof(user_sseu))
1366                 return -EINVAL;
1367
1368         if (!IS_GEN(i915, 11))
1369                 return -ENODEV;
1370
1371         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1372                            sizeof(user_sseu)))
1373                 return -EFAULT;
1374
1375         if (user_sseu.rsvd)
1376                 return -EINVAL;
1377
1378         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1379                 return -EINVAL;
1380
1381         lookup = 0;
1382         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1383                 lookup |= LOOKUP_USER_INDEX;
1384
1385         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1386         if (IS_ERR(ce))
1387                 return PTR_ERR(ce);
1388
1389         /* Only render engine supports RPCS configuration. */
1390         if (ce->engine->class != RENDER_CLASS) {
1391                 ret = -ENODEV;
1392                 goto out_ce;
1393         }
1394
1395         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1396         if (ret)
1397                 goto out_ce;
1398
1399         ret = intel_context_reconfigure_sseu(ce, sseu);
1400         if (ret)
1401                 goto out_ce;
1402
1403         args->size = sizeof(user_sseu);
1404
1405 out_ce:
1406         intel_context_put(ce);
1407         return ret;
1408 }
1409
1410 struct set_engines {
1411         struct i915_gem_context *ctx;
1412         struct i915_gem_engines *engines;
1413 };
1414
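/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: fill one slot of the user engine
 * map with a virtual engine that load balances across the listed sibling
 * engines. Requires execlists; not yet wired up for GuC submission.
 */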
1415 static int
1416 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1417 {
1418         struct i915_context_engines_load_balance __user *ext =
1419                 container_of_user(base, typeof(*ext), base);
1420         const struct set_engines *set = data;
1421         struct intel_engine_cs *stack[16];
1422         struct intel_engine_cs **siblings;
1423         struct intel_context *ce;
1424         u16 num_siblings, idx;
1425         unsigned int n;
1426         int err;
1427
1428         if (!HAS_EXECLISTS(set->ctx->i915))
1429                 return -ENODEV;
1430
1431         if (USES_GUC_SUBMISSION(set->ctx->i915))
1432                 return -ENODEV; /* not implemented yet */
1433
1434         if (get_user(idx, &ext->engine_index))
1435                 return -EFAULT;
1436
1437         if (idx >= set->engines->num_engines) {
1438                 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1439                           idx, set->engines->num_engines);
1440                 return -EINVAL;
1441         }
1442
1443         idx = array_index_nospec(idx, set->engines->num_engines);
1444         if (set->engines->engines[idx]) {
1445                 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1446                 return -EEXIST;
1447         }
1448
1449         if (get_user(num_siblings, &ext->num_siblings))
1450                 return -EFAULT;
1451
1452         err = check_user_mbz(&ext->flags);
1453         if (err)
1454                 return err;
1455
1456         err = check_user_mbz(&ext->mbz64);
1457         if (err)
1458                 return err;
1459
1460         siblings = stack;
1461         if (num_siblings > ARRAY_SIZE(stack)) {
1462                 siblings = kmalloc_array(num_siblings,
1463                                          sizeof(*siblings),
1464                                          GFP_KERNEL);
1465                 if (!siblings)
1466                         return -ENOMEM;
1467         }
1468
1469         for (n = 0; n < num_siblings; n++) {
1470                 struct i915_engine_class_instance ci;
1471
1472                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1473                         err = -EFAULT;
1474                         goto out_siblings;
1475                 }
1476
1477                 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1478                                                        ci.engine_class,
1479                                                        ci.engine_instance);
1480                 if (!siblings[n]) {
1481                         DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1482                                   n, ci.engine_class, ci.engine_instance);
1483                         err = -EINVAL;
1484                         goto out_siblings;
1485                 }
1486         }
1487
1488         ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1489         if (IS_ERR(ce)) {
1490                 err = PTR_ERR(ce);
1491                 goto out_siblings;
1492         }
1493
1494         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1495                 intel_context_put(ce);
1496                 err = -EEXIST;
1497                 goto out_siblings;
1498         }
1499
1500 out_siblings:
1501         if (siblings != stack)
1502                 kfree(siblings);
1503
1504         return err;
1505 }
1506
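/*
 * I915_CONTEXT_ENGINES_EXT_BOND: for a virtual engine in the user map,
 * record which sibling may be chosen when a request is submitted with a
 * bonded submit-fence from the given master engine.
 */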
1507 static int
1508 set_engines__bond(struct i915_user_extension __user *base, void *data)
1509 {
1510         struct i915_context_engines_bond __user *ext =
1511                 container_of_user(base, typeof(*ext), base);
1512         const struct set_engines *set = data;
1513         struct i915_engine_class_instance ci;
1514         struct intel_engine_cs *virtual;
1515         struct intel_engine_cs *master;
1516         u16 idx, num_bonds;
1517         int err, n;
1518
1519         if (get_user(idx, &ext->virtual_index))
1520                 return -EFAULT;
1521
1522         if (idx >= set->engines->num_engines) {
1523                 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1524                           idx, set->engines->num_engines);
1525                 return -EINVAL;
1526         }
1527
1528         idx = array_index_nospec(idx, set->engines->num_engines);
1529         if (!set->engines->engines[idx]) {
1530                 DRM_DEBUG("Invalid engine at %d\n", idx);
1531                 return -EINVAL;
1532         }
1533         virtual = set->engines->engines[idx]->engine;
1534
1535         err = check_user_mbz(&ext->flags);
1536         if (err)
1537                 return err;
1538
1539         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1540                 err = check_user_mbz(&ext->mbz64[n]);
1541                 if (err)
1542                         return err;
1543         }
1544
1545         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1546                 return -EFAULT;
1547
1548         master = intel_engine_lookup_user(set->ctx->i915,
1549                                           ci.engine_class, ci.engine_instance);
1550         if (!master) {
1551                 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1552                           ci.engine_class, ci.engine_instance);
1553                 return -EINVAL;
1554         }
1555
1556         if (get_user(num_bonds, &ext->num_bonds))
1557                 return -EFAULT;
1558
1559         for (n = 0; n < num_bonds; n++) {
1560                 struct intel_engine_cs *bond;
1561
1562                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1563                         return -EFAULT;
1564
1565                 bond = intel_engine_lookup_user(set->ctx->i915,
1566                                                 ci.engine_class,
1567                                                 ci.engine_instance);
1568                 if (!bond) {
1569                         DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1570                                   n, ci.engine_class, ci.engine_instance);
1571                         return -EINVAL;
1572                 }
1573
1574                 /*
1575                  * A non-virtual engine has no siblings to choose between; and
1576                  * a submit fence will always be directed to the one engine.
1577                  */
1578                 if (intel_engine_is_virtual(virtual)) {
1579                         err = intel_virtual_engine_attach_bond(virtual,
1580                                                                master,
1581                                                                bond);
1582                         if (err)
1583                                 return err;
1584                 }
1585         }
1586
1587         return 0;
1588 }
1589
1590 static const i915_user_extension_fn set_engines__extensions[] = {
1591         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1592         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1593 };
1594
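/*
 * I915_CONTEXT_PARAM_ENGINES: replace the context's engine map. A zero-sized
 * argument reverts to the legacy per-class ring map; otherwise each
 * class:instance pair supplied by the user is resolved to an engine (or left
 * as a hole for the INVALID_NONE placeholder) and given a fresh
 * intel_context. Extensions (load-balance, bond) may adjust the map before
 * it is published under RCU, with the old map freed via call_rcu().
 */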
1595 static int
1596 set_engines(struct i915_gem_context *ctx,
1597             const struct drm_i915_gem_context_param *args)
1598 {
1599         struct i915_context_param_engines __user *user =
1600                 u64_to_user_ptr(args->value);
1601         struct set_engines set = { .ctx = ctx };
1602         unsigned int num_engines, n;
1603         u64 extensions;
1604         int err;
1605
1606         if (!args->size) { /* switch back to legacy user_ring_map */
1607                 if (!i915_gem_context_user_engines(ctx))
1608                         return 0;
1609
1610                 set.engines = default_engines(ctx);
1611                 if (IS_ERR(set.engines))
1612                         return PTR_ERR(set.engines);
1613
1614                 goto replace;
1615         }
1616
1617         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1618         if (args->size < sizeof(*user) ||
1619             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1620                 DRM_DEBUG("Invalid size for engine array: %d\n",
1621                           args->size);
1622                 return -EINVAL;
1623         }
1624
1625         /*
1626          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1627          * first 64 engines defined here.
1628          */
1629         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1630
1631         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1632                               GFP_KERNEL);
1633         if (!set.engines)
1634                 return -ENOMEM;
1635
1636         init_rcu_head(&set.engines->rcu);
1637         for (n = 0; n < num_engines; n++) {
1638                 struct i915_engine_class_instance ci;
1639                 struct intel_engine_cs *engine;
1640                 struct intel_context *ce;
1641
1642                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1643                         __free_engines(set.engines, n);
1644                         return -EFAULT;
1645                 }
1646
1647                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1648                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1649                         set.engines->engines[n] = NULL;
1650                         continue;
1651                 }
1652
1653                 engine = intel_engine_lookup_user(ctx->i915,
1654                                                   ci.engine_class,
1655                                                   ci.engine_instance);
1656                 if (!engine) {
1657                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1658                                   n, ci.engine_class, ci.engine_instance);
1659                         __free_engines(set.engines, n);
1660                         return -ENOENT;
1661                 }
1662
1663                 ce = intel_context_create(ctx, engine);
1664                 if (IS_ERR(ce)) {
1665                         __free_engines(set.engines, n);
1666                         return PTR_ERR(ce);
1667                 }
1668
1669                 set.engines->engines[n] = ce;
1670         }
1671         set.engines->num_engines = num_engines;
1672
1673         err = -EFAULT;
1674         if (!get_user(extensions, &user->extensions))
1675                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1676                                            set_engines__extensions,
1677                                            ARRAY_SIZE(set_engines__extensions),
1678                                            &set);
1679         if (err) {
1680                 free_engines(set.engines);
1681                 return err;
1682         }
1683
1684 replace:
1685         mutex_lock(&ctx->engines_mutex);
1686         if (args->size)
1687                 i915_gem_context_set_user_engines(ctx);
1688         else
1689                 i915_gem_context_clear_user_engines(ctx);
1690         set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1691         mutex_unlock(&ctx->engines_mutex);
1692
1693         call_rcu(&set.engines->rcu, free_engines_rcu);
1694
1695         return 0;
1696 }
1697
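/* Take a reference-counted snapshot of the context's current engine map. */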
1698 static struct i915_gem_engines *
1699 __copy_engines(struct i915_gem_engines *e)
1700 {
1701         struct i915_gem_engines *copy;
1702         unsigned int n;
1703
1704         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1705         if (!copy)
1706                 return ERR_PTR(-ENOMEM);
1707
1708         init_rcu_head(&copy->rcu);
1709         for (n = 0; n < e->num_engines; n++) {
1710                 if (e->engines[n])
1711                         copy->engines[n] = intel_context_get(e->engines[n]);
1712                 else
1713                         copy->engines[n] = NULL;
1714         }
1715         copy->num_engines = n;
1716
1717         return copy;
1718 }
1719
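/*
 * I915_CONTEXT_PARAM_ENGINES (getparam): report the user-defined engine map
 * back as an array of class:instance pairs. A zero-sized query returns only
 * the buffer size required; a context still using the default map reports a
 * size of zero.
 */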
1720 static int
1721 get_engines(struct i915_gem_context *ctx,
1722             struct drm_i915_gem_context_param *args)
1723 {
1724         struct i915_context_param_engines __user *user;
1725         struct i915_gem_engines *e;
1726         size_t n, count, size;
1727         int err = 0;
1728
1729         err = mutex_lock_interruptible(&ctx->engines_mutex);
1730         if (err)
1731                 return err;
1732
1733         e = NULL;
1734         if (i915_gem_context_user_engines(ctx))
1735                 e = __copy_engines(i915_gem_context_engines(ctx));
1736         mutex_unlock(&ctx->engines_mutex);
1737         if (IS_ERR_OR_NULL(e)) {
1738                 args->size = 0;
1739                 return PTR_ERR_OR_ZERO(e);
1740         }
1741
1742         count = e->num_engines;
1743
1744         /* Be paranoid in case we have an impedance mismatch */
1745         if (!check_struct_size(user, engines, count, &size)) {
1746                 err = -EINVAL;
1747                 goto err_free;
1748         }
1749         if (overflows_type(size, args->size)) {
1750                 err = -EINVAL;
1751                 goto err_free;
1752         }
1753
1754         if (!args->size) {
1755                 args->size = size;
1756                 goto err_free;
1757         }
1758
1759         if (args->size < size) {
1760                 err = -EINVAL;
1761                 goto err_free;
1762         }
1763
1764         user = u64_to_user_ptr(args->value);
1765         if (!access_ok(user, size)) {
1766                 err = -EFAULT;
1767                 goto err_free;
1768         }
1769
1770         if (put_user(0, &user->extensions)) {
1771                 err = -EFAULT;
1772                 goto err_free;
1773         }
1774
1775         for (n = 0; n < count; n++) {
1776                 struct i915_engine_class_instance ci = {
1777                         .engine_class = I915_ENGINE_CLASS_INVALID,
1778                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1779                 };
1780
1781                 if (e->engines[n]) {
1782                         ci.engine_class = e->engines[n]->engine->uabi_class;
1783                         ci.engine_instance = e->engines[n]->engine->uabi_instance;
1784                 }
1785
1786                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1787                         err = -EFAULT;
1788                         goto err_free;
1789                 }
1790         }
1791
1792         args->size = size;
1793
1794 err_free:
1795         free_engines(e);
1796         return err;
1797 }
1798
1799 static int
1800 set_persistence(struct i915_gem_context *ctx,
1801                 const struct drm_i915_gem_context_param *args)
1802 {
1803         if (args->size)
1804                 return -EINVAL;
1805
1806         return __context_set_persistence(ctx, args->value);
1807 }
1808
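/*
 * Apply a single context parameter. Shared between the SETPARAM ioctl and
 * the create-time setparam extension.
 */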
1809 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1810                         struct i915_gem_context *ctx,
1811                         struct drm_i915_gem_context_param *args)
1812 {
1813         int ret = 0;
1814
1815         switch (args->param) {
1816         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1817                 if (args->size)
1818                         ret = -EINVAL;
1819                 else if (args->value)
1820                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1821                 else
1822                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1823                 break;
1824
1825         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1826                 if (args->size)
1827                         ret = -EINVAL;
1828                 else if (args->value)
1829                         i915_gem_context_set_no_error_capture(ctx);
1830                 else
1831                         i915_gem_context_clear_no_error_capture(ctx);
1832                 break;
1833
1834         case I915_CONTEXT_PARAM_BANNABLE:
1835                 if (args->size)
1836                         ret = -EINVAL;
1837                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1838                         ret = -EPERM;
1839                 else if (args->value)
1840                         i915_gem_context_set_bannable(ctx);
1841                 else
1842                         i915_gem_context_clear_bannable(ctx);
1843                 break;
1844
1845         case I915_CONTEXT_PARAM_RECOVERABLE:
1846                 if (args->size)
1847                         ret = -EINVAL;
1848                 else if (args->value)
1849                         i915_gem_context_set_recoverable(ctx);
1850                 else
1851                         i915_gem_context_clear_recoverable(ctx);
1852                 break;
1853
1854         case I915_CONTEXT_PARAM_PRIORITY:
1855                 {
1856                         s64 priority = args->value;
1857
1858                         if (args->size)
1859                                 ret = -EINVAL;
1860                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1861                                 ret = -ENODEV;
1862                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1863                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1864                                 ret = -EINVAL;
1865                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1866                                  !capable(CAP_SYS_NICE))
1867                                 ret = -EPERM;
1868                         else
1869                                 ctx->sched.priority =
1870                                         I915_USER_PRIORITY(priority);
1871                 }
1872                 break;
1873
1874         case I915_CONTEXT_PARAM_SSEU:
1875                 ret = set_sseu(ctx, args);
1876                 break;
1877
1878         case I915_CONTEXT_PARAM_VM:
1879                 ret = set_ppgtt(fpriv, ctx, args);
1880                 break;
1881
1882         case I915_CONTEXT_PARAM_ENGINES:
1883                 ret = set_engines(ctx, args);
1884                 break;
1885
1886         case I915_CONTEXT_PARAM_PERSISTENCE:
1887                 ret = set_persistence(ctx, args);
1888                 break;
1889
1890         case I915_CONTEXT_PARAM_BAN_PERIOD:
1891         default:
1892                 ret = -EINVAL;
1893                 break;
1894         }
1895
1896         return ret;
1897 }
1898
1899 struct create_ext {
1900         struct i915_gem_context *ctx;
1901         struct drm_i915_file_private *fpriv;
1902 };
1903
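/* I915_CONTEXT_CREATE_EXT_SETPARAM: apply a context parameter at create time. */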
1904 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1905 {
1906         struct drm_i915_gem_context_create_ext_setparam local;
1907         const struct create_ext *arg = data;
1908
1909         if (copy_from_user(&local, ext, sizeof(local)))
1910                 return -EFAULT;
1911
1912         if (local.param.ctx_id)
1913                 return -EINVAL;
1914
1915         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1916 }
1917
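/*
 * I915_CONTEXT_CLONE_ENGINES: duplicate the source context's engine map for
 * the new context, creating fresh intel_contexts and copying any virtual
 * engines, since those cannot be shared between contexts.
 */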
1918 static int clone_engines(struct i915_gem_context *dst,
1919                          struct i915_gem_context *src)
1920 {
1921         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1922         struct i915_gem_engines *clone;
1923         bool user_engines;
1924         unsigned long n;
1925
1926         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1927         if (!clone)
1928                 goto err_unlock;
1929
1930         init_rcu_head(&clone->rcu);
1931         for (n = 0; n < e->num_engines; n++) {
1932                 struct intel_engine_cs *engine;
1933
1934                 if (!e->engines[n]) {
1935                         clone->engines[n] = NULL;
1936                         continue;
1937                 }
1938                 engine = e->engines[n]->engine;
1939
1940                 /*
1941                  * Virtual engines are singletons; they can only exist
1942                  * inside a single context, because they embed their
1943                  * HW context... As each virtual context implies a single
1944                  * timeline (each engine can only dequeue a single request
1945                  * at any time), it would be surprising for two contexts
1946                  * to use the same engine. So let's create a copy of
1947                  * the virtual engine instead.
1948                  */
1949                 if (intel_engine_is_virtual(engine))
1950                         clone->engines[n] =
1951                                 intel_execlists_clone_virtual(dst, engine);
1952                 else
1953                         clone->engines[n] = intel_context_create(dst, engine);
1954                 if (IS_ERR_OR_NULL(clone->engines[n])) {
1955                         __free_engines(clone, n);
1956                         goto err_unlock;
1957                 }
1958         }
1959         clone->num_engines = n;
1960
1961         user_engines = i915_gem_context_user_engines(src);
1962         i915_gem_context_unlock_engines(src);
1963
1964         free_engines(dst->engines);
1965         RCU_INIT_POINTER(dst->engines, clone);
1966         if (user_engines)
1967                 i915_gem_context_set_user_engines(dst);
1968         else
1969                 i915_gem_context_clear_user_engines(dst);
1970         return 0;
1971
1972 err_unlock:
1973         i915_gem_context_unlock_engines(src);
1974         return -ENOMEM;
1975 }
1976
1977 static int clone_flags(struct i915_gem_context *dst,
1978                        struct i915_gem_context *src)
1979 {
1980         dst->user_flags = src->user_flags;
1981         return 0;
1982 }
1983
1984 static int clone_schedattr(struct i915_gem_context *dst,
1985                            struct i915_gem_context *src)
1986 {
1987         dst->sched = src->sched;
1988         return 0;
1989 }
1990
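/*
 * I915_CONTEXT_CLONE_SSEU: copy the per-engine SSEU configuration from the
 * source map into the destination's freshly cloned (and so far unshared)
 * map. Both maps must describe compatible engines, slot for slot.
 */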
1991 static int clone_sseu(struct i915_gem_context *dst,
1992                       struct i915_gem_context *src)
1993 {
1994         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1995         struct i915_gem_engines *clone;
1996         unsigned long n;
1997         int err;
1998
1999         clone = dst->engines; /* no locking required; sole access */
2000         if (e->num_engines != clone->num_engines) {
2001                 err = -EINVAL;
2002                 goto unlock;
2003         }
2004
2005         for (n = 0; n < e->num_engines; n++) {
2006                 struct intel_context *ce = e->engines[n];
2007
2008                 if (clone->engines[n]->engine->class != ce->engine->class) {
2009                         /* Must have compatible engine maps! */
2010                         err = -EINVAL;
2011                         goto unlock;
2012                 }
2013
2014                 /* serialises with set_sseu */
2015                 err = intel_context_lock_pinned(ce);
2016                 if (err)
2017                         goto unlock;
2018
2019                 clone->engines[n]->sseu = ce->sseu;
2020                 intel_context_unlock_pinned(ce);
2021         }
2022
2023         err = 0;
2024 unlock:
2025         i915_gem_context_unlock_engines(src);
2026         return err;
2027 }
2028
2029 static int clone_timeline(struct i915_gem_context *dst,
2030                           struct i915_gem_context *src)
2031 {
2032         if (src->timeline)
2033                 __assign_timeline(dst, src->timeline);
2034
2035         return 0;
2036 }
2037
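/*
 * I915_CONTEXT_CLONE_VM: share the source context's ppgtt with the new
 * context. The vm pointer is sampled under RCU and re-checked after taking
 * a reference, guarding against the ppgtt being freed and reallocated to an
 * unrelated context in between.
 */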
2038 static int clone_vm(struct i915_gem_context *dst,
2039                     struct i915_gem_context *src)
2040 {
2041         struct i915_address_space *vm;
2042         int err = 0;
2043
2044         rcu_read_lock();
2045         do {
2046                 vm = rcu_dereference(src->vm);
2047                 if (!vm)
2048                         break;
2049
2050                 if (!kref_get_unless_zero(&vm->ref))
2051                         continue;
2052
2053                 /*
2054                  * This ppgtt may have been reallocated between
2055                  * the read and the kref, and reassigned to a third
2056                  * context. In order to avoid inadvertent sharing
2057                  * of this ppgtt with that third context (and not
2058                  * src), we have to confirm that we have the same
2059                  * ppgtt after passing through the strong memory
2060                  * barrier implied by a successful
2061                  * kref_get_unless_zero().
2062                  *
2063                  * Once we have acquired the current ppgtt of src,
2064                  * we no longer care if it is released from src, as
2065                  * it cannot be reallocated elsewhere.
2066                  */
2067
2068                 if (vm == rcu_access_pointer(src->vm))
2069                         break;
2070
2071                 i915_vm_put(vm);
2072         } while (1);
2073         rcu_read_unlock();
2074
2075         if (vm) {
2076                 if (!mutex_lock_interruptible(&dst->mutex)) {
2077                         __assign_ppgtt(dst, vm);
2078                         mutex_unlock(&dst->mutex);
2079                 } else {
2080                         err = -EINTR;
2081                 }
2082                 i915_vm_put(vm);
2083         }
2084
2085         return err;
2086 }
2087
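/*
 * I915_CONTEXT_CREATE_EXT_CLONE: copy selected state (engines, flags,
 * scheduling attributes, SSEU, timeline, VM) from an existing context into
 * the one being created, as directed by the flags bitmask.
 */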
2088 static int create_clone(struct i915_user_extension __user *ext, void *data)
2089 {
2090         static int (* const fn[])(struct i915_gem_context *dst,
2091                                   struct i915_gem_context *src) = {
2092 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2093                 MAP(ENGINES, clone_engines),
2094                 MAP(FLAGS, clone_flags),
2095                 MAP(SCHEDATTR, clone_schedattr),
2096                 MAP(SSEU, clone_sseu),
2097                 MAP(TIMELINE, clone_timeline),
2098                 MAP(VM, clone_vm),
2099 #undef MAP
2100         };
2101         struct drm_i915_gem_context_create_ext_clone local;
2102         const struct create_ext *arg = data;
2103         struct i915_gem_context *dst = arg->ctx;
2104         struct i915_gem_context *src;
2105         int err, bit;
2106
2107         if (copy_from_user(&local, ext, sizeof(local)))
2108                 return -EFAULT;
2109
2110         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2111                      I915_CONTEXT_CLONE_UNKNOWN);
2112
2113         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2114                 return -EINVAL;
2115
2116         if (local.rsvd)
2117                 return -EINVAL;
2118
2119         rcu_read_lock();
2120         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2121         rcu_read_unlock();
2122         if (!src)
2123                 return -ENOENT;
2124
2125         GEM_BUG_ON(src == dst);
2126
2127         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2128                 if (!(local.flags & BIT(bit)))
2129                         continue;
2130
2131                 err = fn[bit](dst, src);
2132                 if (err)
2133                         return err;
2134         }
2135
2136         return 0;
2137 }
2138
2139 static const i915_user_extension_fn create_extensions[] = {
2140         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2141         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2142 };
2143
2144 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2145 {
2146         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2147 }
2148
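/*
 * Create a new logical context for this client: apply any create-time
 * extensions (setparam, clone), register the context and return its new
 * ctx_id. Clients with an excessive ban score are refused.
 */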
2149 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2150                                   struct drm_file *file)
2151 {
2152         struct drm_i915_private *i915 = to_i915(dev);
2153         struct drm_i915_gem_context_create_ext *args = data;
2154         struct create_ext ext_data;
2155         int ret;
2156
2157         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2158                 return -ENODEV;
2159
2160         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2161                 return -EINVAL;
2162
2163         ret = intel_gt_terminally_wedged(&i915->gt);
2164         if (ret)
2165                 return ret;
2166
2167         ext_data.fpriv = file->driver_priv;
2168         if (client_is_banned(ext_data.fpriv)) {
2169                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2170                           current->comm,
2171                           pid_nr(get_task_pid(current, PIDTYPE_PID)));
2172                 return -EIO;
2173         }
2174
2175         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2176         if (IS_ERR(ext_data.ctx))
2177                 return PTR_ERR(ext_data.ctx);
2178
2179         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2180                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2181                                            create_extensions,
2182                                            ARRAY_SIZE(create_extensions),
2183                                            &ext_data);
2184                 if (ret)
2185                         goto err_ctx;
2186         }
2187
2188         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2189         if (ret < 0)
2190                 goto err_ctx;
2191
2192         args->ctx_id = ret;
2193         DRM_DEBUG("HW context %d created\n", args->ctx_id);
2194
2195         return 0;
2196
2197 err_ctx:
2198         context_close(ext_data.ctx);
2199         return ret;
2200 }
2201
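/*
 * Remove the context from the client's idr and close it; ctx_id 0 (the
 * default context) is rejected.
 */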
2202 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2203                                    struct drm_file *file)
2204 {
2205         struct drm_i915_gem_context_destroy *args = data;
2206         struct drm_i915_file_private *file_priv = file->driver_priv;
2207         struct i915_gem_context *ctx;
2208
2209         if (args->pad != 0)
2210                 return -EINVAL;
2211
2212         if (!args->ctx_id)
2213                 return -ENOENT;
2214
2215         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2216                 return -EINTR;
2217
2218         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2219         mutex_unlock(&file_priv->context_idr_lock);
2220         if (!ctx)
2221                 return -ENOENT;
2222
2223         context_close(ctx);
2224         return 0;
2225 }
2226
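/*
 * I915_CONTEXT_PARAM_SSEU (getparam): report the slice/subslice/EU
 * configuration currently applied to the selected engine. A zero-sized
 * query simply returns the structure size the kernel expects.
 */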
2227 static int get_sseu(struct i915_gem_context *ctx,
2228                     struct drm_i915_gem_context_param *args)
2229 {
2230         struct drm_i915_gem_context_param_sseu user_sseu;
2231         struct intel_context *ce;
2232         unsigned long lookup;
2233         int err;
2234
2235         if (args->size == 0)
2236                 goto out;
2237         else if (args->size < sizeof(user_sseu))
2238                 return -EINVAL;
2239
2240         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2241                            sizeof(user_sseu)))
2242                 return -EFAULT;
2243
2244         if (user_sseu.rsvd)
2245                 return -EINVAL;
2246
2247         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2248                 return -EINVAL;
2249
2250         lookup = 0;
2251         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2252                 lookup |= LOOKUP_USER_INDEX;
2253
2254         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2255         if (IS_ERR(ce))
2256                 return PTR_ERR(ce);
2257
2258         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2259         if (err) {
2260                 intel_context_put(ce);
2261                 return err;
2262         }
2263
2264         user_sseu.slice_mask = ce->sseu.slice_mask;
2265         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2266         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2267         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2268
2269         intel_context_unlock_pinned(ce);
2270         intel_context_put(ce);
2271
2272         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2273                          sizeof(user_sseu)))
2274                 return -EFAULT;
2275
2276 out:
2277         args->size = sizeof(user_sseu);
2278
2279         return 0;
2280 }
2281
2282 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2283                                     struct drm_file *file)
2284 {
2285         struct drm_i915_file_private *file_priv = file->driver_priv;
2286         struct drm_i915_gem_context_param *args = data;
2287         struct i915_gem_context *ctx;
2288         int ret = 0;
2289
2290         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2291         if (!ctx)
2292                 return -ENOENT;
2293
2294         switch (args->param) {
2295         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2296                 args->size = 0;
2297                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2298                 break;
2299
2300         case I915_CONTEXT_PARAM_GTT_SIZE:
2301                 args->size = 0;
2302                 rcu_read_lock();
2303                 if (rcu_access_pointer(ctx->vm))
2304                         args->value = rcu_dereference(ctx->vm)->total;
2305                 else
2306                         args->value = to_i915(dev)->ggtt.vm.total;
2307                 rcu_read_unlock();
2308                 break;
2309
2310         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2311                 args->size = 0;
2312                 args->value = i915_gem_context_no_error_capture(ctx);
2313                 break;
2314
2315         case I915_CONTEXT_PARAM_BANNABLE:
2316                 args->size = 0;
2317                 args->value = i915_gem_context_is_bannable(ctx);
2318                 break;
2319
2320         case I915_CONTEXT_PARAM_RECOVERABLE:
2321                 args->size = 0;
2322                 args->value = i915_gem_context_is_recoverable(ctx);
2323                 break;
2324
2325         case I915_CONTEXT_PARAM_PRIORITY:
2326                 args->size = 0;
2327                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2328                 break;
2329
2330         case I915_CONTEXT_PARAM_SSEU:
2331                 ret = get_sseu(ctx, args);
2332                 break;
2333
2334         case I915_CONTEXT_PARAM_VM:
2335                 ret = get_ppgtt(file_priv, ctx, args);
2336                 break;
2337
2338         case I915_CONTEXT_PARAM_ENGINES:
2339                 ret = get_engines(ctx, args);
2340                 break;
2341
2342         case I915_CONTEXT_PARAM_PERSISTENCE:
2343                 args->size = 0;
2344                 args->value = i915_gem_context_is_persistent(ctx);
2345                 break;
2346
2347         case I915_CONTEXT_PARAM_BAN_PERIOD:
2348         default:
2349                 ret = -EINVAL;
2350                 break;
2351         }
2352
2353         i915_gem_context_put(ctx);
2354         return ret;
2355 }
2356
2357 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2358                                     struct drm_file *file)
2359 {
2360         struct drm_i915_file_private *file_priv = file->driver_priv;
2361         struct drm_i915_gem_context_param *args = data;
2362         struct i915_gem_context *ctx;
2363         int ret;
2364
2365         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2366         if (!ctx)
2367                 return -ENOENT;
2368
2369         ret = ctx_setparam(file_priv, ctx, args);
2370
2371         i915_gem_context_put(ctx);
2372         return ret;
2373 }
2374
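/*
 * Report per-context hang statistics: batch_active is taken from the
 * context's guilty_count, batch_pending from its active_count. The global
 * reset count is only disclosed to CAP_SYS_ADMIN.
 */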
2375 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2376                                        void *data, struct drm_file *file)
2377 {
2378         struct drm_i915_private *i915 = to_i915(dev);
2379         struct drm_i915_reset_stats *args = data;
2380         struct i915_gem_context *ctx;
2381         int ret;
2382
2383         if (args->flags || args->pad)
2384                 return -EINVAL;
2385
2386         ret = -ENOENT;
2387         rcu_read_lock();
2388         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2389         if (!ctx)
2390                 goto out;
2391
2392         /*
2393          * We opt for unserialised reads here. This may result in tearing
2394          * in the extremely unlikely event of a GPU hang on this context
2395          * as we are querying its stats. If we need that extra layer of protection,
2396          * we should wrap the hangstats with a seqlock.
2397          */
2398
2399         if (capable(CAP_SYS_ADMIN))
2400                 args->reset_count = i915_reset_count(&i915->gpu_error);
2401         else
2402                 args->reset_count = 0;
2403
2404         args->batch_active = atomic_read(&ctx->guilty_count);
2405         args->batch_pending = atomic_read(&ctx->active_count);
2406
2407         ret = 0;
2408 out:
2409         rcu_read_unlock();
2410         return ret;
2411 }
2412
2413 /* GEM context-engines iterator: for_each_gem_engine() */
2414 struct intel_context *
2415 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2416 {
2417         const struct i915_gem_engines *e = it->engines;
2418         struct intel_context *ctx;
2419
2420         do {
2421                 if (it->idx >= e->num_engines)
2422                         return NULL;
2423
2424                 ctx = e->engines[it->idx++];
2425         } while (!ctx);
2426
2427         return ctx;
2428 }
2429
2430 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2431 #include "selftests/mock_context.c"
2432 #include "selftests/i915_gem_context.c"
2433 #endif
2434
2435 static void i915_global_gem_context_shrink(void)
2436 {
2437         kmem_cache_shrink(global.slab_luts);
2438 }
2439
2440 static void i915_global_gem_context_exit(void)
2441 {
2442         kmem_cache_destroy(global.slab_luts);
2443 }
2444
2445 static struct i915_global_gem_context global = { {
2446         .shrink = i915_global_gem_context_shrink,
2447         .exit = i915_global_gem_context_exit,
2448 } };
2449
2450 int __init i915_global_gem_context_init(void)
2451 {
2452         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2453         if (!global.slab_luts)
2454                 return -ENOMEM;
2455
2456         i915_global_register(&global.base);
2457         return 0;
2458 }