drm/i915: Use uabi engines for the default engine map
drivers/gpu/drm/i915/gem/i915_gem_context.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */

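/*
 * Illustrative reading of the table above (not an exhaustive enumeration): a
 * context that is created, used for a single execbuf and destroyed while the
 * GPU is still executing it walks S0->S1->S2->S4->S5->S0; the final unpin and
 * retirement drop the pin count and active flag before the last reference is
 * put.
 */
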
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "pxp/intel_pxp.h"

#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct kmem_cache *slab_luts;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(slab_luts, lut);
}

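/*
 * Close all handle->vma lookups for this context: walk the radix tree of user
 * handles, detach each matching LUT entry from its object and drop the
 * reference the LUT held on that object. Note the unconditional put at the
 * end of each iteration, which balances the kref_get_unless_zero() taken
 * while walking.
 */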
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

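/*
 * Resolve a user-supplied engine reference to an intel_context. For contexts
 * with a user-defined engine map the reference is an index into that map
 * (LOOKUP_USER_INDEX); otherwise it is a { class, instance } pair resolved
 * through the uabi engine list and mapped to its legacy ABI slot.
 */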
static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static int validate_priority(struct drm_i915_private *i915,
			     const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

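/*
 * A proto-context stages the parameters supplied at context-create time
 * before the real i915_gem_context is built. proto_context_close() releases
 * whatever the proto-context has accumulated so far: the PXP wakeref, the VM
 * reference and any user engine map.
 */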
static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}

static int proto_context_set_persistence(struct drm_i915_private *i915,
					 struct i915_gem_proto_context *pc,
					 bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}

static int proto_context_set_protected(struct drm_i915_private *i915,
				       struct i915_gem_proto_context *pc,
				       bool protected)
{
	int ret = 0;

	if (!protected) {
		pc->uses_protected_content = false;
	} else if (!intel_pxp_is_enabled(i915->pxp)) {
		ret = -ENODEV;
	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
		ret = -EPERM;
	} else {
		pc->uses_protected_content = true;

		/*
		 * protected context usage requires the PXP session to be up,
		 * which in turn requires the device to be active.
		 */
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(i915->pxp))
			ret = intel_pxp_start(i915->pxp);
	}

	return ret;
}

static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_proto_context *pc, *err;

	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);

	pc->num_user_engines = -1;
	pc->user_engines = NULL;
	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
			 BIT(UCONTEXT_RECOVERABLE);
	if (i915->params.enable_hangcheck)
		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	pc->sched.priority = I915_PRIORITY_NORMAL;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		if (!HAS_EXECLISTS(i915)) {
			err = ERR_PTR(-EINVAL);
			goto proto_close;
		}
		pc->single_timeline = true;
	}

	return pc;

proto_close:
	proto_context_close(i915, pc);
	return err;
}

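/*
 * Registering a proto-context reserves an id in context_xa (storing NULL as a
 * placeholder, so the id is allocated but does not yet name a live context)
 * and publishes the proto-context itself in proto_context_xa under the same
 * id; the finalized context is expected to replace the NULL entry later.
 */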
static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
					 struct i915_gem_proto_context *pc,
					 u32 *id)
{
	int ret;
	void *old;

	lockdep_assert_held(&fpriv->proto_context_lock);

	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
	}
	WARN_ON(old);

	return 0;
}

static int proto_context_register(struct drm_i915_file_private *fpriv,
				  struct i915_gem_proto_context *pc,
				  u32 *id)
{
	int ret;

	mutex_lock(&fpriv->proto_context_lock);
	ret = proto_context_register_locked(fpriv, pc, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return ret;
}

static struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}

static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
			    struct i915_gem_proto_context *pc,
			    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct i915_address_space *vm;

	if (args->size)
		return -EINVAL;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
	pc->vm = vm;

	return 0;
}

struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned int num_engines;
	struct i915_gem_proto_engine *engines;
};

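/*
 * The extension handlers below parse the I915_CONTEXT_PARAM_ENGINES payload.
 * As a rough sketch of the uAPI from the userspace side (names per the uapi
 * header, error handling elided), a balanced virtual engine in slot 0 would
 * be requested roughly like so:
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.engine_index = 0,
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.extensions = (uintptr_t)&balance,
 *		.engines = { { I915_ENGINE_CLASS_INVALID,
 *			       I915_ENGINE_CLASS_INVALID_NONE } },
 *	};
 *
 * set_proto_ctx_engines_balance() validates such an extension and records
 * the siblings in the proto-engine slot named by engine_index.
 */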
static int
set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	if (num_siblings == 1) {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}

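/*
 * Bonding ties submissions on a virtual engine to a chosen master engine.
 * Note that this handler only validates the master and each listed bond
 * target; it stores nothing in the proto-engine, so the extension is
 * accepted purely for ABI compatibility on the platforms that still allow
 * it.
 */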
static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
		return -ENODEV;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}
	}

	return 0;
}

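/*
 * For parallel submission the user supplies a width x num_siblings matrix of
 * engines, flattened row by row: ext->engines[i * num_siblings + j] is
 * sibling j of slot i. Consecutive rows must advance contiguously through
 * the logical engine mask, which is what the prev_mask check below enforces.
 */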
static int
set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
				      void *data)
{
	struct i915_context_engines_parallel_submit __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance prev_engine;
	u64 flags;
	int err = 0, n, i, j;
	u16 slot, width, num_siblings;
	struct intel_engine_cs **siblings = NULL;
	intel_engine_mask_t prev_mask;

	if (get_user(slot, &ext->engine_index))
		return -EFAULT;

	if (get_user(width, &ext->width))
		return -EFAULT;

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
	    num_siblings != 1) {
		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
			num_siblings);
		return -EINVAL;
	}

	if (slot >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			slot, set->num_engines);
		return -EINVAL;
	}

	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", slot);
		return -EINVAL;
	}

	if (get_user(flags, &ext->flags))
		return -EFAULT;

	if (flags) {
		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
		return -EINVAL;
	}

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (width < 2) {
		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
		return -EINVAL;
	}

	if (num_siblings < 1) {
		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
			num_siblings);
		return -EINVAL;
	}

	siblings = kmalloc_array(num_siblings * width,
				 sizeof(*siblings),
				 GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	/* Create contexts / engines */
	for (i = 0; i < width; ++i) {
		intel_engine_mask_t current_mask = 0;

		for (j = 0; j < num_siblings; ++j) {
			struct i915_engine_class_instance ci;

			n = i * num_siblings + j;
			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
				err = -EFAULT;
				goto out_err;
			}

			siblings[n] =
				intel_engine_lookup_user(i915, ci.engine_class,
							 ci.engine_instance);
			if (!siblings[n]) {
				drm_dbg(&i915->drm,
					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
					n, ci.engine_class, ci.engine_instance);
				err = -EINVAL;
				goto out_err;
			}

			/*
			 * We don't support breadcrumb handshake on these
			 * classes
			 */
			if (siblings[n]->class == RENDER_CLASS ||
			    siblings[n]->class == COMPUTE_CLASS) {
				err = -EINVAL;
				goto out_err;
			}

			if (n) {
				if (prev_engine.engine_class !=
				    ci.engine_class) {
					drm_dbg(&i915->drm,
						"Mismatched class %d, %d\n",
						prev_engine.engine_class,
						ci.engine_class);
					err = -EINVAL;
					goto out_err;
				}
			}

			prev_engine = ci;
			current_mask |= siblings[n]->logical_mask;
		}

		if (i > 0) {
			if (current_mask != prev_mask << 1) {
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
					prev_mask, current_mask);
				err = -EINVAL;
				goto out_err;
			}
		}
		prev_mask = current_mask;
	}

	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
	set->engines[slot].num_siblings = num_siblings;
	set->engines[slot].width = width;
	set->engines[slot].siblings = siblings;

	return 0;

out_err:
	kfree(siblings);

	return err;
}

static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};

static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
				 struct i915_gem_proto_context *pc,
				 const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct set_proto_ctx_engines set = { .i915 = i915 };
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	unsigned int n;
	u64 extensions;
	int err;

	if (pc->num_user_engines >= 0) {
		drm_dbg(&i915->drm, "Cannot set engines twice");
		return -EINVAL;
	}

	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	/* RING_MASK has no shift so we can use it directly here */
	if (set.num_engines > I915_EXEC_RING_MASK + 1)
		return -EINVAL;

	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < set.num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			kfree(set.engines);
			return -EFAULT;
		}

		memset(&set.engines[n], 0, sizeof(set.engines[n]));

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
			continue;

		engine = intel_engine_lookup_user(i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			kfree(set.engines);
			return -ENOENT;
		}

		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set.engines[n].engine = engine;
	}

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_proto_ctx_engines_extensions,
					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
					   &set);
	if (err) {
		kfree(set.engines);
		return err;
	}

	pc->num_user_engines = set.num_engines;
	pc->user_engines = set.engines;

	return 0;
}

static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
			      struct i915_gem_proto_context *pc,
			      struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_sseu *sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
		return -EINVAL;

	if (pc->num_user_engines >= 0) {
		int idx = user_sseu.engine.engine_instance;
		struct i915_gem_proto_engine *pe;

		if (idx >= pc->num_user_engines)
			return -EINVAL;

		pe = &pc->user_engines[idx];

		/* Only render engine supports RPCS configuration. */
		if (pe->engine->class != RENDER_CLASS)
			return -EINVAL;

		sseu = &pe->sseu;
	} else {
		/* Only render engine supports RPCS configuration. */
		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
			return -EINVAL;

		/* There is only one render engine */
		if (user_sseu.engine.engine_instance != 0)
			return -EINVAL;

		sseu = &pc->legacy_rcs_sseu;
	}

	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
	if (ret)
		return ret;

	args->size = sizeof(user_sseu);

	return 0;
}

static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
		else
			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = validate_priority(fpriv->dev_priv, args);
		if (!ret)
			pc->sched.priority = args->value;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_proto_ctx_sseu(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_proto_ctx_vm(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_proto_ctx_engines(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		if (args->size)
			ret = -EINVAL;
		else
			ret = proto_context_set_persistence(fpriv->dev_priv, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->dev_priv, pc,
						  args->value);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

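/*
 * Bind a freshly created intel_context to its GEM context: inherit the VM,
 * enable semaphore-based timeslicing where the engine supports it, arm the
 * per-request watchdog and apply any non-default SSEU configuration.
 */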
static int intel_context_set_gem(struct intel_context *ce,
				 struct i915_gem_context *ctx,
				 struct intel_sseu sseu)
{
	int ret = 0;

	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;

	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}

static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		struct intel_context *ce = e->engines[count], *child;

		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
			continue;

		for_each_child(ce, child)
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static void accumulate_runtime(struct i915_drm_client *client,
			       struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	if (!client)
		return;

	/* Transfer accumulated runtime to the parent GEM context. */
	for_each_gem_engine(ce, engines, it) {
		unsigned int class = ce->engine->uabi_class;

		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
	}
}

static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);
	struct i915_gem_context *ctx = engines->ctx;

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

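/*
 * Build the default (legacy) engine map for a context. Per this change, the
 * map is populated by walking the uabi engine list rather than the internal
 * gt engine array: each engine exposed to userspace with a valid legacy_idx
 * gets a fresh intel_context in its ABI slot, with the legacy RCS SSEU
 * applied to the render engine.
 */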
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const unsigned int max = I915_NUM_ENGINES;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;

	e = alloc_engines(max);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_uabi_engine(engine, ctx->i915) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= max);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}

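/*
 * Instantiate the user-defined engine map from the validated proto-engines:
 * physical slots become plain contexts, balanced slots become virtual
 * engines, and parallel slots become parent/child contexts that are
 * perma-pinned once their ring size has been fixed up.
 */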
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->num_engines = num_engines;

	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce, *child;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[n] = ce;

		ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe[n].sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size and will cause
		 * the context to hang. Presumably with a bit of reordering we
		 * could move the perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

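/*
 * Final release is deferred to a worker: the last context reference may be
 * dropped from a path that cannot sleep, so i915_gem_context_release() only
 * queues release_work and the teardown below runs from process context.
 */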
static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed up to this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void
kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if ((exit || !persistent) && intel_context_revoke(ce))
			continue; /* Already marked. */

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && (exit || !persistent))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}

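/*
 * kill_context() sweeps every stale engine list still attached to the
 * context and force-cancels its outstanding work. The sw-fence await /
 * complete pair keeps each engines block alive while the lock is dropped to
 * call kill_engines().
 */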
static void kill_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
			     i915_gem_context_is_persistent(ctx));

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}

static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		intel_context_close(ce);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true,
			     i915_gem_context_is_persistent(ctx));

	i915_sw_fence_commit(&engines->fence);
}

1480 static void set_closed_name(struct i915_gem_context *ctx)
1481 {
1482         char *s;
1483
1484         /* Replace '[]' with '<>' to indicate closed in debug prints */
1485
1486         s = strrchr(ctx->name, '[');
1487         if (!s)
1488                 return;
1489
1490         *s = '<';
1491
1492         s = strchr(s + 1, ']');
1493         if (s)
1494                 *s = '>';
1495 }
1496
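/*
 * Final closure of a user context: detach and retire the engines, clear the
 * per-context handle LUT before the VMAs underneath it are closed, unlink
 * the context from its client, and finally cancel any outstanding work that
 * can no longer be trusted to complete on its own.
 */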
1497 static void context_close(struct i915_gem_context *ctx)
1498 {
1499         struct i915_drm_client *client;
1500
1501         /* Flush any concurrent set_engines() */
1502         mutex_lock(&ctx->engines_mutex);
1503         unpin_engines(__context_engines_static(ctx));
1504         engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1505         i915_gem_context_set_closed(ctx);
1506         mutex_unlock(&ctx->engines_mutex);
1507
1508         mutex_lock(&ctx->mutex);
1509
1510         set_closed_name(ctx);
1511
1512         /*
1513          * The LUT uses the VMA as a backpointer to unref the object,
1514          * so we need to clear the LUT before we close all the VMA (inside
1515          * the ppgtt).
1516          */
1517         lut_close(ctx);
1518
1519         ctx->file_priv = ERR_PTR(-EBADF);
1520
1521         client = ctx->client;
1522         if (client) {
1523                 spin_lock(&client->ctx_lock);
1524                 list_del_rcu(&ctx->client_link);
1525                 spin_unlock(&client->ctx_lock);
1526         }
1527
1528         mutex_unlock(&ctx->mutex);
1529
1530         /*
1531          * If the user has disabled hangchecking, we cannot be sure that
1532          * the batches will ever complete after the context is closed,
1533          * keeping the context and all resources pinned forever. So in this
1534          * case we opt to forcibly kill off all remaining requests on
1535          * context close.
1536          */
1537         kill_context(ctx);
1538
1539         i915_gem_context_put(ctx);
1540 }
1541
1542 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1543 {
1544         if (i915_gem_context_is_persistent(ctx) == state)
1545                 return 0;
1546
1547         if (state) {
1548                 /*
1549                  * Only contexts that are short-lived [that will expire or be
1550                  * reset] are allowed to survive past termination. We require
1551                  * hangcheck to ensure that the persistent requests are healthy.
1552                  */
1553                 if (!ctx->i915->params.enable_hangcheck)
1554                         return -EINVAL;
1555
1556                 i915_gem_context_set_persistence(ctx);
1557         } else {
1558                 /* To cancel a context we use "preempt-to-idle" */
1559                 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1560                         return -ENODEV;
1561
1562                 /*
1563                  * If the cancel fails, we then need to reset, cleanly!
1564                  *
1565                  * If the per-engine reset fails, all hope is lost! We resort
1566                  * to a full GPU reset in that unlikely case, but realistically
1567                  * if the engine could not reset, the full reset does not fare
1568                  * much better. The damage has been done.
1569                  *
1570                  * However, if we cannot reset an engine by itself, we cannot
1571                  * clean up a hanging persistent context without causing
1572                  * collateral damage, and we should not pretend we can by
1573                  * exposing the interface.
1574                  */
1575                 if (!intel_has_reset_engine(to_gt(ctx->i915)))
1576                         return -ENODEV;
1577
1578                 i915_gem_context_clear_persistence(ctx);
1579         }
1580
1581         return 0;
1582 }
1583
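/*
 * Instantiate a real context from a fully constructed proto-context: the
 * scheduling attributes, VM, engine set and user flags staged in the
 * proto-context are copied or referenced here. The proto-context itself is
 * not consumed; the caller remains responsible for proto_context_close().
 */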
1584 static struct i915_gem_context *
1585 i915_gem_create_context(struct drm_i915_private *i915,
1586                         const struct i915_gem_proto_context *pc)
1587 {
1588         struct i915_gem_context *ctx;
1589         struct i915_address_space *vm = NULL;
1590         struct i915_gem_engines *e;
1591         int err;
1592         int i;
1593
1594         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1595         if (!ctx)
1596                 return ERR_PTR(-ENOMEM);
1597
1598         kref_init(&ctx->ref);
1599         ctx->i915 = i915;
1600         ctx->sched = pc->sched;
1601         mutex_init(&ctx->mutex);
1602         INIT_LIST_HEAD(&ctx->link);
1603         INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1604
1605         spin_lock_init(&ctx->stale.lock);
1606         INIT_LIST_HEAD(&ctx->stale.engines);
1607
1608         if (pc->vm) {
1609                 vm = i915_vm_get(pc->vm);
1610         } else if (HAS_FULL_PPGTT(i915)) {
1611                 struct i915_ppgtt *ppgtt;
1612
1613                 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1614                 if (IS_ERR(ppgtt)) {
1615                         drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1616                                 PTR_ERR(ppgtt));
1617                         err = PTR_ERR(ppgtt);
1618                         goto err_ctx;
1619                 }
1620                 vm = &ppgtt->vm;
1621         }
1622         if (vm)
1623                 ctx->vm = vm;
1624
1625         mutex_init(&ctx->engines_mutex);
1626         if (pc->num_user_engines >= 0) {
1627                 i915_gem_context_set_user_engines(ctx);
1628                 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1629         } else {
1630                 i915_gem_context_clear_user_engines(ctx);
1631                 e = default_engines(ctx, pc->legacy_rcs_sseu);
1632         }
1633         if (IS_ERR(e)) {
1634                 err = PTR_ERR(e);
1635                 goto err_vm;
1636         }
1637         RCU_INIT_POINTER(ctx->engines, e);
1638
1639         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1640         mutex_init(&ctx->lut_mutex);
1641
1642         /* NB: Mark all slices as needing a remap so that when the context first
1643          * loads it will restore whatever remap state already exists. If there
1644          * is no remap info, it will be a NOP. */
1645         ctx->remap_slice = ALL_L3_SLICES(i915);
1646
1647         ctx->user_flags = pc->user_flags;
1648
1649         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1650                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1651
1652         if (pc->single_timeline) {
1653                 err = drm_syncobj_create(&ctx->syncobj,
1654                                          DRM_SYNCOBJ_CREATE_SIGNALED,
1655                                          NULL);
1656                 if (err)
1657                         goto err_engines;
1658         }
1659
1660         if (pc->uses_protected_content) {
1661                 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1662                 ctx->uses_protected_content = true;
1663         }
1664
1665         trace_i915_context_create(ctx);
1666
1667         return ctx;
1668
1669 err_engines:
1670         free_engines(e);
1671 err_vm:
1672         if (ctx->vm)
1673                 i915_vm_put(ctx->vm);
1674 err_ctx:
1675         kfree(ctx);
1676         return ERR_PTR(err);
1677 }
1678
1679 static void init_contexts(struct i915_gem_contexts *gc)
1680 {
1681         spin_lock_init(&gc->lock);
1682         INIT_LIST_HEAD(&gc->list);
1683 }
1684
1685 void i915_gem_init__contexts(struct drm_i915_private *i915)
1686 {
1687         init_contexts(&i915->gem.contexts);
1688 }
1689
1690 /*
1691  * Note that this implicitly consumes the ctx reference by placing
1692  * the ctx in the context_xa.
1693  */
1694 static void gem_context_register(struct i915_gem_context *ctx,
1695                                  struct drm_i915_file_private *fpriv,
1696                                  u32 id)
1697 {
1698         struct drm_i915_private *i915 = ctx->i915;
1699         void *old;
1700
1701         ctx->file_priv = fpriv;
1702
1703         ctx->pid = get_task_pid(current, PIDTYPE_PID);
1704         ctx->client = i915_drm_client_get(fpriv->client);
1705
1706         snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1707                  current->comm, pid_nr(ctx->pid));
1708
1709         spin_lock(&ctx->client->ctx_lock);
1710         list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1711         spin_unlock(&ctx->client->ctx_lock);
1712
1713         spin_lock(&i915->gem.contexts.lock);
1714         list_add_tail(&ctx->link, &i915->gem.contexts.list);
1715         spin_unlock(&i915->gem.contexts.lock);
1716
1717         /* And finally expose ourselves to userspace via the idr */
1718         old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1719         WARN_ON(old);
1720 }
1721
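/*
 * Called when a client first opens the device: initialise the per-file
 * lookup tables and install the default context at id 0.
 */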
1722 int i915_gem_context_open(struct drm_i915_private *i915,
1723                           struct drm_file *file)
1724 {
1725         struct drm_i915_file_private *file_priv = file->driver_priv;
1726         struct i915_gem_proto_context *pc;
1727         struct i915_gem_context *ctx;
1728         int err;
1729
1730         mutex_init(&file_priv->proto_context_lock);
1731         xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1732
1733         /* 0 reserved for the default context */
1734         xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1735
1736         /* 0 reserved for invalid/unassigned ppgtt */
1737         xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1738
1739         pc = proto_context_create(i915, 0);
1740         if (IS_ERR(pc)) {
1741                 err = PTR_ERR(pc);
1742                 goto err;
1743         }
1744
1745         ctx = i915_gem_create_context(i915, pc);
1746         proto_context_close(i915, pc);
1747         if (IS_ERR(ctx)) {
1748                 err = PTR_ERR(ctx);
1749                 goto err;
1750         }
1751
1752         gem_context_register(ctx, file_priv, 0);
1753
1754         return 0;
1755
1756 err:
1757         xa_destroy(&file_priv->vm_xa);
1758         xa_destroy(&file_priv->context_xa);
1759         xa_destroy(&file_priv->proto_context_xa);
1760         mutex_destroy(&file_priv->proto_context_lock);
1761         return err;
1762 }
1763
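/*
 * File-close counterpart of i915_gem_context_open(): destroy any
 * proto-contexts that were never finalized, close every remaining context,
 * and drop the references on all address spaces exported via the vm_xa.
 */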
1764 void i915_gem_context_close(struct drm_file *file)
1765 {
1766         struct drm_i915_file_private *file_priv = file->driver_priv;
1767         struct i915_gem_proto_context *pc;
1768         struct i915_address_space *vm;
1769         struct i915_gem_context *ctx;
1770         unsigned long idx;
1771
1772         xa_for_each(&file_priv->proto_context_xa, idx, pc)
1773                 proto_context_close(file_priv->dev_priv, pc);
1774         xa_destroy(&file_priv->proto_context_xa);
1775         mutex_destroy(&file_priv->proto_context_lock);
1776
1777         xa_for_each(&file_priv->context_xa, idx, ctx)
1778                 context_close(ctx);
1779         xa_destroy(&file_priv->context_xa);
1780
1781         xa_for_each(&file_priv->vm_xa, idx, vm)
1782                 i915_vm_put(vm);
1783         xa_destroy(&file_priv->vm_xa);
1784 }
1785
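/*
 * Illustrative only (not part of this file): userspace allocates a ppGTT
 * with the VM_CREATE ioctl and may later hand it to a new context via the
 * I915_CONTEXT_PARAM_VM create-time extension:
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	(vm.vm_id now names the new address space)
 */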
1786 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1787                              struct drm_file *file)
1788 {
1789         struct drm_i915_private *i915 = to_i915(dev);
1790         struct drm_i915_gem_vm_control *args = data;
1791         struct drm_i915_file_private *file_priv = file->driver_priv;
1792         struct i915_ppgtt *ppgtt;
1793         u32 id;
1794         int err;
1795
1796         if (!HAS_FULL_PPGTT(i915))
1797                 return -ENODEV;
1798
1799         if (args->flags)
1800                 return -EINVAL;
1801
1802         ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1803         if (IS_ERR(ppgtt))
1804                 return PTR_ERR(ppgtt);
1805
1806         if (args->extensions) {
1807                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1808                                            NULL, 0,
1809                                            ppgtt);
1810                 if (err)
1811                         goto err_put;
1812         }
1813
1814         err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1815                        xa_limit_32b, GFP_KERNEL);
1816         if (err)
1817                 goto err_put;
1818
1819         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1820         args->vm_id = id;
1821         return 0;
1822
1823 err_put:
1824         i915_vm_put(&ppgtt->vm);
1825         return err;
1826 }
1827
1828 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1829                               struct drm_file *file)
1830 {
1831         struct drm_i915_file_private *file_priv = file->driver_priv;
1832         struct drm_i915_gem_vm_control *args = data;
1833         struct i915_address_space *vm;
1834
1835         if (args->flags)
1836                 return -EINVAL;
1837
1838         if (args->extensions)
1839                 return -EINVAL;
1840
1841         vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1842         if (!vm)
1843                 return -ENOENT;
1844
1845         i915_vm_put(vm);
1846         return 0;
1847 }
1848
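/*
 * Export the context's address space back to userspace: allocate a fresh
 * vm_id for it in the file's vm_xa and take an extra reference, exactly as
 * if it had been created with VM_CREATE.
 */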
1849 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1850                      struct i915_gem_context *ctx,
1851                      struct drm_i915_gem_context_param *args)
1852 {
1853         struct i915_address_space *vm;
1854         int err;
1855         u32 id;
1856
1857         if (!i915_gem_context_has_full_ppgtt(ctx))
1858                 return -ENODEV;
1859
1860         vm = ctx->vm;
1861         GEM_BUG_ON(!vm);
1862
1863         err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1864         if (err)
1865                 return err;
1866
1867         i915_vm_get(vm);
1868
1869         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1870         args->value = id;
1871         args->size = 0;
1872
1873         return err;
1874 }
1875
1876 int
1877 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1878                               const struct drm_i915_gem_context_param_sseu *user,
1879                               struct intel_sseu *context)
1880 {
1881         const struct sseu_dev_info *device = &gt->info.sseu;
1882         struct drm_i915_private *i915 = gt->i915;
1883         unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
1884
1885         /* No zeros in any field. */
1886         if (!user->slice_mask || !user->subslice_mask ||
1887             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1888                 return -EINVAL;
1889
1890         /* Max >= min. */
1891         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1892                 return -EINVAL;
1893
1894         /*
1895          * Some future-proofing on the types since the uAPI is wider than the
1896          * current internal implementation.
1897          */
1898         if (overflows_type(user->slice_mask, context->slice_mask) ||
1899             overflows_type(user->subslice_mask, context->subslice_mask) ||
1900             overflows_type(user->min_eus_per_subslice,
1901                            context->min_eus_per_subslice) ||
1902             overflows_type(user->max_eus_per_subslice,
1903                            context->max_eus_per_subslice))
1904                 return -EINVAL;
1905
1906         /* Check validity against hardware. */
1907         if (user->slice_mask & ~device->slice_mask)
1908                 return -EINVAL;
1909
1910         if (user->subslice_mask & ~dev_subslice_mask)
1911                 return -EINVAL;
1912
1913         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1914                 return -EINVAL;
1915
1916         context->slice_mask = user->slice_mask;
1917         context->subslice_mask = user->subslice_mask;
1918         context->min_eus_per_subslice = user->min_eus_per_subslice;
1919         context->max_eus_per_subslice = user->max_eus_per_subslice;
1920
1921         /* Part specific restrictions. */
1922         if (GRAPHICS_VER(i915) == 11) {
1923                 unsigned int hw_s = hweight8(device->slice_mask);
1924                 unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1925                 unsigned int req_s = hweight8(context->slice_mask);
1926                 unsigned int req_ss = hweight8(context->subslice_mask);
1927
1928                 /*
1929                  * Only full subslice enablement is possible if more than one
1930                  * slice is turned on.
1931                  */
1932                 if (req_s > 1 && req_ss != hw_ss_per_s)
1933                         return -EINVAL;
1934
1935                 /*
1936                  * If more than four (SScount bitfield limit) subslices are
1937                  * requested then the number has to be even.
1938                  */
1939                 if (req_ss > 4 && (req_ss & 1))
1940                         return -EINVAL;
1941
1942                 /*
1943                  * If only one slice is enabled and the subslice count is below
1944                  * the device's full enablement, it must be at most half of all
1945                  * the available subslices.
1946                  */
1947                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1948                     req_ss > (hw_ss_per_s / 2))
1949                         return -EINVAL;
1950
1951                 /* ABI restriction - VME use case only. */
1952
1953                 /* All slices or one slice only. */
1954                 if (req_s != 1 && req_s != hw_s)
1955                         return -EINVAL;
1956
1957                 /*
1958                  * Half subslices or full enablement only when one slice is
1959                  * enabled.
1960                  */
1961                 if (req_s == 1 &&
1962                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1963                         return -EINVAL;
1964
1965                 /* No EU configuration changes. */
1966                 if ((user->min_eus_per_subslice !=
1967                      device->max_eus_per_subslice) ||
1968                     (user->max_eus_per_subslice !=
1969                      device->max_eus_per_subslice))
1970                         return -EINVAL;
1971         }
1972
1973         return 0;
1974 }
1975
1976 static int set_sseu(struct i915_gem_context *ctx,
1977                     struct drm_i915_gem_context_param *args)
1978 {
1979         struct drm_i915_private *i915 = ctx->i915;
1980         struct drm_i915_gem_context_param_sseu user_sseu;
1981         struct intel_context *ce;
1982         struct intel_sseu sseu;
1983         unsigned long lookup;
1984         int ret;
1985
1986         if (args->size < sizeof(user_sseu))
1987                 return -EINVAL;
1988
1989         if (GRAPHICS_VER(i915) != 11)
1990                 return -ENODEV;
1991
1992         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1993                            sizeof(user_sseu)))
1994                 return -EFAULT;
1995
1996         if (user_sseu.rsvd)
1997                 return -EINVAL;
1998
1999         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2000                 return -EINVAL;
2001
2002         lookup = 0;
2003         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2004                 lookup |= LOOKUP_USER_INDEX;
2005
2006         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2007         if (IS_ERR(ce))
2008                 return PTR_ERR(ce);
2009
2010         /* Only render engine supports RPCS configuration. */
2011         if (ce->engine->class != RENDER_CLASS) {
2012                 ret = -ENODEV;
2013                 goto out_ce;
2014         }
2015
2016         ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
2017         if (ret)
2018                 goto out_ce;
2019
2020         ret = intel_context_reconfigure_sseu(ce, sseu);
2021         if (ret)
2022                 goto out_ce;
2023
2024         args->size = sizeof(user_sseu);
2025
2026 out_ce:
2027         intel_context_put(ce);
2028         return ret;
2029 }
2030
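/*
 * Illustrative only (not part of this file): userspace opts a context out
 * of persistence with the standard SETPARAM ioctl,
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
 *		.value = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * after which any requests still running when the context is closed are
 * cancelled rather than left to complete.
 */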
2031 static int
2032 set_persistence(struct i915_gem_context *ctx,
2033                 const struct drm_i915_gem_context_param *args)
2034 {
2035         if (args->size)
2036                 return -EINVAL;
2037
2038         return __context_set_persistence(ctx, args->value);
2039 }
2040
2041 static int set_priority(struct i915_gem_context *ctx,
2042                         const struct drm_i915_gem_context_param *args)
2043 {
2044         struct i915_gem_engines_iter it;
2045         struct intel_context *ce;
2046         int err;
2047
2048         err = validate_priority(ctx->i915, args);
2049         if (err)
2050                 return err;
2051
2052         ctx->sched.priority = args->value;
2053
2054         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2055                 if (!intel_engine_has_timeslices(ce->engine))
2056                         continue;
2057
2058                 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2059                     intel_engine_has_semaphores(ce->engine))
2060                         intel_context_set_use_semaphores(ce);
2061                 else
2062                         intel_context_clear_use_semaphores(ce);
2063         }
2064         i915_gem_context_unlock_engines(ctx);
2065
2066         return 0;
2067 }
2068
2069 static int get_protected(struct i915_gem_context *ctx,
2070                          struct drm_i915_gem_context_param *args)
2071 {
2072         args->size = 0;
2073         args->value = i915_gem_context_uses_protected_content(ctx);
2074
2075         return 0;
2076 }
2077
2078 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2079                         struct i915_gem_context *ctx,
2080                         struct drm_i915_gem_context_param *args)
2081 {
2082         int ret = 0;
2083
2084         switch (args->param) {
2085         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2086                 if (args->size)
2087                         ret = -EINVAL;
2088                 else if (args->value)
2089                         i915_gem_context_set_no_error_capture(ctx);
2090                 else
2091                         i915_gem_context_clear_no_error_capture(ctx);
2092                 break;
2093
2094         case I915_CONTEXT_PARAM_BANNABLE:
2095                 if (args->size)
2096                         ret = -EINVAL;
2097                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2098                         ret = -EPERM;
2099                 else if (args->value)
2100                         i915_gem_context_set_bannable(ctx);
2101                 else if (i915_gem_context_uses_protected_content(ctx))
2102                         ret = -EPERM; /* can't clear this for protected contexts */
2103                 else
2104                         i915_gem_context_clear_bannable(ctx);
2105                 break;
2106
2107         case I915_CONTEXT_PARAM_RECOVERABLE:
2108                 if (args->size)
2109                         ret = -EINVAL;
2110                 else if (!args->value)
2111                         i915_gem_context_clear_recoverable(ctx);
2112                 else if (i915_gem_context_uses_protected_content(ctx))
2113                         ret = -EPERM; /* can't set this for protected contexts */
2114                 else
2115                         i915_gem_context_set_recoverable(ctx);
2116                 break;
2117
2118         case I915_CONTEXT_PARAM_PRIORITY:
2119                 ret = set_priority(ctx, args);
2120                 break;
2121
2122         case I915_CONTEXT_PARAM_SSEU:
2123                 ret = set_sseu(ctx, args);
2124                 break;
2125
2126         case I915_CONTEXT_PARAM_PERSISTENCE:
2127                 ret = set_persistence(ctx, args);
2128                 break;
2129
2130         case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2131         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2132         case I915_CONTEXT_PARAM_BAN_PERIOD:
2133         case I915_CONTEXT_PARAM_RINGSIZE:
2134         case I915_CONTEXT_PARAM_VM:
2135         case I915_CONTEXT_PARAM_ENGINES:
2136         default:
2137                 ret = -EINVAL;
2138                 break;
2139         }
2140
2141         return ret;
2142 }
2143
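/* State shared between the create ioctl and its extension handlers. */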
2144 struct create_ext {
2145         struct i915_gem_proto_context *pc;
2146         struct drm_i915_file_private *fpriv;
2147 };
2148
2149 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2150 {
2151         struct drm_i915_gem_context_create_ext_setparam local;
2152         const struct create_ext *arg = data;
2153
2154         if (copy_from_user(&local, ext, sizeof(local)))
2155                 return -EFAULT;
2156
2157         if (local.param.ctx_id)
2158                 return -EINVAL;
2159
2160         return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2161 }
2162
2163 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2164 {
2165         return -EINVAL;
2166 }
2167
2168 static const i915_user_extension_fn create_extensions[] = {
2169         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2170         [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2171 };
2172
2173 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2174 {
2175         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2176 }
2177
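/*
 * Lock-free fast-path lookup: the xarray is RCU-safe, and
 * kref_get_unless_zero() protects us against a context whose final
 * reference is being dropped concurrently.
 */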
2178 static inline struct i915_gem_context *
2179 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2180 {
2181         struct i915_gem_context *ctx;
2182
2183         rcu_read_lock();
2184         ctx = xa_load(&file_priv->context_xa, id);
2185         if (ctx && !kref_get_unless_zero(&ctx->ref))
2186                 ctx = NULL;
2187         rcu_read_unlock();
2188
2189         return ctx;
2190 }
2191
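/*
 * Turn a proto-context into a live context (the lazy, pre-graphics-version-13
 * path): create and register the real context under the same id, then remove
 * and free the proto-context entry it replaces.
 */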
2192 static struct i915_gem_context *
2193 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2194                                struct i915_gem_proto_context *pc, u32 id)
2195 {
2196         struct i915_gem_context *ctx;
2197         void *old;
2198
2199         lockdep_assert_held(&file_priv->proto_context_lock);
2200
2201         ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2202         if (IS_ERR(ctx))
2203                 return ctx;
2204
2205         /*
2206          * One for the xarray and one for the caller.  We need to grab
2207          * the reference *prior* to making the ctx visible to userspace
2208          * in gem_context_register(), as at any point after that
2209          * userspace can try to race us with another thread destroying
2210          * the context under our feet.
2211          */
2212         i915_gem_context_get(ctx);
2213
2214         gem_context_register(ctx, file_priv, id);
2215
2216         old = xa_erase(&file_priv->proto_context_xa, id);
2217         GEM_BUG_ON(old != pc);
2218         proto_context_close(file_priv->dev_priv, pc);
2219
2220         return ctx;
2221 }
2222
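/*
 * Look up a context by id. Try the lock-free fast path first; on a miss,
 * retry under the proto_context_lock so that a proto-context can be
 * finalized exactly once before it is handed out.
 */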
2223 struct i915_gem_context *
2224 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2225 {
2226         struct i915_gem_proto_context *pc;
2227         struct i915_gem_context *ctx;
2228
2229         ctx = __context_lookup(file_priv, id);
2230         if (ctx)
2231                 return ctx;
2232
2233         mutex_lock(&file_priv->proto_context_lock);
2234         /* Try one more time under the lock */
2235         ctx = __context_lookup(file_priv, id);
2236         if (!ctx) {
2237                 pc = xa_load(&file_priv->proto_context_xa, id);
2238                 if (!pc)
2239                         ctx = ERR_PTR(-ENOENT);
2240                 else
2241                         ctx = finalize_create_context_locked(file_priv, pc, id);
2242         }
2243         mutex_unlock(&file_priv->proto_context_lock);
2244
2245         return ctx;
2246 }
2247
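/*
 * Illustrative only (not part of this file): a minimal userspace sketch of
 * creating a context with a parameter applied atomically at creation time,
 * using the standard uapi definitions from include/uapi/drm/i915_drm.h
 * (-512 here is a below-normal priority, which needs no privilege):
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = -512,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */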
2248 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2249                                   struct drm_file *file)
2250 {
2251         struct drm_i915_private *i915 = to_i915(dev);
2252         struct drm_i915_gem_context_create_ext *args = data;
2253         struct create_ext ext_data;
2254         int ret;
2255         u32 id;
2256
2257         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2258                 return -ENODEV;
2259
2260         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2261                 return -EINVAL;
2262
2263         ret = intel_gt_terminally_wedged(to_gt(i915));
2264         if (ret)
2265                 return ret;
2266
2267         ext_data.fpriv = file->driver_priv;
2268         if (client_is_banned(ext_data.fpriv)) {
2269                 drm_dbg(&i915->drm,
2270                         "client %s[%d] banned from creating ctx\n",
2271                         current->comm, task_pid_nr(current));
2272                 return -EIO;
2273         }
2274
2275         ext_data.pc = proto_context_create(i915, args->flags);
2276         if (IS_ERR(ext_data.pc))
2277                 return PTR_ERR(ext_data.pc);
2278
2279         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2280                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2281                                            create_extensions,
2282                                            ARRAY_SIZE(create_extensions),
2283                                            &ext_data);
2284                 if (ret)
2285                         goto err_pc;
2286         }
2287
2288         if (GRAPHICS_VER(i915) > 12) {
2289                 struct i915_gem_context *ctx;
2290
2291                 /* Get ourselves a context ID */
2292                 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2293                                xa_limit_32b, GFP_KERNEL);
2294                 if (ret)
2295                         goto err_pc;
2296
2297                 ctx = i915_gem_create_context(i915, ext_data.pc);
2298                 if (IS_ERR(ctx)) {
2299                         ret = PTR_ERR(ctx);
2300                         goto err_pc;
2301                 }
2302
2303                 proto_context_close(i915, ext_data.pc);
2304                 gem_context_register(ctx, ext_data.fpriv, id);
2305         } else {
2306                 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2307                 if (ret < 0)
2308                         goto err_pc;
2309         }
2310
2311         args->ctx_id = id;
2312
2313         return 0;
2314
2315 err_pc:
2316         proto_context_close(i915, ext_data.pc);
2317         return ret;
2318 }
2319
2320 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2321                                    struct drm_file *file)
2322 {
2323         struct drm_i915_gem_context_destroy *args = data;
2324         struct drm_i915_file_private *file_priv = file->driver_priv;
2325         struct i915_gem_proto_context *pc;
2326         struct i915_gem_context *ctx;
2327
2328         if (args->pad != 0)
2329                 return -EINVAL;
2330
2331         if (!args->ctx_id)
2332                 return -ENOENT;
2333
2334         /* We need to hold the proto-context lock here to prevent races
2335          * with finalize_create_context_locked().
2336          */
2337         mutex_lock(&file_priv->proto_context_lock);
2338         ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2339         pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2340         mutex_unlock(&file_priv->proto_context_lock);
2341
2342         if (!ctx && !pc)
2343                 return -ENOENT;
2344         GEM_WARN_ON(ctx && pc);
2345
2346         if (pc)
2347                 proto_context_close(file_priv->dev_priv, pc);
2348
2349         if (ctx)
2350                 context_close(ctx);
2351
2352         return 0;
2353 }
2354
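/*
 * Report the current SSEU configuration of one engine back to userspace.
 * args->size == 0 is the query form: only the expected struct size is
 * returned, nothing is copied.
 */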
2355 static int get_sseu(struct i915_gem_context *ctx,
2356                     struct drm_i915_gem_context_param *args)
2357 {
2358         struct drm_i915_gem_context_param_sseu user_sseu;
2359         struct intel_context *ce;
2360         unsigned long lookup;
2361         int err;
2362
2363         if (args->size == 0)
2364                 goto out;
2365         else if (args->size < sizeof(user_sseu))
2366                 return -EINVAL;
2367
2368         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2369                            sizeof(user_sseu)))
2370                 return -EFAULT;
2371
2372         if (user_sseu.rsvd)
2373                 return -EINVAL;
2374
2375         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2376                 return -EINVAL;
2377
2378         lookup = 0;
2379         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2380                 lookup |= LOOKUP_USER_INDEX;
2381
2382         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2383         if (IS_ERR(ce))
2384                 return PTR_ERR(ce);
2385
2386         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2387         if (err) {
2388                 intel_context_put(ce);
2389                 return err;
2390         }
2391
2392         user_sseu.slice_mask = ce->sseu.slice_mask;
2393         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2394         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2395         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2396
2397         intel_context_unlock_pinned(ce);
2398         intel_context_put(ce);
2399
2400         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2401                          sizeof(user_sseu)))
2402                 return -EFAULT;
2403
2404 out:
2405         args->size = sizeof(user_sseu);
2406
2407         return 0;
2408 }
2409
2410 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2411                                     struct drm_file *file)
2412 {
2413         struct drm_i915_file_private *file_priv = file->driver_priv;
2414         struct drm_i915_gem_context_param *args = data;
2415         struct i915_gem_context *ctx;
2416         struct i915_address_space *vm;
2417         int ret = 0;
2418
2419         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2420         if (IS_ERR(ctx))
2421                 return PTR_ERR(ctx);
2422
2423         switch (args->param) {
2424         case I915_CONTEXT_PARAM_GTT_SIZE:
2425                 args->size = 0;
2426                 vm = i915_gem_context_get_eb_vm(ctx);
2427                 args->value = vm->total;
2428                 i915_vm_put(vm);
2429
2430                 break;
2431
2432         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2433                 args->size = 0;
2434                 args->value = i915_gem_context_no_error_capture(ctx);
2435                 break;
2436
2437         case I915_CONTEXT_PARAM_BANNABLE:
2438                 args->size = 0;
2439                 args->value = i915_gem_context_is_bannable(ctx);
2440                 break;
2441
2442         case I915_CONTEXT_PARAM_RECOVERABLE:
2443                 args->size = 0;
2444                 args->value = i915_gem_context_is_recoverable(ctx);
2445                 break;
2446
2447         case I915_CONTEXT_PARAM_PRIORITY:
2448                 args->size = 0;
2449                 args->value = ctx->sched.priority;
2450                 break;
2451
2452         case I915_CONTEXT_PARAM_SSEU:
2453                 ret = get_sseu(ctx, args);
2454                 break;
2455
2456         case I915_CONTEXT_PARAM_VM:
2457                 ret = get_ppgtt(file_priv, ctx, args);
2458                 break;
2459
2460         case I915_CONTEXT_PARAM_PERSISTENCE:
2461                 args->size = 0;
2462                 args->value = i915_gem_context_is_persistent(ctx);
2463                 break;
2464
2465         case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2466                 ret = get_protected(ctx, args);
2467                 break;
2468
2469         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2470         case I915_CONTEXT_PARAM_BAN_PERIOD:
2471         case I915_CONTEXT_PARAM_ENGINES:
2472         case I915_CONTEXT_PARAM_RINGSIZE:
2473         default:
2474                 ret = -EINVAL;
2475                 break;
2476         }
2477
2478         i915_gem_context_put(ctx);
2479         return ret;
2480 }
2481
2482 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2483                                     struct drm_file *file)
2484 {
2485         struct drm_i915_file_private *file_priv = file->driver_priv;
2486         struct drm_i915_gem_context_param *args = data;
2487         struct i915_gem_proto_context *pc;
2488         struct i915_gem_context *ctx;
2489         int ret = 0;
2490
2491         mutex_lock(&file_priv->proto_context_lock);
2492         ctx = __context_lookup(file_priv, args->ctx_id);
2493         if (!ctx) {
2494                 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2495                 if (pc) {
2496                         /* Contexts should be finalized inside
2497                          * GEM_CONTEXT_CREATE starting with graphics
2498                          * version 13.
2499                          */
2500                         WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2501                         ret = set_proto_ctx_param(file_priv, pc, args);
2502                 } else {
2503                         ret = -ENOENT;
2504                 }
2505         }
2506         mutex_unlock(&file_priv->proto_context_lock);
2507
2508         if (ctx) {
2509                 ret = ctx_setparam(file_priv, ctx, args);
2510                 i915_gem_context_put(ctx);
2511         }
2512
2513         return ret;
2514 }
2515
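/*
 * Illustrative only (not part of this file): userspace checks whether its
 * context has been involved in any GPU hangs with
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *
 * where batch_active counts the hangs blamed on this context and
 * batch_pending the hangs in which it was merely caught running.
 */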
2516 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2517                                        void *data, struct drm_file *file)
2518 {
2519         struct drm_i915_private *i915 = to_i915(dev);
2520         struct drm_i915_reset_stats *args = data;
2521         struct i915_gem_context *ctx;
2522
2523         if (args->flags || args->pad)
2524                 return -EINVAL;
2525
2526         ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2527         if (IS_ERR(ctx))
2528                 return PTR_ERR(ctx);
2529
2530         /*
2531          * We opt for unserialised reads here. This may result in tearing
2532          * in the extremely unlikely event of a GPU hang on this context
2533          * as we are querying them. If we need that extra layer of protection,
2534          * we should wrap the hangstats with a seqlock.
2535          */
2536
2537         if (capable(CAP_SYS_ADMIN))
2538                 args->reset_count = i915_reset_count(&i915->gpu_error);
2539         else
2540                 args->reset_count = 0;
2541
2542         args->batch_active = atomic_read(&ctx->guilty_count);
2543         args->batch_pending = atomic_read(&ctx->active_count);
2544
2545         i915_gem_context_put(ctx);
2546         return 0;
2547 }
2548
2549 /* GEM context-engines iterator: for_each_gem_engine() */
2550 struct intel_context *
2551 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2552 {
2553         const struct i915_gem_engines *e = it->engines;
2554         struct intel_context *ctx;
2555
2556         if (unlikely(!e))
2557                 return NULL;
2558
2559         do {
2560                 if (it->idx >= e->num_engines)
2561                         return NULL;
2562
2563                 ctx = e->engines[it->idx++];
2564         } while (!ctx);
2565
2566         return ctx;
2567 }
2568
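/*
 * Illustrative only: i915_gem_engines_iter_next() is normally consumed via
 * the for_each_gem_engine() macro, as in set_priority() above:
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *		intel_context_set_use_semaphores(ce);
 *	i915_gem_context_unlock_engines(ctx);
 */
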
2569 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2570 #include "selftests/mock_context.c"
2571 #include "selftests/i915_gem_context.c"
2572 #endif
2573
2574 void i915_gem_context_module_exit(void)
2575 {
2576         kmem_cache_destroy(slab_luts);
2577 }
2578
2579 int __init i915_gem_context_module_init(void)
2580 {
2581         slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2582         if (!slab_luts)
2583                 return -ENOMEM;
2584
2585         return 0;
2586 }